diff --git a/kubetest/BUILD.bazel b/kubetest/BUILD.bazel index eb070004f3fa..76d8b2215129 100644 --- a/kubetest/BUILD.bazel +++ b/kubetest/BUILD.bazel @@ -28,6 +28,7 @@ go_library( "//kubetest/conformance:go_default_library", "//kubetest/dind:go_default_library", "//kubetest/e2e:go_default_library", + "//kubetest/kubeadmdind:go_default_library", "//kubetest/process:go_default_library", "//kubetest/util:go_default_library", "//vendor/github.com/aws/aws-sdk-go/aws:go_default_library", @@ -74,6 +75,7 @@ filegroup( "//kubetest/conformance:all-srcs", "//kubetest/dind:all-srcs", "//kubetest/e2e:all-srcs", + "//kubetest/kubeadmdind:all-srcs", "//kubetest/process:all-srcs", "//kubetest/util:all-srcs", ], diff --git a/kubetest/kubeadmdind/BUILD.bazel b/kubetest/kubeadmdind/BUILD.bazel new file mode 100644 index 000000000000..7b7e51b18d59 --- /dev/null +++ b/kubetest/kubeadmdind/BUILD.bazel @@ -0,0 +1,30 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "go_default_library", + srcs = ["kubeadm_dind.go"], + importpath = "k8s.io/test-infra/kubetest/kubeadmdind", + visibility = ["//visibility:public"], + deps = ["//kubetest/process:go_default_library"], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) + +go_test( + name = "go_default_test", + srcs = ["kubeadm_dind_test.go"], + embed = [":go_default_library"], + deps = ["//kubetest/process:go_default_library"], +) diff --git a/kubetest/kubeadmdind/OWNERS b/kubetest/kubeadmdind/OWNERS new file mode 100644 index 000000000000..f2e5999e7424 --- /dev/null +++ b/kubetest/kubeadmdind/OWNERS @@ -0,0 +1,8 @@ +approvers: +- leblancd +- pmichali +- rpothier +reviewers: +- leblancd +- pmichali +- rpothier diff --git a/kubetest/kubeadmdind/kubeadm_dind.go 
b/kubetest/kubeadmdind/kubeadm_dind.go new file mode 100644 index 000000000000..5d42bccfc446 --- /dev/null +++ b/kubetest/kubeadmdind/kubeadm_dind.go @@ -0,0 +1,560 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package kubeadmdind implements a kubetest deployer based on the scripts +// in the github.com/kubernetes-sigs/kubeadm-dind-cluster repo. +// This deployer can be used to create a multinode, containerized Kubernetes +// cluster that runs inside a Prow DinD container. 
+package kubeadmdind + +import ( + "bytes" + "errors" + "flag" + "fmt" + "io/ioutil" + "log" + "os" + "os/exec" + "path/filepath" + "strings" + "time" + + "k8s.io/test-infra/kubetest/process" +) + +var ( + // Names that are fixed in the Kubeadm DinD scripts + kubeMasterPrefix = "kube-master" + kubeNodePrefix = "kube-node" + + // Systemd service logs to collect on the host container + hostServices = []string{ + "docker", + } + + // Docker commands to run on the host container and embedded node + // containers for log dump + dockerCommands = []struct { + cmd string + logFile string + }{ + {"docker images", "docker_images.log"}, + {"docker ps -a", "docker_ps.log"}, + } + + // Systemd service logs to collect on the master and worker embedded + // node containers for log dump + systemdServices = []string{ + "kubelet", + "docker", + } + masterKubePods = []string{ + "kube-apiserver", + "kube-scheduler", + "kube-controller-manager", + "kube-proxy", + "etcd", + } + nodeKubePods = []string{ + "kube-proxy", + "kube-dns", + } + + // Where to look for (nested) container log files on the node containers + nodeLogDir = "/var/log" + + // Relative path to Kubernetes source tree + kubeOrg = "k8s.io" + kubeRepo = "kubernetes" + + // Kubeadm-DinD-Cluster (kdc) repo and main script + kdcOrg = "github.com/kubernetes-sigs" + kdcRepo = "kubeadm-dind-cluster" + kdcScript = "fixed/dind-cluster-stable.sh" + + // Number of worker nodes to create for testing + numWorkerNodes = "2" + + // Kubeadm-DinD specific flags + kubeadmDinDIPMode = flag.String("kubeadm-dind-ip-mode", "ipv4", "(Kubeadm-DinD only) IP Mode. 
Can be 'ipv4' (default), 'ipv6', or 'dual-stack'.") +) + +// Deployer is used to implement a kubetest deployer interface +type Deployer struct { + ipMode string + hostCmder execCmder + control *process.Control +} + +// NewDeployer returns a new Kubeadm-DinD Deployer +func NewDeployer(control *process.Control) (*Deployer, error) { + d := &Deployer{ + ipMode: *kubeadmDinDIPMode, + hostCmder: new(hostCmder), + control: control, + } + + switch d.ipMode { + case "ipv4": + // Valid value + case "ipv6", "dual-stack": + log.Printf("Enabling IPv6") + if err := d.run("sysctl -w net.ipv6.conf.all.disable_ipv6=0"); err != nil { + return nil, err + } + default: + return nil, fmt.Errorf("configured --ip-mode=%s is not supported for --deployment=kubeadmdind", d.ipMode) + } + + return d, nil +} + +// execCmd executes a command on the host container. +func (d *Deployer) execCmd(cmd string) *exec.Cmd { + return d.hostCmder.execCmd(cmd) +} + +// run runs a command on the host container, and prints any errors. +func (d *Deployer) run(cmd string) error { + err := d.control.FinishRunning(d.execCmd(cmd)) + if err != nil { + fmt.Printf("Error: '%v'", err) + } + return err +} + +// getOutput runs a command on the host container, prints any errors, +// and returns command output. +func (d *Deployer) getOutput(cmd string) ([]byte, error) { + execCmd := d.execCmd(cmd) + o, err := d.control.Output(execCmd) + if err != nil { + log.Printf("Error: '%v'", err) + return nil, err + } + return o, nil +} + +// outputWithStderr runs a command on the host container and returns +// combined stdout and stderr. +func (d *Deployer) outputWithStderr(cmd *exec.Cmd) ([]byte, error) { + var stdOutErr bytes.Buffer + cmd.Stdout = &stdOutErr + cmd.Stderr = &stdOutErr + err := d.control.FinishRunning(cmd) + return stdOutErr.Bytes(), err +} + +// Up brings up a multinode, containerized Kubernetes cluster inside a +// Prow DinD container. 
+func (d *Deployer) Up() error { + + d.setEnv() + + // Scripts must be run from Kubernetes source directory + kubeDir, err := findPath(kubeOrg, kubeRepo, "") + if err == nil { + err = os.Chdir(kubeDir) + } + if err != nil { + return err + } + + // Bring up a cluster inside the host Prow container + script, err := findPath(kdcOrg, kdcRepo, kdcScript) + if err != nil { + return err + } + return d.run(script + " up") +} + +// setEnv sets environment variables for building and testing +// a cluster. +func (d *Deployer) setEnv() error { + // Set KUBERNETES_CONFORMANCE_TEST so that the master IP address + // is derived from kube config rather than through gcloud. + envMap := map[string]string{ + "NUM_NODES": numWorkerNodes, + "BUILD_KUBEADM": "y", + "BUILD_HYPERKUBE": "y", + "IP_MODE": d.ipMode, + "KUBERNETES_CONFORMANCE_TEST": "y", + "REMOTE_DNS64_V4SERVER": "173.37.87.157", + } + for env, val := range envMap { + if err := os.Setenv(env, val); err != nil { + return err + } + } + return nil +} + +// IsUp determines if a cluster is up based on whether one or more nodes +// is ready. +func (d *Deployer) IsUp() error { + n, err := d.clusterSize() + if err != nil { + return err + } + if n <= 0 { + return fmt.Errorf("cluster found, but %d nodes reported", n) + } + return nil +} + +// DumpClusterLogs dumps docker state and service logs for: +// - Host Prow container +// - Kube master node container(s) +// - Kube worker node containers +// to a local artifacts directory. +func (d *Deployer) DumpClusterLogs(localPath, gcsPath string) error { + // Save logs from the host container + if err := d.saveHostLogs(localPath); err != nil { + return err + } + + // Save logs from master node container(s) + if err := d.saveMasterNodeLogs(localPath); err != nil { + return err + } + + // Save logs from worker node containers + return d.saveWorkerNodeLogs(localPath) +} + +// TestSetup builds end-to-end test and ginkgo binaries. 
+func (d *Deployer) TestSetup() error { + // Build e2e.test and ginkgo binaries + if err := d.run("make WHAT=test/e2e/e2e.test"); err != nil { + return err + } + return d.run("make WHAT=vendor/github.com/onsi/ginkgo/ginkgo") +} + +// Down brings the DinD-based cluster down and cleans up any DinD state +func (d *Deployer) Down() error { + // Bring the cluster down and clean up kubeadm-dind-cluster state + script, err := findPath(kdcOrg, kdcRepo, kdcScript) + if err != nil { + return err + } + clusterDownCommands := []string{ + script + " down", + script + " clean", + } + for _, cmd := range clusterDownCommands { + if err := d.run(cmd); err != nil { + return err + } + } + return nil +} + +// GetClusterCreated is not yet implemented. +func (d *Deployer) GetClusterCreated(gcpProject string) (time.Time, error) { + return time.Time{}, errors.New("not implemented") +} + +// findPath looks for the existence of a file or directory based on +// a github organization, github repo, and a relative path. It looks +// for the file/directory in this order: +// - $WORKSPACE/<gitOrg>/<gitRepo>/<gitFile> +// - $GOPATH/src/<gitOrg>/<gitRepo>/<gitFile> +// - ./<gitRepo>/<gitFile> +// - ./<gitFile> +// and returns the path for the first match or returns an error. +func findPath(gitOrg, gitRepo, gitFile string) (string, error) { + workPath := os.Getenv("WORKSPACE") + if workPath != "" { + workPath = filepath.Join(workPath, gitOrg, gitRepo, gitFile) + } + goPath := os.Getenv("GOPATH") + if goPath != "" { + goPath = filepath.Join(goPath, "src", gitOrg, gitRepo, gitFile) + } + relPath := filepath.Join(gitRepo, gitFile) + paths := []string{workPath, goPath, relPath, gitFile} + for _, path := range paths { + _, err := os.Stat(path) + if err == nil { + return path, nil + } + } + err := fmt.Errorf("could not locate %s/%s/%s", gitOrg, gitRepo, gitFile) + return "", err +} + +// execCmder defines an interface for providing a wrapper for processing +// command line strings before calling os/exec.Command(). 
+// There are two implementations of this interface defined below: +// - hostCmder: For executing commands locally (e.g. in Prow container). +// - nodeCmder: For executing commands on node containers embedded +// in the Prow container. +type execCmder interface { + execCmd(cmd string) *exec.Cmd +} + +// hostCmder implements the execCmder interface for processing commands +// locally (e.g. in Prow container). +type hostCmder struct{} + +// execCmd splits a command line string into a command (first word) and +// remaining arguments in variadic form, as required by exec.Command(). +func (h *hostCmder) execCmd(cmd string) *exec.Cmd { + words := strings.Fields(cmd) + return exec.Command(words[0], words[1:]...) +} + +// nodeCmder implements the execCmder interface for processing +// commands in an embedded node container. +type nodeCmder struct { + node string +} + +func newNodeCmder(node string) *nodeCmder { + cmder := new(nodeCmder) + cmder.node = node + return cmder +} + +// execCmd creates an exec.Cmd structure for running a command line on a +// nested node container in the host container. It is equivalent to running +// a command via 'docker exec <node-container> <command>'. +func (n *nodeCmder) execCmd(cmd string) *exec.Cmd { + args := strings.Fields(fmt.Sprintf("exec %s %s", n.node, cmd)) + return exec.Command("docker", args...) +} + +// getNode returns the node name for a nodeCmder +func (n *nodeCmder) getNode() string { + return n.node +} + +// execCmdSaveLog executes a command either in the host container or +// in an embedded node container, and writes the combined stdout and +// stderr to a log file in a local artifacts directory. (Stderr is +// required because running 'docker logs ...' on nodes sometimes +// returns results as stderr). 
+func (d *Deployer) execCmdSaveLog(cmder execCmder, cmd string, logDir string, logFile string) error { + execCmd := cmder.execCmd(cmd) + o, err := d.outputWithStderr(execCmd) + if err != nil { + log.Printf("%v", err) + if len(o) > 0 { + log.Printf("%s", o) + } + // Ignore the command error and continue collecting logs + return nil + } + logPath := filepath.Join(logDir, logFile) + return ioutil.WriteFile(logPath, o, 0644) +} + +// saveDockerState saves docker state for either a host Prow container +// or an embedded node container. +func (d *Deployer) saveDockerState(cmder execCmder, logDir string) error { + for _, dockerCommand := range dockerCommands { + if err := d.execCmdSaveLog(cmder, dockerCommand.cmd, logDir, dockerCommand.logFile); err != nil { + return err + } + } + return nil +} + +// saveServiceLogs saves logs for a list of systemd services on either +// a host Prow container or an embedded node container. +func (d *Deployer) saveServiceLogs(cmder execCmder, services []string, logDir string) error { + for _, svc := range services { + cmd := fmt.Sprintf("journalctl -u %s.service", svc) + logFile := fmt.Sprintf("%s.log", svc) + if err := d.execCmdSaveLog(cmder, cmd, logDir, logFile); err != nil { + return err + } + } + return nil +} + +// clusterSize determines the number of nodes in a cluster. 
+func (d *Deployer) clusterSize() (int, error) { + o, err := d.getOutput("kubectl get nodes --no-headers") + if err != nil { + return -1, fmt.Errorf("kubectl get nodes failed: %s\n%s", err, string(o)) + } + trimmed := strings.TrimSpace(string(o)) + if trimmed != "" { + return len(strings.Split(trimmed, "\n")), nil + } + return 0, nil +} + +// Create a local log artifacts directory +func (d *Deployer) makeLogDir(logDir string) error { + cmd := fmt.Sprintf("mkdir -p %s", logDir) + execCmd := d.execCmd(cmd) + return d.control.FinishRunning(execCmd) +} + +// saveHostLogs collects service logs and docker state from the host +// container, and saves the logs in a local artifacts directory. +func (d *Deployer) saveHostLogs(artifactsDir string) error { + log.Printf("Saving logs from host container") + + // Create directory for the host container artifacts + logDir := filepath.Join(artifactsDir, "host-container") + if err := d.run("mkdir -p " + logDir); err != nil { + return err + } + + // Save docker state for the host container + if err := d.saveDockerState(d.hostCmder, logDir); err != nil { + return err + } + + // Copy service logs from the host container + return d.saveServiceLogs(d.hostCmder, hostServices, logDir) +} + +// saveMasterNodeLogs collects docker state, service logs, and Kubernetes +// system pod logs from all nested master node containers that are running +// on the host container, and saves the logs in a local artifacts directory. 
+func (d *Deployer) saveMasterNodeLogs(artifactsDir string) error { + masters, err := d.detectNodeContainers(kubeMasterPrefix) + if err != nil { + return err + } + for _, master := range masters { + if err := d.saveNodeLogs(master, artifactsDir, systemdServices, masterKubePods); err != nil { + return err + } + } + return nil +} + +// saveWorkerNodeLogs collects docker state, service logs, and Kubernetes +// system pod logs from all nested worker node containers that are running +// on the host container, and saves the logs in a local artifacts directory. +func (d *Deployer) saveWorkerNodeLogs(artifactsDir string) error { + nodes, err := d.detectNodeContainers(kubeNodePrefix) + if err != nil { + return err + } + for _, node := range nodes { + if err := d.saveNodeLogs(node, artifactsDir, systemdServices, nodeKubePods); err != nil { + return err + } + } + return nil +} + +// detectNodeContainers creates a list of names for either all master or all +// worker node containers. It does this by running 'kubectl get nodes ... ' +// and searching for container names that begin with a specified name prefix. +func (d *Deployer) detectNodeContainers(namePrefix string) ([]string, error) { + log.Printf("Looking for container names beginning with '%s'", namePrefix) + o, err := d.getOutput("kubectl get nodes --no-headers") + if err != nil { + return nil, err + } + var nodes []string + trimmed := strings.TrimSpace(string(o)) + if trimmed != "" { + lines := strings.Split(trimmed, "\n") + for _, line := range lines { + fields := strings.Fields(line) + name := fields[0] + if strings.Contains(name, namePrefix) { + nodes = append(nodes, name) + } + } + } + return nodes, nil +} + +// detectKubeContainers creates a list of containers (either running or +// exited) on a master or worker node whose names contain any of a list of +// Kubernetes system pod name substrings. 
+func (d *Deployer) detectKubeContainers(nodeCmder execCmder, node string, kubePods []string) ([]string, error) { + // Run 'docker ps -a' on the node container + cmd := fmt.Sprintf("docker ps -a") + execCmd := nodeCmder.execCmd(cmd) + o, err := d.control.Output(execCmd) + if err != nil { + log.Printf("Error running '%s' on %s: '%v'", cmd, node, err) + return nil, err + } + // Find container names that contain any of a list of pod name substrings + var containers []string + if trimmed := strings.TrimSpace(string(o)); trimmed != "" { + lines := strings.Split(trimmed, "\n") + for _, line := range lines { + if fields := strings.Fields(line); len(fields) > 0 { + name := fields[len(fields)-1] + if strings.Contains(name, "_POD_") { + // Ignore infra containers + continue + } + for _, pod := range kubePods { + if strings.Contains(name, pod) { + containers = append(containers, name) + break + } + } + } + } + } + return containers, nil +} + +// saveNodeLogs collects docker state, service logs, and Kubernetes +// system pod logs for a given node container, and saves the logs in a local +// artifacts directory. +func (d *Deployer) saveNodeLogs(node string, artifactsDir string, services []string, kubePods []string) error { + log.Printf("Saving logs from node container %s", node) + + // Create directory for node container artifacts + logDir := filepath.Join(artifactsDir, node) + if err := d.run("mkdir -p " + logDir); err != nil { + return err + } + + cmder := newNodeCmder(node) + + // Save docker state for this node + if err := d.saveDockerState(cmder, logDir); err != nil { + return err + } + + // Copy service logs from the node container + if err := d.saveServiceLogs(cmder, services, logDir); err != nil { + return err + } + + // Copy log files for kube system pod containers (running or exited) + // from this node container. 
+ containers, err := d.detectKubeContainers(cmder, node, kubePods) + if err != nil { + return err + } + for _, container := range containers { + cmd := fmt.Sprintf("docker logs %s", container) + logFile := fmt.Sprintf("%s.log", container) + if err := d.execCmdSaveLog(cmder, cmd, logDir, logFile); err != nil { + return err + } + } + return nil +} diff --git a/kubetest/kubeadmdind/kubeadm_dind_test.go b/kubetest/kubeadmdind/kubeadm_dind_test.go new file mode 100644 index 000000000000..e898f8313741 --- /dev/null +++ b/kubetest/kubeadmdind/kubeadm_dind_test.go @@ -0,0 +1,333 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package kubeadmdind + +import ( + "os/exec" + "testing" + "time" + + "k8s.io/test-infra/kubetest/process" +) + +// fakeExecCmder implements the execCmder interface for testing how the +// deployer processes executed command output. +type fakeExecCmder struct { + simulatedOutput string // Simulated output + generateError bool // Create a command that causes an error +} + +func newFakeExecCmder(simOutput string, genError bool) *fakeExecCmder { + cmder := new(fakeExecCmder) + cmder.simulatedOutput = simOutput + cmder.generateError = genError + return cmder +} + +// execCmd creates an exec.Cmd structure for either: +// - Echoing a simulated output string to be processed by the deployer. +// - Running a bogus command to cause an execution error to be processed +// by the deployer. 
+func (f *fakeExecCmder) execCmd(cmd string) *exec.Cmd { + if f.generateError { + return exec.Command("Bogus_Command_to_Cause_an_Error") + } + return exec.Command("echo", f.simulatedOutput) +} + +// fakeNodeCmder implements the execCmder interface for testing how the +// deployer processes output from commands executed on a node. +type fakeNodeCmder struct { + node string + simulatedOutput string // Simulated output + generateError bool // Create a command that causes an error +} + +func newFakeNodeCmder(node, simOutput string, genError bool) *fakeNodeCmder { + cmder := new(fakeNodeCmder) + cmder.node = node + cmder.simulatedOutput = simOutput + cmder.generateError = genError + return cmder +} + +// execCmd creates an exec.Cmd structure for either: +// - Echoing a simulated output string to be processed by the deployer. +// - Running a bogus command to cause an execution error to be processed +// by the deployer. +func (f *fakeNodeCmder) execCmd(cmd string) *exec.Cmd { + if f.generateError { + return exec.Command("Bogus_Command_to_Cause_an_Error") + } + return exec.Command("echo", f.simulatedOutput) +} + +// getNode returns the node name for a fakeNodeCmder +func (f *fakeNodeCmder) getNode() string { + return f.node +} + +// createTestDeployer creates a kubeadmdind deployer for unit testing. +func createTestDeployer(ipMode string) (*Deployer, error) { + *kubeadmDinDIPMode = ipMode + timeout := time.Duration(10) * time.Second + interrupt := time.NewTimer(time.Duration(10) * time.Second) + terminate := time.NewTimer(time.Duration(10) * time.Second) + verbose := false + control := process.NewControl(timeout, interrupt, terminate, verbose) + return NewDeployer(control) +} + +// slicesAreEqual tests whether two slices of strings are of equal length +// and have the same entries, independent of ordering. It assumes that +// entries in the slice being compared against (argument 'sliceA', and by +// extension, both slices) form a set. 
+func slicesAreEqual(sliceA, sliceB []string) bool { + if len(sliceA) != len(sliceB) { + return false + } + matched := false + for _, stringA := range sliceA { + matched = false + for _, stringB := range sliceB { + if stringB == stringA { + matched = true + break + } + } + if !matched { + return false + } + } + return true +} + +// TestIPModeValidation tests whether the NewDeployer method correctly +// validates configured values for IP mode. +func TestIPModeValidation(t *testing.T) { + testCases := []struct { + ipMode string + expError bool + }{ + { + ipMode: "ipv4", + expError: false, + }, + { + ipMode: "ipv6", + expError: false, + }, + { + ipMode: "dual-stack", + expError: false, + }, + { + ipMode: "twas-bryllyg", + expError: true, + }, + } + for _, tc := range testCases { + _, err := createTestDeployer(tc.ipMode) + switch { + case err != nil && !tc.expError: + t.Errorf("ip mode '%s': Unexpected error: %v", tc.ipMode, err) + continue + case err == nil && tc.expError: + t.Errorf("ip mode '%s': Did not get expected error", tc.ipMode) + continue + } + } +} + +// TestClusterSize tests whether the clusterSize method: +// - Processes a sample 'kubectl get nodes --no-header' output and +// calculates the correct number of nodes, or... +// - Handles 'kubectl get nodes ...' 
command errors (reports -1 nodes) +func TestClusterSize(t *testing.T) { + d, err := createTestDeployer("ipv4") + if err != nil { + t.Errorf("couldn't create deployer: %v", err) + return + } + + testCases := []struct { + testName string + simOutput string + genError bool + expSize int + }{ + { + testName: "No nodes", + simOutput: "", + expSize: 0, + }, + { + testName: "3-node Cluster", + simOutput: ` +kube-master Ready master 10m v1.11.0 +kube-node-1 Ready 10m v1.11.0 +kube-node-2 Ready 10m v1.11.0 +`, + expSize: 3, + }, + { + testName: "Simulated command error", + genError: true, + expSize: -1, + }, + } + for _, tc := range testCases { + d.hostCmder = newFakeExecCmder(tc.simOutput, tc.genError) + size, err := d.clusterSize() + switch { + case err != nil && !tc.genError: + t.Errorf("test case '%s': Unexpected error %v", tc.testName, err) + continue + case err == nil && tc.genError: + t.Errorf("test case '%s': Did not get expected error", tc.testName) + continue + } + if size != tc.expSize { + t.Errorf("test case '%s': expected size %d, found size %d", tc.testName, tc.expSize, size) + continue + } + } +} + +// TestDetectNodeContainers tests whether detectNodeContainers can +// either correctly process a sample command output for 'kubectl get +// nodes ...', or gracefully handle a command error. 
Test cases include: +// - Detect master nodes +// - Detect worker nodes +// - Return an empty list upon command error +func TestDetectNodeContainers(t *testing.T) { + d, err := createTestDeployer("ipv4") + if err != nil { + t.Errorf("couldn't create deployer: %v", err) + return + } + + kubectlNodesOutput := ` +kube-master Ready master 1d v1.11.0-alpha.0 +kube-node-1 Ready 1d v1.11.0-alpha.0 +kube-node-2 Ready 1d v1.11.0-alpha.0 +` + testCases := []struct { + testName string + nodePrefix string + genError bool + expNodes []string + }{ + { + testName: "Detect master nodes", + nodePrefix: kubeMasterPrefix, + expNodes: []string{"kube-master"}, + }, + { + testName: "Detect worker nodes", + nodePrefix: kubeNodePrefix, + expNodes: []string{"kube-node-1", "kube-node-2"}, + }, + { + testName: "Check error handling", + nodePrefix: kubeNodePrefix, + genError: true, + expNodes: []string{}, + }, + } + + for _, tc := range testCases { + d.hostCmder = newFakeExecCmder(kubectlNodesOutput, tc.genError) + foundNodes, err := d.detectNodeContainers(tc.nodePrefix) + switch { + case err != nil && !tc.genError: + t.Errorf("test case '%s': Unexpected error: %v", tc.testName, err) + continue + case err == nil && tc.genError: + t.Errorf("test case '%s': Did not get expected error", tc.testName) + continue + } + // Check whether the expected nodes have all been detected + if !slicesAreEqual(tc.expNodes, foundNodes) { + t.Errorf("test case: '%s', Expected nodes: %v, Detected nodes: %v", tc.testName, tc.expNodes, foundNodes) + continue + } + } +} + +// TestDetectKubeContainers tests whether detectKubeContainers can +// either correctly process a sample command output for 'docker ps -a', +// or gracefully handle a command error. 
Test cases include: +// - Detect Kubernetes system pod containers on a node +// - Return an empty list upon command error +func TestDetectKubeContainers(t *testing.T) { + d, err := createTestDeployer("ipv4") + if err != nil { + t.Errorf("couldn't create deployer: %v", err) + return + } + + dockerPsOutput := "CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES\n" + + + "fba3566d4b43 k8s.gcr.io/k8s-dns-sidecar \"/sidecar --v=2 --log\" 10 minutes ago Up 10 minutes k8s_sidecar_kube-dns-69f5bbc4c7\n" + + "3b7d8cf5b937 k8s.gcr.io/k8s-dns-dnsmasq-nanny \"/dnsmasq-nanny -v=2 \" 10 minutes ago Up 10 minutes k8s_dnsmasq_kube-dns-69f5bbc4c7\n" + + "5aacb0551aa6 k8s.gcr.io/k8s-dns-kube-dns \"/kube-dns --domain=c\" 10 minutes ago Up 10 minutes k8s_kubedns_kube-dns-69f5bbc4c7\n" + + "a4abfb755f58 k8s.gcr.io/pause-amd64:3.1 \"/pause\" 10 minutes ago Up 10 minutes k8s_POD_kube-dns-69f5bbc4c7\n" + + "03d1bb19d515 60e55008753b \"/usr/local/bin/kube-\" 10 minutes ago Up 10 minutes k8s_kube-proxy_kube-proxy-4tzr8\n" + + "1455bc3829d0 k8s.gcr.io/pause-amd64:3.1 \"/pause\" 10 minutes ago Up 10 minutes k8s_POD_kube-proxy-4tzr8\n" + + testCases := []struct { + testName string + genError bool + expContainers []string + }{ + { + testName: "Detect Containers", + genError: false, + expContainers: []string{ + "k8s_sidecar_kube-dns-69f5bbc4c7", + "k8s_dnsmasq_kube-dns-69f5bbc4c7", + "k8s_kubedns_kube-dns-69f5bbc4c7", + "k8s_kube-proxy_kube-proxy-4tzr8", + }, + }, + { + testName: "Check error handling", + genError: true, + expContainers: []string{}, + }, + } + + for _, tc := range testCases { + node := "fakeNodeName" + fakeCmder := newFakeNodeCmder(node, dockerPsOutput, tc.genError) + containers, err := d.detectKubeContainers(fakeCmder, node, nodeKubePods) + switch { + case err != nil && !tc.genError: + t.Errorf("test case '%s': Unexpected error: %v", tc.testName, err) + continue + case err == nil && tc.genError: + t.Errorf("test case '%s': Did not get expected error", tc.testName) + continue 
+ } + // Check whether the expected containers have been detected + if !slicesAreEqual(tc.expContainers, containers) { + t.Errorf("test case: '%s', Expected containers: %v, Detected containers: %v", tc.testName, tc.expContainers, containers) + continue + } + } +} diff --git a/kubetest/main.go b/kubetest/main.go index d4d524019e9c..3cf4d1341134 100644 --- a/kubetest/main.go +++ b/kubetest/main.go @@ -38,6 +38,7 @@ import ( "k8s.io/test-infra/boskos/client" "k8s.io/test-infra/kubetest/conformance" "k8s.io/test-infra/kubetest/dind" + "k8s.io/test-infra/kubetest/kubeadmdind" "k8s.io/test-infra/kubetest/process" "k8s.io/test-infra/kubetest/util" ) @@ -245,6 +246,8 @@ func getDeployer(o *options) (deployer, error) { return newGKE(o.provider, o.gcpProject, o.gcpZone, o.gcpRegion, o.gcpNetwork, o.gcpNodeImage, o.gcpImageFamily, o.gcpImageProject, o.cluster, &o.testArgs, &o.upgradeArgs) case "kops": return newKops(o.provider, o.gcpProject, o.cluster) + case "kubeadm-dind": + return kubeadmdind.NewDeployer(control) case "kubernetes-anywhere": if o.multiClusters.Enabled() { return newKubernetesAnywhereMultiCluster(o.gcpProject, o.gcpZone, o.multiClusters) diff --git a/scenarios/kubernetes_e2e.py b/scenarios/kubernetes_e2e.py index 612de3b19fd9..19b58ca60a3f 100755 --- a/scenarios/kubernetes_e2e.py +++ b/scenarios/kubernetes_e2e.py @@ -573,7 +573,7 @@ def main(args): set_up_kops_aws(mode.workspace, args, mode, cluster, runner_args) elif args.deployment == 'kops' and args.provider == 'gce': set_up_kops_gce(mode.workspace, args, mode, cluster, runner_args) - elif args.gce_ssh: + elif args.gce_ssh and args.provider != 'local': mode.add_gce_ssh(args.gce_ssh, args.gce_pub) # TODO(fejta): delete this?