diff --git a/cmd/performance/mkcmp/cmd/mkcmp.go b/cmd/performance/mkcmp/cmd/mkcmp.go index 57c5715f4a0b..2a40dd9d5fe0 100644 --- a/cmd/performance/mkcmp/cmd/mkcmp.go +++ b/cmd/performance/mkcmp/cmd/mkcmp.go @@ -35,7 +35,11 @@ var rootCmd = &cobra.Command{ return validateArgs(args) }, RunE: func(cmd *cobra.Command, args []string) error { - return perf.CompareMinikubeStart(context.Background(), os.Stdout, args) + binaries, err := retrieveBinaries(args) + if err != nil { + return err + } + return perf.CompareMinikubeStart(context.Background(), os.Stdout, binaries) }, } @@ -46,6 +50,18 @@ func validateArgs(args []string) error { return nil } +func retrieveBinaries(args []string) ([]*perf.Binary, error) { + binaries := []*perf.Binary{} + for _, a := range args { + binary, err := perf.NewBinary(a) + if err != nil { + return nil, err + } + binaries = append(binaries, binary) + } + return binaries, nil +} + // Execute runs the mkcmp command func Execute() { if err := rootCmd.Execute(); err != nil { diff --git a/hack/jenkins/windows_integration_test_docker.ps1 b/hack/jenkins/windows_integration_test_docker.ps1 new file mode 100644 index 000000000000..b30125c27de7 --- /dev/null +++ b/hack/jenkins/windows_integration_test_docker.ps1 @@ -0,0 +1,34 @@ +# Copyright 2019 The Kubernetes Authors All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +mkdir -p out +gsutil.cmd -m cp gs://minikube-builds/$env:MINIKUBE_LOCATION/minikube-windows-amd64.exe out/ +gsutil.cmd -m cp gs://minikube-builds/$env:MINIKUBE_LOCATION/e2e-windows-amd64.exe out/ +gsutil.cmd -m cp -r gs://minikube-builds/$env:MINIKUBE_LOCATION/testdata . 
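+# Clean up any clusters left over from a previous CI run before running the tests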
+ +./out/minikube-windows-amd64.exe delete --all + +out/e2e-windows-amd64.exe -minikube-start-args="--driver=docker" -binary=out/minikube-windows-amd64.exe -test.v -test.timeout=65m +$env:result=$lastexitcode +# If the last exit code was 0->success, x>0->error +If($env:result -eq 0){$env:status="success"} +Else {$env:status="failure"} + +# $env:SHORT_COMMIT=$env:COMMIT.substring(0, 7) +# to be used later to implement https://github.com/kubernetes/minikube/issues/6593 +$env:target_url="https://storage.googleapis.com/minikube-builds/logs/$env:MINIKUBE_LOCATION/Docker_Windows.txt" +$json = "{`"state`": `"$env:status`", `"description`": `"Jenkins`", `"target_url`": `"$env:target_url`", `"context`": `"Docker_Windows`"}" +Invoke-WebRequest -Uri "https://api.github.com/repos/kubernetes/minikube/statuses/$env:COMMIT`?access_token=$env:access_token" -Body $json -ContentType "application/json" -Method Post -usebasicparsing + +Exit $env:result \ No newline at end of file diff --git a/hack/jenkins/windows_integration_test_hyperv.ps1 b/hack/jenkins/windows_integration_test_hyperv.ps1 index 536c4e35cce8..45cd4f92d0f8 100644 --- a/hack/jenkins/windows_integration_test_hyperv.ps1 +++ b/hack/jenkins/windows_integration_test_hyperv.ps1 @@ -17,9 +17,9 @@ gsutil.cmd -m cp gs://minikube-builds/$env:MINIKUBE_LOCATION/minikube-windows-am gsutil.cmd -m cp gs://minikube-builds/$env:MINIKUBE_LOCATION/e2e-windows-amd64.exe out/ gsutil.cmd -m cp -r gs://minikube-builds/$env:MINIKUBE_LOCATION/testdata . -./out/minikube-windows-amd64.exe delete +./out/minikube-windows-amd64.exe delete --all -out/e2e-windows-amd64.exe -minikube-start-args="--driver=hyperv --hyperv-virtual-switch=primary-virtual-switch" -binary=out/minikube-windows-amd64.exe -test.v -test.timeout=65m +out/e2e-windows-amd64.exe -minikube-start-args="--driver=hyperv" -binary=out/minikube-windows-amd64.exe -test.v -test.timeout=65m $env:result=$lastexitcode # If the last exit code was 0->success, x>0->error If($env:result -eq 0){$env:status="success"} diff --git a/pkg/addons/addons.go b/pkg/addons/addons.go index b39ee3f60174..7965ac3a4e95 100644 --- a/pkg/addons/addons.go +++ b/pkg/addons/addons.go @@ -332,7 +332,7 @@ func Start(wg *sync.WaitGroup, cc *config.ClusterConfig, toEnable map[string]boo var awg sync.WaitGroup - out.T(out.AddonEnable, "Enabling addons: {{.addons}}", out.V{"addons": strings.Join(toEnableList, ", ")}) + defer out.T(out.AddonEnable, "Enabling addons: {{.addons}}", out.V{"addons": strings.Join(toEnableList, ", ")}) for _, a := range toEnableList { awg.Add(1) go func(name string) { diff --git a/pkg/minikube/bootstrapper/bsutil/kverify/default_sa.go b/pkg/minikube/bootstrapper/bsutil/kverify/default_sa.go index 453edff7037b..ab4a0a5d9486 100644 --- a/pkg/minikube/bootstrapper/bsutil/kverify/default_sa.go +++ b/pkg/minikube/bootstrapper/bsutil/kverify/default_sa.go @@ -18,36 +18,37 @@ limitations under the License. package kverify import ( - "fmt" "time" "github.com/golang/glog" "github.com/pkg/errors" meta "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/kubernetes" - "k8s.io/minikube/pkg/util/retry" + kconst "k8s.io/kubernetes/cmd/kubeadm/app/constants" ) // WaitForDefaultSA waits for the default service account to be created. 
func WaitForDefaultSA(cs *kubernetes.Clientset, timeout time.Duration) error { glog.Info("waiting for default service account to be created ...") start := time.Now() - saReady := func() error { + saReady := func() (bool, error) { // equivalent to manual check of 'kubectl --context profile get serviceaccount default' sas, err := cs.CoreV1().ServiceAccounts("default").List(meta.ListOptions{}) if err != nil { glog.Infof("temproary error waiting for default SA: %v", err) - return err + return false, nil } for _, sa := range sas.Items { if sa.Name == "default" { glog.Infof("found service account: %q", sa.Name) - return nil + return true, nil } } - return fmt.Errorf("couldn't find default service account") + return false, nil } - if err := retry.Expo(saReady, 500*time.Millisecond, timeout); err != nil { + + if err := wait.PollImmediate(kconst.APICallRetryInterval, timeout, saReady); err != nil { return errors.Wrapf(err, "waited %s for SA", time.Since(start)) } diff --git a/pkg/minikube/bootstrapper/bsutil/kverify/kverify.go b/pkg/minikube/bootstrapper/bsutil/kverify/kverify.go index f4486196c349..1d476ab28c91 100644 --- a/pkg/minikube/bootstrapper/bsutil/kverify/kverify.go +++ b/pkg/minikube/bootstrapper/bsutil/kverify/kverify.go @@ -31,8 +31,13 @@ const ( SystemPodsWaitKey = "system_pods" // DefaultSAWaitKey is the name used in the flags for default service account DefaultSAWaitKey = "default_sa" - // AppsRunning is the name used in the flags for waiting for k8s-apps to be running - AppsRunning = "apps_running" + // AppsRunningKey is the name used in the flags for waiting for k8s-apps to be running + AppsRunningKey = "apps_running" + // NodePressureKey is the name used in the flags for detecting node conditions such as + // disk, memory and PID pressure or network not ready. + NodePressureKey = "no_pressure" + // NodeReadyKey is the name used in the flags for waiting for the node status to be ready + NodeReadyKey = "node_ready" ) // vars related to the --wait flag @@ -40,13 +45,13 @@ var ( // DefaultComponents is map of the the default components to wait for DefaultComponents = map[string]bool{APIServerWaitKey: true, SystemPodsWaitKey: true} // NoWaitComponents is map of componets to wait for if specified 'none' or 'false' - NoComponents = map[string]bool{APIServerWaitKey: false, SystemPodsWaitKey: false, DefaultSAWaitKey: false, AppsRunning: false} + NoComponents = map[string]bool{APIServerWaitKey: false, SystemPodsWaitKey: false, DefaultSAWaitKey: false, AppsRunningKey: false, NodePressureKey: false, NodeReadyKey: false} // AllComponents is map for waiting for all components. - AllComponents = map[string]bool{APIServerWaitKey: true, SystemPodsWaitKey: true, DefaultSAWaitKey: true, AppsRunning: true} + AllComponents = map[string]bool{APIServerWaitKey: true, SystemPodsWaitKey: true, DefaultSAWaitKey: true, AppsRunningKey: true, NodePressureKey: true, NodeReadyKey: true} // DefaultWaitList is list of all default components to wait for. only names to be used for start flags. DefaultWaitList = []string{APIServerWaitKey, SystemPodsWaitKey} // AllComponentsList list of all valid components keys to wait for. only names to be used used for start flags.
- AllComponentsList = []string{APIServerWaitKey, SystemPodsWaitKey, DefaultSAWaitKey, AppsRunning} + AllComponentsList = []string{APIServerWaitKey, SystemPodsWaitKey, DefaultSAWaitKey, AppsRunningKey, NodePressureKey, NodeReadyKey} // AppsRunningList running list are valid k8s-app components to wait for them to be running AppsRunningList = []string{ "kube-dns", // coredns
diff --git a/pkg/minikube/bootstrapper/bsutil/kverify/node_pressure.go b/pkg/minikube/bootstrapper/bsutil/kverify/node_pressure.go new file mode 100644 index 000000000000..49853dc6b3e3 --- /dev/null +++ b/pkg/minikube/bootstrapper/bsutil/kverify/node_pressure.go @@ -0,0 +1,179 @@ +/* +Copyright 2020 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package kverify verifies a running kubernetes cluster is healthy +package kverify + +import ( + "fmt" + "time" + + "github.com/golang/glog" + "github.com/pkg/errors" + v1 "k8s.io/api/core/v1" + meta "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/kubernetes" + kconst "k8s.io/kubernetes/cmd/kubeadm/app/constants" +) + +const errTextFormat = "node has unwanted condition %q : Reason %q Message: %q" + +// ErrMemoryPressure is thrown when the node is under memory pressure +type ErrMemoryPressure struct { + NodeCondition +} + +func (e *ErrMemoryPressure) Error() string { + return fmt.Sprintf(errTextFormat, e.Type, e.Reason, e.Message) +} + +// ErrDiskPressure is thrown when the node is under disk pressure +type ErrDiskPressure struct { + NodeCondition +} + +func (e *ErrDiskPressure) Error() string { + return fmt.Sprintf(errTextFormat, e.Type, e.Reason, e.Message) +} + +// ErrPIDPressure is thrown when the node is under PID pressure +type ErrPIDPressure struct { + NodeCondition +} + +func (e *ErrPIDPressure) Error() string { + return fmt.Sprintf(errTextFormat, e.Type, e.Reason, e.Message) +} + +// ErrNetworkNotReady is thrown when the node's network is not ready +type ErrNetworkNotReady struct { + NodeCondition +} + +func (e *ErrNetworkNotReady) Error() string { + return fmt.Sprintf(errTextFormat, e.Type, e.Reason, e.Message) +} + +// NodeCondition represents a favorable or unfavorable node condition.
+type NodeCondition struct { + Type v1.NodeConditionType + Status v1.ConditionStatus + Reason string + Message string +} + +// DiskPressure detects if the condition is disk pressure +func (pc *NodeCondition) DiskPressure() bool { + return pc.Type == v1.NodeDiskPressure && pc.Status == v1.ConditionTrue +} + +// MemoryPressure detects if the condition is memory pressure +func (pc *NodeCondition) MemoryPressure() bool { + return pc.Type == v1.NodeMemoryPressure && pc.Status == v1.ConditionTrue +} + +// PIDPressure detects if the condition is PID pressure +func (pc *NodeCondition) PIDPressure() bool { + return pc.Type == v1.NodePIDPressure && pc.Status == v1.ConditionTrue +} + +// NetworkUnavailable detects if the condition is network unavailable +func (pc *NodeCondition) NetworkUnavailable() bool { + return pc.Type == v1.NodeNetworkUnavailable && pc.Status == v1.ConditionTrue +} + +// NodePressure verifies that the node is not under disk, memory, PID or network pressure. +func NodePressure(cs *kubernetes.Clientset) error { + glog.Info("verifying NodePressure condition ...") + start := time.Now() + defer func() { + glog.Infof("duration metric: took %s to wait for NodePressure...", time.Since(start)) + }() + + ns, err := cs.CoreV1().Nodes().List(meta.ListOptions{}) + if err != nil { + return errors.Wrap(err, "list nodes") + } + + for _, n := range ns.Items { + glog.Infof("node storage ephemeral capacity is %s", n.Status.Capacity.StorageEphemeral()) + glog.Infof("node cpu capacity is %s", n.Status.Capacity.Cpu().AsDec()) + for _, c := range n.Status.Conditions { + pc := NodeCondition{Type: c.Type, Status: c.Status, Reason: c.Reason, Message: c.Message} + if pc.DiskPressure() { + return &ErrDiskPressure{ + NodeCondition: pc, + } + } + + if pc.MemoryPressure() { + return &ErrMemoryPressure{ + NodeCondition: pc, + } + } + + if pc.PIDPressure() { + return &ErrPIDPressure{ + NodeCondition: pc, + } + } + + if pc.NetworkUnavailable() { + return &ErrNetworkNotReady{ + NodeCondition: pc, + } + } + + } + } + return nil +} + +// WaitForNodeReady waits for a node to be ready +func WaitForNodeReady(cs *kubernetes.Clientset, timeout time.Duration) error { + glog.Info("waiting for node to be ready ...") + start := time.Now() + defer func() { + glog.Infof("duration metric: took %s to wait for WaitForNodeReady...", time.Since(start)) + }() + checkReady := func() (bool, error) { + if time.Since(start) > timeout { + return false, fmt.Errorf("wait for node to be ready timed out") + } + ns, err := cs.CoreV1().Nodes().List(meta.ListOptions{}) + if err != nil { + glog.Infof("error listing nodes, will retry: %v", err) + return false, nil + } + + for _, n := range ns.Items { + for _, c := range n.Status.Conditions { + if c.Type == v1.NodeReady && c.Status != v1.ConditionTrue { + glog.Infof("node %q has unwanted condition %q : Reason %q Message: %q. will retry. 
", n.Name, c.Type, c.Reason, c.Message) + return false, nil + } + } + } + return true, nil + } + + if err := wait.PollImmediate(kconst.APICallRetryInterval, kconst.DefaultControlPlaneTimeout, checkReady); err != nil { + return errors.Wrapf(err, "wait node ready") + } + + return nil +} diff --git a/pkg/minikube/bootstrapper/kubeadm/kubeadm.go b/pkg/minikube/bootstrapper/kubeadm/kubeadm.go index 8f8b74ac4866..e607473ade29 100644 --- a/pkg/minikube/bootstrapper/kubeadm/kubeadm.go +++ b/pkg/minikube/bootstrapper/kubeadm/kubeadm.go @@ -21,6 +21,7 @@ import ( "context" "os/exec" "path" + "runtime" "sync" "fmt" @@ -40,6 +41,7 @@ import ( "k8s.io/client-go/kubernetes" kconst "k8s.io/kubernetes/cmd/kubeadm/app/constants" "k8s.io/minikube/pkg/drivers/kic" + "k8s.io/minikube/pkg/drivers/kic/oci" "k8s.io/minikube/pkg/kapi" "k8s.io/minikube/pkg/minikube/assets" "k8s.io/minikube/pkg/minikube/bootstrapper" @@ -327,63 +329,68 @@ func (k *Bootstrapper) client(ip string, port int) (*kubernetes.Clientset, error } // WaitForNode blocks until the node appears to be healthy -func (k *Bootstrapper) WaitForNode(cfg config.ClusterConfig, n config.Node, timeout time.Duration) error { +func (k *Bootstrapper) WaitForNode(cc config.ClusterConfig, n config.Node, timeout time.Duration) error { start := time.Now() if !n.ControlPlane { glog.Infof("%s is not a control plane, nothing to wait for", n.Name) return nil } - if !kverify.ShouldWait(cfg.VerifyComponents) { + if !kverify.ShouldWait(cc.VerifyComponents) { glog.Infof("skip waiting for components based on config.") return nil } - - cr, err := cruntime.New(cruntime.Config{Type: cfg.KubernetesConfig.ContainerRuntime, Runner: k.c}) + out.T(out.HealthCheck, "Verifying Kubernetes Components:") + cr, err := cruntime.New(cruntime.Config{Type: cc.KubernetesConfig.ContainerRuntime, Runner: k.c}) if err != nil { - return errors.Wrapf(err, "create runtme-manager %s", cfg.KubernetesConfig.ContainerRuntime) + return errors.Wrapf(err, "create runtme-manager %s", cc.KubernetesConfig.ContainerRuntime) } - hostname, _, port, err := driver.ControlPaneEndpoint(&cfg, &n, cfg.Driver) + hostname, _, port, err := driver.ControlPaneEndpoint(&cc, &n, cc.Driver) if err != nil { return errors.Wrap(err, "get control plane endpoint") } - if cfg.VerifyComponents[kverify.APIServerWaitKey] { + if cc.VerifyComponents[kverify.APIServerWaitKey] { + out.T(out.CheckOption, "verifying api server ...") client, err := k.client(hostname, port) if err != nil { return errors.Wrap(err, "get k8s client") } - if err := kverify.WaitForAPIServerProcess(cr, k, cfg, k.c, start, timeout); err != nil { + if err := kverify.WaitForAPIServerProcess(cr, k, cc, k.c, start, timeout); err != nil { return errors.Wrap(err, "wait for apiserver proc") } - if err := kverify.WaitForHealthyAPIServer(cr, k, cfg, k.c, client, start, hostname, port, timeout); err != nil { + if err := kverify.WaitForHealthyAPIServer(cr, k, cc, k.c, client, start, hostname, port, timeout); err != nil { return errors.Wrap(err, "wait for healthy API server") } } - if cfg.VerifyComponents[kverify.SystemPodsWaitKey] { + if cc.VerifyComponents[kverify.SystemPodsWaitKey] { + out.T(out.CheckOption, "verifying system pods ...") client, err := k.client(hostname, port) if err != nil { return errors.Wrap(err, "get k8s client") } - if err := kverify.WaitForSystemPods(cr, k, cfg, k.c, client, start, timeout); err != nil { + if err := kverify.WaitForSystemPods(cr, k, cc, k.c, client, start, timeout); err != nil { return errors.Wrap(err, "waiting for system pods") } } 
- if cfg.VerifyComponents[kverify.DefaultSAWaitKey] { + if cc.VerifyComponents[kverify.DefaultSAWaitKey] { + out.T(out.CheckOption, "verifying default service account ...") client, err := k.client(hostname, port) if err != nil { return errors.Wrap(err, "get k8s client") } if err := kverify.WaitForDefaultSA(client, timeout); err != nil { + // TODO(medya): handle different error types return errors.Wrap(err, "waiting for default service account") } } - if cfg.VerifyComponents[kverify.AppsRunning] { + if cc.VerifyComponents[kverify.AppsRunningKey] { + out.T(out.CheckOption, "verifying apps running ...") client, err := k.client(hostname, port) if err != nil { return errors.Wrap(err, "get k8s client") @@ -393,12 +400,35 @@ func (k *Bootstrapper) WaitForNode(cfg config.ClusterConfig, n config.Node, time } } - glog.Infof("duration metric: took %s to wait for : %+v ...", time.Since(start), cfg.VerifyComponents) + if cc.VerifyComponents[kverify.NodePressureKey] { + out.T(out.CheckOption, "verifying node pressure ...") + client, err := k.client(hostname, port) + if err != nil { + return errors.Wrap(err, "get k8s client") + } + if err := kverify.NodePressure(client); err != nil { + adviseNodePressure(err, cc.Name, cc.Driver) + return errors.Wrapf(err, "verifying %s", kverify.NodePressureKey) + } + } + + if cc.VerifyComponents[kverify.NodeReadyKey] { + out.T(out.CheckOption, "verifying node status ...") + client, err := k.client(hostname, port) + if err != nil { + return errors.Wrap(err, "get k8s client") + } + if err := kverify.WaitForNodeReady(client, timeout); err != nil { + return errors.Wrap(err, "waiting for node to be ready") + } + } + + glog.Infof("duration metric: took %s to wait for : %+v ...", time.Since(start), cc.VerifyComponents) return nil } // needsReset returns whether or not the cluster needs to be reconfigured -func (k *Bootstrapper) needsReset(conf string, hostname string, port int, client *kubernetes.Clientset, version string) bool { +func (k *Bootstrapper) needsReset(conf string, name string, hostname string, driver string, port int, client *kubernetes.Clientset, version string) bool { if rr, err := k.c.RunCmd(exec.Command("sudo", "diff", "-u", conf, conf+".new")); err != nil { glog.Infof("needs reset: configs differ:\n%s", rr.Output()) return true @@ -416,20 +446,26 @@ } if err := kverify.ExpectAppsRunning(client, kverify.AppsRunningList); err != nil { - glog.Infof("needs reset: %v", err) + glog.Infof("needs reset: expect apps running: %v", err) return true } if err := kverify.APIServerVersionMatch(client, version); err != nil { - glog.Infof("needs reset: %v", err) + glog.Infof("needs reset: apiserver version match: %v", err) return true } + if err := kverify.NodePressure(client); err != nil { + adviseNodePressure(err, name, driver) + glog.Infof("needs reset: node pressure: %v", err) + return true + } + return false } // restartCluster restarts the Kubernetes cluster configured by kubeadm -func (k *Bootstrapper) restartCluster(cfg config.ClusterConfig) error { +func (k *Bootstrapper) restartCluster(cc config.ClusterConfig) error { glog.Infof("restartCluster start") start := time.Now() @@ -437,7 +473,7 @@ func (k *Bootstrapper) restartCluster(cfg config.ClusterConfig) error { glog.Infof("restartCluster took %s", time.Since(start)) }() - version, err := util.ParseKubernetesVersion(cfg.KubernetesConfig.KubernetesVersion) + version, err := util.ParseKubernetesVersion(cc.KubernetesConfig.KubernetesVersion) if 
err != nil { return errors.Wrap(err, "parsing kubernetes version") } @@ -453,12 +489,12 @@ glog.Errorf("failed to create compat symlinks: %v", err) } - cp, err := config.PrimaryControlPlane(&cfg) + cp, err := config.PrimaryControlPlane(&cc) if err != nil { return errors.Wrap(err, "primary control plane") } - hostname, _, port, err := driver.ControlPaneEndpoint(&cfg, &cp, cfg.Driver) + hostname, _, port, err := driver.ControlPaneEndpoint(&cc, &cp, cc.Driver) if err != nil { return errors.Wrap(err, "control plane") } @@ -470,12 +506,12 @@ // If the cluster is running, check if we have any work to do. conf := bsutil.KubeadmYamlPath - if !k.needsReset(conf, hostname, port, client, cfg.KubernetesConfig.KubernetesVersion) { + if !k.needsReset(conf, cc.Name, hostname, cc.Driver, port, client, cc.KubernetesConfig.KubernetesVersion) { glog.Infof("Taking a shortcut, as the cluster seems to be properly configured") return nil } - if err := k.clearStaleConfigs(cfg); err != nil { + if err := k.clearStaleConfigs(cc); err != nil { return errors.Wrap(err, "clearing stale configs") } @@ -483,7 +519,7 @@ return errors.Wrap(err, "cp") } - baseCmd := fmt.Sprintf("%s %s", bsutil.InvokeKubeadm(cfg.KubernetesConfig.KubernetesVersion), phase) + baseCmd := fmt.Sprintf("%s %s", bsutil.InvokeKubeadm(cc.KubernetesConfig.KubernetesVersion), phase) cmds := []string{ fmt.Sprintf("%s phase certs all --config %s", baseCmd, conf), fmt.Sprintf("%s phase kubeconfig all --config %s", baseCmd, conf), @@ -500,21 +536,21 @@ } } - cr, err := cruntime.New(cruntime.Config{Type: cfg.KubernetesConfig.ContainerRuntime, Runner: k.c}) + cr, err := cruntime.New(cruntime.Config{Type: cc.KubernetesConfig.ContainerRuntime, Runner: k.c}) if err != nil { return errors.Wrap(err, "runtime") } // We must ensure that the apiserver is healthy before proceeding - if err := kverify.WaitForAPIServerProcess(cr, k, cfg, k.c, time.Now(), kconst.DefaultControlPlaneTimeout); err != nil { + if err := kverify.WaitForAPIServerProcess(cr, k, cc, k.c, time.Now(), kconst.DefaultControlPlaneTimeout); err != nil { return errors.Wrap(err, "apiserver healthz") } - if err := kverify.WaitForHealthyAPIServer(cr, k, cfg, k.c, client, time.Now(), hostname, port, kconst.DefaultControlPlaneTimeout); err != nil { + if err := kverify.WaitForHealthyAPIServer(cr, k, cc, k.c, client, time.Now(), hostname, port, kconst.DefaultControlPlaneTimeout); err != nil { return errors.Wrap(err, "apiserver health") } - if err := kverify.WaitForSystemPods(cr, k, cfg, k.c, client, time.Now(), kconst.DefaultControlPlaneTimeout); err != nil { + if err := kverify.WaitForSystemPods(cr, k, cc, k.c, client, time.Now(), kconst.DefaultControlPlaneTimeout); err != nil { return errors.Wrap(err, "system pods") } @@ -839,3 +875,59 @@ func (k *Bootstrapper) elevateKubeSystemPrivileges(cfg config.ClusterConfig) err glog.Infof("duration metric: took %s to wait for elevateKubeSystemPrivileges.", time.Since(start)) return err } + +// adviseNodePressure advises the user on what to do about the given node pressure error +func adviseNodePressure(err error, name string, drv string) { + if diskErr, ok := err.(*kverify.ErrDiskPressure); ok { + out.ErrLn("") + glog.Warning(diskErr) + out.WarningT("The node {{.name}} has run out of disk 
space.", out.V{"name": name}) + // generic advice for all drivers + out.T(out.Tip, "Please free up disk or prune images.") + if driver.IsVM(drv) { + out.T(out.Stopped, "Consider creating a cluster with bigger disk size: `minikube start --disk SIZE_MB` ") + } else if drv == oci.Docker && runtime.GOOS != "linux" { + out.T(out.Stopped, "Consider increasing Docker Desktop's disk size.") + if runtime.GOOS == "darwin" { + out.T(out.Documentation, "Documentation: {{.url}}", out.V{"url": "https://docs.docker.com/docker-for-mac/space/"}) + } + if runtime.GOOS == "windows" { + out.T(out.Documentation, "Documentation: {{.url}}", out.V{"url": "https://docs.docker.com/docker-for-windows/"}) + } + } + out.ErrLn("") + } + + if memErr, ok := err.(*kverify.ErrMemoryPressure); ok { + out.ErrLn("") + glog.Warning(memErr) + out.WarningT("The node {{.name}} has ran out of memory.", out.V{"name": name}) + out.T(out.Tip, "Please free up memory on the cluster.") + if driver.IsVM(drv) { + out.T(out.Stopped, "Consider creating a cluster with larger memory size using `minikube start --memory SIZE_MB` ") + } else if drv == oci.Docker && runtime.GOOS != "linux" { + out.T(out.Stopped, "Consider increasing Docker Desktop's memory size.") + if runtime.GOOS == "darwin" { + out.T(out.Documentation, "Documentation: {{.url}}", out.V{"url": "https://docs.docker.com/docker-for-mac/space/"}) + } + if runtime.GOOS == "windows" { + out.T(out.Documentation, "Documentation: {{.url}}", out.V{"url": "https://docs.docker.com/docker-for-windows/"}) + } + } + out.ErrLn("") + } + + if pidErr, ok := err.(*kverify.ErrPIDPressure); ok { + glog.Warning(pidErr) + out.ErrLn("") + out.WarningT("The node {{.name}} has ran out of available PIDs.", out.V{"name": name}) + out.ErrLn("") + } + + if netErr, ok := err.(*kverify.ErrNetworkNotReady); ok { + glog.Warning(netErr) + out.ErrLn("") + out.WarningT("The node {{.name}} network is not available. Please verify network settings.", out.V{"name": name}) + out.ErrLn("") + } +} diff --git a/pkg/minikube/out/style.go b/pkg/minikube/out/style.go index 1cd400d2ae24..74541750002c 100644 --- a/pkg/minikube/out/style.go +++ b/pkg/minikube/out/style.go @@ -69,6 +69,7 @@ var styles = map[StyleEnum]style{ Sad: {Prefix: "😿 "}, ThumbsUp: {Prefix: "👍 "}, ThumbsDown: {Prefix: "👎 "}, + CheckOption: {Prefix: " 🔎 ", LowPrefix: lowIndent}, // Indented bullet Option: {Prefix: " ▪ ", LowPrefix: lowIndent}, // Indented bullet Command: {Prefix: " ▪ ", LowPrefix: lowIndent}, // Indented bullet LogEntry: {Prefix: " "}, // Indent @@ -108,6 +109,7 @@ var styles = map[StyleEnum]style{ Enabling: {Prefix: "🔌 "}, Shutdown: {Prefix: "🛑 "}, Pulling: {Prefix: "🚜 "}, + HealthCheck: {Prefix: "🕵️ "}, Verifying: {Prefix: "🤔 "}, VerifyingNoLine: {Prefix: "🤔 ", OmitNewline: true}, Kubectl: {Prefix: "💗 "}, diff --git a/pkg/minikube/out/style_enum.go b/pkg/minikube/out/style_enum.go index 1437b26823a9..eae3a9a2c442 100644 --- a/pkg/minikube/out/style_enum.go +++ b/pkg/minikube/out/style_enum.go @@ -42,6 +42,7 @@ const ( Sad ThumbsUp ThumbsDown + CheckOption Option Command LogEntry @@ -73,6 +74,7 @@ const ( Enabling Shutdown Pulling + HealthCheck Verifying VerifyingNoLine Kubectl diff --git a/pkg/minikube/perf/binary.go b/pkg/minikube/perf/binary.go new file mode 100644 index 000000000000..5fe6d7f6b9a6 --- /dev/null +++ b/pkg/minikube/perf/binary.go @@ -0,0 +1,108 @@ +/* +Copyright 2020 The Kubernetes Authors All rights reserved. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package perf + +import ( + "fmt" + "io" + "net/http" + "os" + "path/filepath" + "strconv" + "strings" + + "github.com/pkg/errors" + "k8s.io/minikube/pkg/minikube/constants" +) + +// Binary represents a minikube binary to be timed, either a local path or a PR build +type Binary struct { + path string + pr int +} + +const ( + prPrefix = "pr://" ) + +// NewBinary returns a new binary type +func NewBinary(b string) (*Binary, error) { + // If it doesn't have the prefix, assume a path + if !strings.HasPrefix(b, prPrefix) { + return &Binary{ + path: b, + }, nil + } + return newBinaryFromPR(b) +} + +// Name returns the name of the binary +func (b *Binary) Name() string { + if b.pr != 0 { + return fmt.Sprintf("Minikube (PR %d)", b.pr) + } + return filepath.Base(b.path) +} + +// newBinaryFromPR downloads the minikube binary built for the pr by Jenkins from GCS +func newBinaryFromPR(pr string) (*Binary, error) { + pr = strings.TrimPrefix(pr, prPrefix) + // try to convert to int + i, err := strconv.Atoi(pr) + if err != nil { + return nil, errors.Wrapf(err, "converting %s to an integer", pr) + } + + b := &Binary{ + path: localMinikubePath(i), + pr: i, + } + + if err := downloadBinary(remoteMinikubeURL(i), b.path); err != nil { + return nil, errors.Wrapf(err, "downloading minikube") + } + + return b, nil +} + +func remoteMinikubeURL(pr int) string { + return fmt.Sprintf("https://storage.googleapis.com/minikube-builds/%d/minikube-linux-amd64", pr) +} + +func localMinikubePath(pr int) string { + return fmt.Sprintf("%s/minikube-binaries/%d/minikube", constants.DefaultMinipath, pr) +} + +// downloadBinary downloads the file at url to path, creating parent directories as needed +func downloadBinary(url, path string) error { + resp, err := http.Get(url) + if err != nil { + return err + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + return fmt.Errorf("unexpected status code %d downloading %s", resp.StatusCode, url) + } + + if err := os.MkdirAll(filepath.Dir(path), 0777); err != nil { + return err + } + + f, err := os.OpenFile(path, os.O_CREATE|os.O_RDWR, 0777) + if err != nil { + return err + } + defer f.Close() + + _, err = io.Copy(f, resp.Body) + return err +} diff --git a/pkg/minikube/perf/start.go b/pkg/minikube/perf/start.go index a942863f8c67..7b7bd28bf31c 100644 --- a/pkg/minikube/perf/start.go +++ b/pkg/minikube/perf/start.go @@ -35,21 +35,26 @@ const ( var ( // For testing - collectTimeMinikubeStart = timeMinikubeStart + collectTimeMinikubeStart = collectTimes ) -// CompareMinikubeStart compares the time to run `minikube start` between two minikube binaries -func CompareMinikubeStart(ctx context.Context, out io.Writer, binaries []string) error { - durations, err := collectTimes(ctx, binaries) +// CompareMinikubeStart compares the time to run `minikube start` across the given minikube binaries +func CompareMinikubeStart(ctx context.Context, out io.Writer, binaries []*Binary) error { + durations, err := collectTimeMinikubeStart(ctx, binaries) if err != nil { return err } - fmt.Fprintf(out, "Old binary: %v\nNew binary: %v\nAverage Old: %f\nAverage New: %f\n", durations[0], durations[1], average(durations[0]), average(durations[1])) + for i, d := range durations { + fmt.Fprintf(out, "Results for %s:\n", binaries[i].Name()) + fmt.Fprintf(out, "Times: %v\n", d) + fmt.Fprintf(out, "Average Time: %f\n\n", average(d)) + } + return nil } -func collectTimes(ctx 
context.Context, binaries []string) ([][]float64, error) { +func collectTimes(ctx context.Context, binaries []*Binary) ([][]float64, error) { durations := make([][]float64, len(binaries)) for i := range durations { durations[i] = make([]float64, runs) @@ -58,9 +63,9 @@ func collectTimes(ctx context.Context, binaries []string) ([][]float64, error) { for r := 0; r < runs; r++ { log.Printf("Executing run %d...", r) for index, binary := range binaries { - duration, err := collectTimeMinikubeStart(ctx, binary) + duration, err := timeMinikubeStart(ctx, binary) if err != nil { - return nil, errors.Wrapf(err, "timing run %d with %s", r, binary) + return nil, errors.Wrapf(err, "timing run %d with %s", r, binary.Name()) } durations[index][r] = duration } @@ -79,12 +84,12 @@ func average(nums []float64) float64 { // timeMinikubeStart returns the time it takes to execute `minikube start` // It deletes the VM after `minikube start`. -func timeMinikubeStart(ctx context.Context, binary string) (float64, error) { - startCmd := exec.CommandContext(ctx, binary, "start") +func timeMinikubeStart(ctx context.Context, binary *Binary) (float64, error) { + startCmd := exec.CommandContext(ctx, binary.path, "start") startCmd.Stdout = os.Stdout startCmd.Stderr = os.Stderr - deleteCmd := exec.CommandContext(ctx, binary, "delete") + deleteCmd := exec.CommandContext(ctx, binary.path, "delete") defer func() { if err := deleteCmd.Run(); err != nil { log.Printf("error deleting minikube: %v", err) diff --git a/pkg/minikube/perf/start_test.go b/pkg/minikube/perf/start_test.go index 539d57500d3a..2d802f7d9ca4 100644 --- a/pkg/minikube/perf/start_test.go +++ b/pkg/minikube/perf/start_test.go @@ -19,86 +19,64 @@ package perf import ( "bytes" "context" - "reflect" "testing" + + "github.com/google/go-cmp/cmp" ) -func mockCollectTimeMinikubeStart(durations []float64) func(ctx context.Context, binary string) (float64, error) { - index := 0 - return func(context.Context, string) (float64, error) { - duration := durations[index] - index++ - return duration, nil +func mockCollectTimes(times [][]float64) func(ctx context.Context, binaries []*Binary) ([][]float64, error) { + return func(ctx context.Context, binaries []*Binary) ([][]float64, error) { + return times, nil } } func TestCompareMinikubeStartOutput(t *testing.T) { + binaries := []*Binary{ + { + path: "minikube1", + }, { + path: "minikube2", + }, + } tests := []struct { description string - durations []float64 + times [][]float64 expected string }{ { description: "standard run", - durations: []float64{4.5, 6}, - expected: "Old binary: [4.5]\nNew binary: [6]\nAverage Old: 4.500000\nAverage New: 6.000000\n", + times: [][]float64{{4.5, 6}, {1, 2}}, + expected: `Results for minikube1: +Times: [4.5 6] +Average Time: 5.250000 + +Results for minikube2: +Times: [1 2] +Average Time: 1.500000 + +`, }, } for _, test := range tests { t.Run(test.description, func(t *testing.T) { - originalCollectTimes := collectTimeMinikubeStart - collectTimeMinikubeStart = mockCollectTimeMinikubeStart(test.durations) + originalCollectTimes := collectTimes + collectTimeMinikubeStart = mockCollectTimes(test.times) defer func() { collectTimeMinikubeStart = originalCollectTimes }() buf := bytes.NewBuffer([]byte{}) - err := CompareMinikubeStart(context.Background(), buf, []string{"", ""}) + err := CompareMinikubeStart(context.Background(), buf, binaries) if err != nil { t.Fatalf("error comparing minikube start: %v", err) } actual := buf.String() - if test.expected != actual { - t.Fatalf("actual output does not 
match expected output\nActual: %v\nExpected: %v", actual, test.expected) + if diff := cmp.Diff(test.expected, actual); diff != "" { + t.Errorf("output mismatch (-want +got):\n%s", diff) } }) } } - -func TestCollectTimes(t *testing.T) { - tests := []struct { - description string - durations []float64 - expected [][]float64 - }{ - { - description: "test collect time", - durations: []float64{1, 2}, - expected: [][]float64{ - {1}, - {2}, - }, - }, - } - - for _, test := range tests { - t.Run(test.description, func(t *testing.T) { - originalCollectTimes := collectTimeMinikubeStart - collectTimeMinikubeStart = mockCollectTimeMinikubeStart(test.durations) - defer func() { collectTimeMinikubeStart = originalCollectTimes }() - - actual, err := collectTimes(context.Background(), []string{"", ""}) - if err != nil { - t.Fatalf("error collecting times: %v", err) - } - - if !reflect.DeepEqual(actual, test.expected) { - t.Fatalf("actual output does not match expected output\nActual: %v\nExpected: %v", actual, test.expected) - } - }) - } -} - func TestAverage(t *testing.T) { tests := []struct { description string diff --git a/site/content/en/docs/commands/start.md b/site/content/en/docs/commands/start.md index 839be1e1673d..43dfe855c039 100644 --- a/site/content/en/docs/commands/start.md +++ b/site/content/en/docs/commands/start.md @@ -86,7 +86,7 @@ minikube start [flags] --uuid string Provide VM UUID to restore MAC address (hyperkit driver only) --vm Filter to use only VM Drivers --vm-driver driver DEPRECATED, use driver instead. - --wait strings comma separated list of kubernetes components to verify and wait for after starting a cluster. defaults to "apiserver,system_pods", available options: "apiserver,system_pods,default_sa,apps_running" . other acceptable values are 'all' or 'none', 'true' and 'false' (default [apiserver,system_pods]) + --wait strings comma separated list of kubernetes components to verify and wait for after starting a cluster. defaults to "apiserver,system_pods", available options: "apiserver,system_pods,default_sa,apps_running,no_pressure,node_ready" . other acceptable values are 'all' or 'none', 'true' and 'false' (default [apiserver,system_pods]) --wait-timeout duration max time to wait per Kubernetes core services to be healthy. (default 6m0s) ``` diff --git a/site/content/en/docs/contrib/addons.en.md b/site/content/en/docs/contrib/addons.en.md index 5c8c1a45b6b7..06ec833d01c2 100644 --- a/site/content/en/docs/contrib/addons.en.md +++ b/site/content/en/docs/contrib/addons.en.md @@ -6,6 +6,20 @@ description: > How to develop minikube addons --- +## Testing Addon Changes + +Build the minikube binary: + +```shell +make +``` + +Enable the addon using your newly built minikube binary: + +```shell +./out/minikube addons enable <addon-name> +``` + ## Adding a New Addon To add a new addon to minikube the following steps are required: @@ -62,6 +76,3 @@ To add a new addon to minikube the following steps are required: }, false, "efk"), } ``` - -* Rebuild minikube using `make out/minikube`. This will put the addon's .yaml binary files into the minikube binary using go-bindata. -* Test addon using `minikube addons enable <addon-name>` command to start service. 
diff --git a/test/integration/pause_test.go b/test/integration/pause_test.go index 2c4fc690b205..6f949a87dd75 100644 --- a/test/integration/pause_test.go +++ b/test/integration/pause_test.go @@ -21,6 +21,7 @@ package integration import ( "context" "os/exec" + "strings" "testing" ) @@ -39,6 +40,7 @@ func TestPause(t *testing.T) { validator validateFunc }{ {"Start", validateFreshStart}, + {"SecondStartNoReset", validateStartNoReset}, {"Pause", validatePause}, {"Unpause", validateUnpause}, {"PauseAgain", validatePause}, @@ -54,13 +56,29 @@ func TestPause(t *testing.T) { } func validateFreshStart(ctx context.Context, t *testing.T, profile string) { - args := append([]string{"start", "-p", profile, "--memory=1800", "--install-addons=false", "--wait=false"}, StartArgs()...) + args := append([]string{"start", "-p", profile, "--memory=1800", "--install-addons=false", "--wait=all"}, StartArgs()...) rr, err := Run(t, exec.CommandContext(ctx, Target(), args...)) if err != nil { t.Fatalf("failed to start minikube with args: %q : %v", rr.Command(), err) } } +// validateStartNoReset validates that starting a running cluster won't invoke a reset +func validateStartNoReset(ctx context.Context, t *testing.T, profile string) { + args := []string{"start", "-p", profile, "--alsologtostderr", "-v=5"} + rr, err := Run(t, exec.CommandContext(ctx, Target(), args...)) + if err != nil { + t.Fatalf("failed second start of a running minikube with args: %q : %v", rr.Command(), err) + } + if !NoneDriver() { + softLog := "The running cluster does not need a reset" + if !strings.Contains(rr.Output(), softLog) { + t.Errorf("expected the second start log output to include %q but got: %s", softLog, rr.Output()) + } + } + +} + func validatePause(ctx context.Context, t *testing.T, profile string) { args := []string{"pause", "-p", profile, "--alsologtostderr", "-v=5"} rr, err := Run(t, exec.CommandContext(ctx, Target(), args...))