diff --git a/cmd/minikube/cmd/node.go b/cmd/minikube/cmd/node.go index 7b70780f74ad..39dbac6c7c60 100644 --- a/cmd/minikube/cmd/node.go +++ b/cmd/minikube/cmd/node.go @@ -23,10 +23,9 @@ import ( // nodeCmd represents the set of node subcommands var nodeCmd = &cobra.Command{ - Use: "node", - Short: "Node operations", - Long: "Operations on nodes", - Hidden: true, // This won't be fully functional and thus should not be documented yet + Use: "node", + Short: "Node operations", + Long: "Operations on nodes", Run: func(cmd *cobra.Command, args []string) { exit.UsageT("Usage: minikube node [add|start|stop|delete]") }, diff --git a/cmd/minikube/cmd/node_add.go b/cmd/minikube/cmd/node_add.go index 9ee9e39f1e7d..d593639b4d04 100644 --- a/cmd/minikube/cmd/node_add.go +++ b/cmd/minikube/cmd/node_add.go @@ -17,12 +17,11 @@ limitations under the License. package cmd import ( - "fmt" - "github.com/spf13/cobra" "github.com/spf13/pflag" "github.com/spf13/viper" "k8s.io/minikube/pkg/minikube/config" + "k8s.io/minikube/pkg/minikube/driver" "k8s.io/minikube/pkg/minikube/exit" "k8s.io/minikube/pkg/minikube/node" "k8s.io/minikube/pkg/minikube/out" ) @@ -43,19 +42,25 @@ var nodeAddCmd = &cobra.Command{ exit.WithError("Error getting config", err) } - //name := profile + strconv.Itoa(len(mc.Nodes)+1) - name := fmt.Sprintf("m%d", len(cc.Nodes)+1) + if driver.BareMetal(cc.Driver) { + out.ErrT(out.FailureType, "none driver does not support multi-node clusters") + } + + name := node.Name(len(cc.Nodes) + 1) out.T(out.Happy, "Adding node {{.name}} to cluster {{.cluster}}", out.V{"name": name, "cluster": profile}) - n, err := node.Add(cc, name, cp, worker, "", profile) - if err != nil { - exit.WithError("Error adding node to cluster", err) + // TODO: Deal with parameters better. Ideally we should be able to accept any node-specific minikube start params here. 
+ n := config.Node{ + Name: name, + Worker: worker, + ControlPlane: cp, + KubernetesVersion: cc.KubernetesConfig.KubernetesVersion, } - _, err = node.Start(*cc, *n, false, nil) + err = node.Add(cc, n) if err != nil { - exit.WithError("Error starting node", err) + exit.WithError("Error adding node to cluster", err) } out.T(out.Ready, "Successfully added {{.name}} to {{.cluster}}!", out.V{"name": name, "cluster": profile}) diff --git a/cmd/minikube/cmd/node_delete.go b/cmd/minikube/cmd/node_delete.go index f35c2d2c56f3..8f43749a2768 100644 --- a/cmd/minikube/cmd/node_delete.go +++ b/cmd/minikube/cmd/node_delete.go @@ -46,7 +46,7 @@ var nodeDeleteCmd = &cobra.Command{ err = node.Delete(*cc, name) if err != nil { - out.FatalT("Failed to delete node {{.name}}", out.V{"name": name}) + exit.WithError("deleting node", err) } out.T(out.Deleted, "Node {{.name}} was successfully deleted.", out.V{"name": name}) diff --git a/cmd/minikube/cmd/node_start.go b/cmd/minikube/cmd/node_start.go index c0090b628759..17e3da869433 100644 --- a/cmd/minikube/cmd/node_start.go +++ b/cmd/minikube/cmd/node_start.go @@ -61,10 +61,7 @@ var nodeStartCmd = &cobra.Command{ } // Start it up baby - _, err = node.Start(*cc, *n, false, nil) - if err != nil { - out.FatalT("Failed to start node {{.name}}", out.V{"name": name}) - } + node.Start(*cc, *n, nil, false) }, } diff --git a/cmd/minikube/cmd/ssh.go b/cmd/minikube/cmd/ssh.go index 8c78c87ee20c..4a8508ec5d21 100644 --- a/cmd/minikube/cmd/ssh.go +++ b/cmd/minikube/cmd/ssh.go @@ -27,6 +27,7 @@ import ( "k8s.io/minikube/pkg/minikube/driver" "k8s.io/minikube/pkg/minikube/exit" "k8s.io/minikube/pkg/minikube/machine" + "k8s.io/minikube/pkg/minikube/node" "k8s.io/minikube/pkg/minikube/out" ) @@ -49,12 +50,20 @@ var sshCmd = &cobra.Command{ if err != nil { exit.WithError("Error getting config", err) } - // TODO: allow choice of node to ssh into - cp, err := config.PrimaryControlPlane(cc) - if err != nil { - exit.WithError("Error getting primary control 
plane", err) + var n *config.Node + if nodeName == "" { + cp, err := config.PrimaryControlPlane(cc) + if err != nil { + exit.WithError("Getting primary control plane", err) + } + n = &cp + } else { + n, _, err = node.Retrieve(cc, nodeName) + if err != nil { + exit.WithCodeT(exit.Unavailable, "Node {{.nodeName}} does not exist.", out.V{"nodeName": nodeName}) + } } - host, err := machine.LoadHost(api, driver.MachineName(*cc, cp)) + host, err := machine.LoadHost(api, driver.MachineName(*cc, *n)) if err != nil { exit.WithError("Error getting host", err) } @@ -67,7 +76,7 @@ var sshCmd = &cobra.Command{ ssh.SetDefaultClient(ssh.External) } - err = machine.CreateSSHShell(api, *cc, cp, args) + err = machine.CreateSSHShell(api, *cc, *n, args) if err != nil { // This is typically due to a non-zero exit code, so no need for flourish. out.ErrLn("ssh: %v", err) @@ -78,5 +87,6 @@ var sshCmd = &cobra.Command{ } func init() { - sshCmd.Flags().BoolVar(&nativeSSHClient, nativeSSH, true, "Use native Golang SSH client (default true). Set to 'false' to use the command line 'ssh' command when accessing the docker machine. Useful for the machine drivers when they will not start with 'Waiting for SSH'.") + sshCmd.Flags().Bool(nativeSSH, true, "Use native Golang SSH client (default true). Set to 'false' to use the command line 'ssh' command when accessing the docker machine. Useful for the machine drivers when they will not start with 'Waiting for SSH'.") + sshCmd.Flags().StringVarP(&nodeName, "node", "n", "", "The node to ssh into. 
Defaults to the primary control plane.") } diff --git a/cmd/minikube/cmd/start.go b/cmd/minikube/cmd/start.go index c9018c09d647..75ff63851121 100644 --- a/cmd/minikube/cmd/start.go +++ b/cmd/minikube/cmd/start.go @@ -120,6 +120,7 @@ const ( autoUpdate = "auto-update-drivers" hostOnlyNicType = "host-only-nic-type" natNicType = "nat-nic-type" + nodes = "nodes" ) var ( @@ -162,7 +163,7 @@ func initMinikubeFlags() { startCmd.Flags().String(containerRuntime, "docker", "The container runtime to be used (docker, crio, containerd).") startCmd.Flags().Bool(createMount, false, "This will start the mount daemon and automatically mount files into minikube.") startCmd.Flags().String(mountString, constants.DefaultMountDir+":/minikube-host", "The argument to pass the minikube mount command on start.") - startCmd.Flags().StringArrayVar(&node.AddonList, "addons", nil, "Enable addons. see `minikube addons list` for a list of valid addon names.") + startCmd.Flags().StringArrayVar(&config.AddonList, "addons", nil, "Enable addons. see `minikube addons list` for a list of valid addon names.") startCmd.Flags().String(criSocket, "", "The cri socket path to be used.") startCmd.Flags().String(networkPlugin, "", "The name of the network plugin.") startCmd.Flags().Bool(enableDefaultCNI, false, "Enable the default CNI plugin (/etc/cni/net.d/k8s.conf). Used in conjunction with \"--network-plugin=cni\".") @@ -171,12 +172,13 @@ func initMinikubeFlags() { startCmd.Flags().Bool(nativeSSH, true, "Use native Golang SSH client (default true). Set to 'false' to use the command line 'ssh' command when accessing the docker machine. Useful for the machine drivers when they will not start with 'Waiting for SSH'.") startCmd.Flags().Bool(autoUpdate, true, "If set, automatically updates drivers to the latest version. Defaults to true.") startCmd.Flags().Bool(installAddons, true, "If set, install addons. Defaults to true.") + startCmd.Flags().IntP(nodes, "n", 1, "The number of nodes to spin up. 
Defaults to 1.") } // initKubernetesFlags inits the commandline flags for kubernetes related options func initKubernetesFlags() { startCmd.Flags().String(kubernetesVersion, "", "The kubernetes version that the minikube VM will use (ex: v1.2.3)") - startCmd.Flags().Var(&node.ExtraOptions, "extra-config", + startCmd.Flags().Var(&config.ExtraOptions, "extra-config", `A set of key=value pairs that describe configuration that may be passed to different components. The key should be '.' separated, and the first part before the dot is the component to apply the configuration to. Valid components are: kubelet, kubeadm, apiserver, controller-manager, etcd, proxy, scheduler @@ -229,8 +231,8 @@ func initNetworkingFlags() { startCmd.Flags().String(imageRepository, "", "Alternative image repository to pull docker images from. This can be used when you have limited access to gcr.io. Set it to \"auto\" to let minikube decide one for you. For Chinese mainland users, you may use local gcr.io mirrors such as registry.cn-hangzhou.aliyuncs.com/google_containers") startCmd.Flags().String(imageMirrorCountry, "", "Country code of the image mirror to be used. Leave empty to use the global one. For Chinese mainland users, set it to cn.") startCmd.Flags().String(serviceCIDR, constants.DefaultServiceCIDR, "The CIDR to be used for service cluster IPs.") - startCmd.Flags().StringArrayVar(&node.DockerEnv, "docker-env", nil, "Environment variables to pass to the Docker daemon. (format: key=value)") - startCmd.Flags().StringArrayVar(&node.DockerOpt, "docker-opt", nil, "Specify arbitrary flags to pass to the Docker daemon. (format: key=value)") + startCmd.Flags().StringArrayVar(&config.DockerEnv, "docker-env", nil, "Environment variables to pass to the Docker daemon. (format: key=value)") + startCmd.Flags().StringArrayVar(&config.DockerOpt, "docker-opt", nil, "Specify arbitrary flags to pass to the Docker daemon. 
(format: key=value)") } // startCmd represents the start command @@ -313,7 +315,7 @@ func runStart(cmd *cobra.Command, args []string) { } k8sVersion := getKubernetesVersion(existing) - mc, n, err := generateCfgFromFlags(cmd, k8sVersion, driverName) + cc, n, err := generateCfgFromFlags(cmd, k8sVersion, driverName) if err != nil { exit.WithError("Failed to generate config", err) } @@ -324,12 +326,12 @@ func runStart(cmd *cobra.Command, args []string) { return } - if !driver.BareMetal(driverName) && !driver.IsKIC(driverName) { + if driver.IsVM(driverName) { url, err := download.ISO(viper.GetStringSlice(isoURL), cmd.Flags().Changed(isoURL)) if err != nil { exit.WithError("Failed to cache ISO", err) } - mc.MinikubeISO = url + cc.MinikubeISO = url } if viper.GetBool(nativeSSH) { @@ -338,12 +340,41 @@ func runStart(cmd *cobra.Command, args []string) { ssh.SetDefaultClient(ssh.External) } - kubeconfig, err := startNode(existing, mc, n) - if err != nil { - exit.WithError("Starting node", err) + var existingAddons map[string]bool + if viper.GetBool(installAddons) { + existingAddons = map[string]bool{} + if existing != nil && existing.Addons != nil { + existingAddons = existing.Addons + } } - if err := showKubectlInfo(kubeconfig, k8sVersion, mc.Name); err != nil { + kubeconfig := node.Start(cc, n, existingAddons, true) + + numNodes := viper.GetInt(nodes) + if numNodes == 1 && existing != nil { + numNodes = len(existing.Nodes) + } + if numNodes > 1 { + if driver.BareMetal(driverName) { + exit.WithCodeT(exit.Config, "The none driver is not compatible with multi-node clusters.") + } else { + for i := 1; i < numNodes; i++ { + nodeName := node.Name(i + 1) + n := config.Node{ + Name: nodeName, + Worker: true, + ControlPlane: false, + KubernetesVersion: cc.KubernetesConfig.KubernetesVersion, + } + err := node.Add(&cc, n) + if err != nil { + exit.WithError("adding node", err) + } + } + } + } + + if err := showKubectlInfo(kubeconfig, k8sVersion, cc.Name); err != nil { 
glog.Errorf("kubectl info: %v", err) } } @@ -383,17 +414,6 @@ func displayEnviron(env []string) { } } -func startNode(existing *config.ClusterConfig, mc config.ClusterConfig, n config.Node) (*kubeconfig.Settings, error) { - var existingAddons map[string]bool - if viper.GetBool(installAddons) { - existingAddons = map[string]bool{} - if existing != nil && existing.Addons != nil { - existingAddons = existing.Addons - } - } - return node.Start(mc, n, true, existingAddons) -} - func showKubectlInfo(kcs *kubeconfig.Settings, k8sVersion string, machineName string) error { if kcs.KeepContext { out.T(out.Kubectl, "To connect to this cluster, use: kubectl --context={{.name}}", out.V{"name": kcs.ClusterName}) @@ -796,7 +816,7 @@ func validateFlags(cmd *cobra.Command, drvName string) { } // check that kubeadm extra args contain only whitelisted parameters - for param := range node.ExtraOptions.AsMap().Get(bsutil.Kubeadm) { + for param := range config.ExtraOptions.AsMap().Get(bsutil.Kubeadm) { if !config.ContainsParam(bsutil.KubeadmExtraArgsWhitelist[bsutil.KubeadmCmdParam], param) && !config.ContainsParam(bsutil.KubeadmExtraArgsWhitelist[bsutil.KubeadmConfigParam], param) { exit.UsageT("Sorry, the kubeadm.{{.parameter_name}} parameter is currently not supported by --extra-config", out.V{"parameter_name": param}) @@ -924,8 +944,8 @@ func createNode(cmd *cobra.Command, k8sVersion, kubeNodeName, drvName, repositor HyperkitVSockPorts: viper.GetStringSlice(vsockPorts), NFSShare: viper.GetStringSlice(nfsShare), NFSSharesRoot: viper.GetString(nfsSharesRoot), - DockerEnv: node.DockerEnv, - DockerOpt: node.DockerOpt, + DockerEnv: config.DockerEnv, + DockerOpt: config.DockerOpt, InsecureRegistry: insecureRegistry, RegistryMirror: registryMirror, HostOnlyCIDR: viper.GetString(hostOnlyCIDR), @@ -956,7 +976,7 @@ func createNode(cmd *cobra.Command, k8sVersion, kubeNodeName, drvName, repositor NetworkPlugin: selectedNetworkPlugin, ServiceCIDR: viper.GetString(serviceCIDR), ImageRepository: 
repository, - ExtraOptions: node.ExtraOptions, + ExtraOptions: config.ExtraOptions, ShouldLoadCachedImages: viper.GetBool(cacheImages), EnableDefaultCNI: selectedEnableDefaultCNI, }, @@ -978,7 +998,7 @@ func setDockerProxy() { continue } } - node.DockerEnv = append(node.DockerEnv, fmt.Sprintf("%s=%s", k, v)) + config.DockerEnv = append(config.DockerEnv, fmt.Sprintf("%s=%s", k, v)) } } } @@ -990,7 +1010,7 @@ func autoSetDriverOptions(cmd *cobra.Command, drvName string) (err error) { if !cmd.Flags().Changed("extra-config") && len(hints.ExtraOptions) > 0 { for _, eo := range hints.ExtraOptions { glog.Infof("auto setting extra-config to %q.", eo) - err = node.ExtraOptions.Set(eo) + err = config.ExtraOptions.Set(eo) if err != nil { err = errors.Wrapf(err, "setting extra option %s", eo) } diff --git a/cmd/minikube/cmd/status.go b/cmd/minikube/cmd/status.go index 5d97709ca033..44b96bc8e289 100644 --- a/cmd/minikube/cmd/status.go +++ b/cmd/minikube/cmd/status.go @@ -56,24 +56,35 @@ const ( // Nonexistent means nonexistent Nonexistent = "Nonexistent" // ~state.None + // Irrelevant is used for statuses that aren't meaningful for worker nodes + Irrelevant = "Irrelevant" ) // Status holds string representations of component states type Status struct { + Name string Host string Kubelet string APIServer string Kubeconfig string + Worker bool } const ( minikubeNotRunningStatusFlag = 1 << 0 clusterNotRunningStatusFlag = 1 << 1 k8sNotRunningStatusFlag = 1 << 2 - defaultStatusFormat = `host: {{.Host}} + defaultStatusFormat = `{{.Name}} +host: {{.Host}} kubelet: {{.Kubelet}} apiserver: {{.APIServer}} kubeconfig: {{.Kubeconfig}} + +` + workerStatusFormat = `{{.Name}} +host: {{.Host}} +kubelet: {{.Kubelet}} + ` ) @@ -104,31 +115,29 @@ var statusCmd = &cobra.Command{ exit.WithError("getting config", err) } - cp, err := config.PrimaryControlPlane(cc) - if err != nil { - exit.WithError("getting primary control plane", err) - } - - machineName := driver.MachineName(*cc, cp) - st, err := 
status(api, machineName) - if err != nil { - glog.Errorf("status error: %v", err) - } - if st.Host == Nonexistent { - glog.Errorf("The %q cluster does not exist!", machineName) - } - - switch strings.ToLower(output) { - case "text": - if err := statusText(st, os.Stdout); err != nil { - exit.WithError("status text failure", err) + var st *Status + for _, n := range cc.Nodes { + machineName := driver.MachineName(*cc, n) + st, err = status(api, machineName, n.ControlPlane) + if err != nil { + glog.Errorf("status error: %v", err) + } + if st.Host == Nonexistent { + glog.Errorf("The %q host does not exist!", machineName) } - case "json": - if err := statusJSON(st, os.Stdout); err != nil { - exit.WithError("status json failure", err) + + switch strings.ToLower(output) { + case "text": + if err := statusText(st, os.Stdout); err != nil { + exit.WithError("status text failure", err) + } + case "json": + if err := statusJSON(st, os.Stdout); err != nil { + exit.WithError("status json failure", err) + } + default: + exit.WithCodeT(exit.BadUsage, fmt.Sprintf("invalid output format: %s. Valid values: 'text', 'json'", output)) } - default: - exit.WithCodeT(exit.BadUsage, fmt.Sprintf("invalid output format: %s. 
Valid values: 'text', 'json'", output)) } os.Exit(exitCode(st)) @@ -140,21 +149,26 @@ func exitCode(st *Status) int { if st.Host != state.Running.String() { c |= minikubeNotRunningStatusFlag } - if st.APIServer != state.Running.String() || st.Kubelet != state.Running.String() { + if (st.APIServer != state.Running.String() && st.APIServer != Irrelevant) || st.Kubelet != state.Running.String() { c |= clusterNotRunningStatusFlag } - if st.Kubeconfig != Configured { + if st.Kubeconfig != Configured && st.Kubeconfig != Irrelevant { c |= k8sNotRunningStatusFlag } return c } -func status(api libmachine.API, name string) (*Status, error) { +func status(api libmachine.API, name string, controlPlane bool) (*Status, error) { + + profile, node := driver.ClusterNameFromMachine(name) + st := &Status{ + Name: node, Host: Nonexistent, APIServer: Nonexistent, Kubelet: Nonexistent, Kubeconfig: Nonexistent, + Worker: !controlPlane, } hs, err := machine.Status(api, name) @@ -193,10 +207,17 @@ func status(api libmachine.API, name string) (*Status, error) { } st.Kubeconfig = Misconfigured - ok, err := kubeconfig.IsClusterInConfig(ip, name) - glog.Infof("%s is in kubeconfig at ip %s: %v (err=%v)", name, ip, ok, err) - if ok { - st.Kubeconfig = Configured + if !controlPlane { + st.Kubeconfig = Irrelevant + st.APIServer = Irrelevant + } + + if st.Kubeconfig != Irrelevant { + ok, err := kubeconfig.IsClusterInConfig(ip, profile) + glog.Infof("%s is in kubeconfig at ip %s: %v (err=%v)", name, ip, ok, err) + if ok { + st.Kubeconfig = Configured + } } host, err := machine.LoadHost(api, name) @@ -219,14 +240,16 @@ func status(api libmachine.API, name string) (*Status, error) { st.Kubelet = stk.String() } - sta, err := kverify.APIServerStatus(cr, ip, port) - glog.Infof("%s apiserver status = %s (err=%v)", name, stk, err) + if st.APIServer != Irrelevant { + sta, err := kverify.APIServerStatus(cr, ip, port) + glog.Infof("%s apiserver status = %s (err=%v)", name, stk, err) - if err != nil { - 
glog.Errorln("Error apiserver status:", err) - st.APIServer = state.Error.String() - } else { - st.APIServer = sta.String() + if err != nil { + glog.Errorln("Error apiserver status:", err) + st.APIServer = state.Error.String() + } else { + st.APIServer = sta.String() + } } return st, nil @@ -242,6 +265,9 @@ For the list accessible variables for the template, see the struct values here: func statusText(st *Status, w io.Writer) error { tmpl, err := template.New("status").Parse(statusFormat) + if st.Worker && statusFormat == defaultStatusFormat { + tmpl, err = template.New("worker-status").Parse(workerStatusFormat) + } if err != nil { return err } diff --git a/cmd/minikube/cmd/status_test.go b/cmd/minikube/cmd/status_test.go index ef414631f832..b11e549a6de5 100644 --- a/cmd/minikube/cmd/status_test.go +++ b/cmd/minikube/cmd/status_test.go @@ -51,18 +51,18 @@ func TestStatusText(t *testing.T) { }{ { name: "ok", - state: &Status{Host: "Running", Kubelet: "Running", APIServer: "Running", Kubeconfig: Configured}, - want: "host: Running\nkubelet: Running\napiserver: Running\nkubeconfig: Configured\n", + state: &Status{Name: "minikube", Host: "Running", Kubelet: "Running", APIServer: "Running", Kubeconfig: Configured}, + want: "minikube\nhost: Running\nkubelet: Running\napiserver: Running\nkubeconfig: Configured\n\n", }, { name: "paused", - state: &Status{Host: "Running", Kubelet: "Stopped", APIServer: "Paused", Kubeconfig: Configured}, - want: "host: Running\nkubelet: Stopped\napiserver: Paused\nkubeconfig: Configured\n", + state: &Status{Name: "minikube", Host: "Running", Kubelet: "Stopped", APIServer: "Paused", Kubeconfig: Configured}, + want: "minikube\nhost: Running\nkubelet: Stopped\napiserver: Paused\nkubeconfig: Configured\n\n", }, { name: "down", - state: &Status{Host: "Stopped", Kubelet: "Stopped", APIServer: "Stopped", Kubeconfig: Misconfigured}, - want: "host: Stopped\nkubelet: Stopped\napiserver: Stopped\nkubeconfig: Misconfigured\n\nWARNING: Your kubectl is 
pointing to stale minikube-vm.\nTo fix the kubectl context, run `minikube update-context`\n", + state: &Status{Name: "minikube", Host: "Stopped", Kubelet: "Stopped", APIServer: "Stopped", Kubeconfig: Misconfigured}, + want: "minikube\nhost: Stopped\nkubelet: Stopped\napiserver: Stopped\nkubeconfig: Misconfigured\n\n\nWARNING: Your kubectl is pointing to stale minikube-vm.\nTo fix the kubectl context, run `minikube update-context`\n", }, } for _, tc := range tests { diff --git a/cmd/minikube/cmd/stop.go b/cmd/minikube/cmd/stop.go index e6d131c7e4b4..005284a02259 100644 --- a/cmd/minikube/cmd/stop.go +++ b/cmd/minikube/cmd/stop.go @@ -88,7 +88,7 @@ func stop(api libmachine.API, cluster config.ClusterConfig, n config.Node) bool switch err := errors.Cause(err).(type) { case mcnerror.ErrHostDoesNotExist: - out.T(out.Meh, `"{{.profile_name}}" does not exist, nothing to stop`, out.V{"profile_name": cluster}) + out.T(out.Meh, `"{{.machineName}}" does not exist, nothing to stop`, out.V{"machineName": machineName}) nonexistent = true return nil default: diff --git a/pkg/minikube/bootstrapper/bootstrapper.go b/pkg/minikube/bootstrapper/bootstrapper.go index 712446285d6d..5627e1b3c34c 100644 --- a/pkg/minikube/bootstrapper/bootstrapper.go +++ b/pkg/minikube/bootstrapper/bootstrapper.go @@ -23,6 +23,7 @@ import ( "k8s.io/minikube/pkg/minikube/bootstrapper/images" "k8s.io/minikube/pkg/minikube/config" "k8s.io/minikube/pkg/minikube/constants" + "k8s.io/minikube/pkg/minikube/cruntime" ) // LogOptions are options to be passed to LogCommands @@ -38,7 +39,10 @@ type Bootstrapper interface { StartCluster(config.ClusterConfig) error UpdateCluster(config.ClusterConfig) error DeleteCluster(config.KubernetesConfig) error - WaitForCluster(config.ClusterConfig, time.Duration) error + WaitForNode(config.ClusterConfig, config.Node, time.Duration) error + JoinCluster(config.ClusterConfig, config.Node, string) error + UpdateNode(config.ClusterConfig, config.Node, cruntime.Manager) error + 
GenerateToken(config.ClusterConfig) (string, error) // LogCommands returns a map of log type to a command which will display that log. LogCommands(LogOptions) map[string]string SetupCerts(config.KubernetesConfig, config.Node) error diff --git a/pkg/minikube/bootstrapper/bsutil/ktmpl/v1beta2.go b/pkg/minikube/bootstrapper/bsutil/ktmpl/v1beta2.go index d4894876bb55..50f8147d6fb7 100644 --- a/pkg/minikube/bootstrapper/bsutil/ktmpl/v1beta2.go +++ b/pkg/minikube/bootstrapper/bsutil/ktmpl/v1beta2.go @@ -56,7 +56,7 @@ kind: ClusterConfiguration {{range $i, $val := .FeatureArgs}}{{$i}}: {{$val}} {{end -}}{{end -}} certificatesDir: {{.CertDir}} -clusterName: kubernetes +clusterName: mk controlPlaneEndpoint: {{.ControlPlaneAddress}}:{{.APIServerPort}} dns: type: CoreDNS diff --git a/pkg/minikube/bootstrapper/bsutil/kubeadm.go b/pkg/minikube/bootstrapper/bsutil/kubeadm.go index 3aa1bb52d481..8b675ae644d7 100644 --- a/pkg/minikube/bootstrapper/bsutil/kubeadm.go +++ b/pkg/minikube/bootstrapper/bsutil/kubeadm.go @@ -29,6 +29,7 @@ import ( "k8s.io/minikube/pkg/minikube/config" "k8s.io/minikube/pkg/minikube/constants" "k8s.io/minikube/pkg/minikube/cruntime" + "k8s.io/minikube/pkg/minikube/driver" "k8s.io/minikube/pkg/minikube/vmpath" "k8s.io/minikube/pkg/util" ) @@ -37,8 +38,8 @@ import ( const remoteContainerRuntime = "remote" // GenerateKubeadmYAML generates the kubeadm.yaml file -func GenerateKubeadmYAML(mc config.ClusterConfig, r cruntime.Manager, n config.Node) ([]byte, error) { - k8s := mc.KubernetesConfig +func GenerateKubeadmYAML(cc config.ClusterConfig, n config.Node, r cruntime.Manager) ([]byte, error) { + k8s := cc.KubernetesConfig version, err := util.ParseKubernetesVersion(k8s.KubernetesVersion) if err != nil { return nil, errors.Wrap(err, "parsing kubernetes version") @@ -51,7 +52,7 @@ func GenerateKubeadmYAML(mc config.ClusterConfig, r cruntime.Manager, n config.N } // In case of no port assigned, use default - cp, err := config.PrimaryControlPlane(&mc) + cp, err := 
config.PrimaryControlPlane(&cc) if err != nil { return nil, errors.Wrap(err, "getting control plane") } @@ -87,22 +88,23 @@ func GenerateKubeadmYAML(mc config.ClusterConfig, r cruntime.Manager, n config.N CertDir: vmpath.GuestKubernetesCertsDir, ServiceCIDR: constants.DefaultServiceCIDR, PodSubnet: k8s.ExtraOptions.Get("pod-network-cidr", Kubeadm), - AdvertiseAddress: cp.IP, + AdvertiseAddress: n.IP, APIServerPort: nodePort, KubernetesVersion: k8s.KubernetesVersion, EtcdDataDir: EtcdDataDir(), - ClusterName: k8s.ClusterName, - NodeName: cp.Name, - CRISocket: r.SocketPath(), - ImageRepository: k8s.ImageRepository, - ComponentOptions: componentOpts, - FeatureArgs: kubeadmFeatureArgs, - NoTaintMaster: false, // That does not work with k8s 1.12+ - DNSDomain: k8s.DNSDomain, - NodeIP: n.IP, + ClusterName: cc.Name, + //kubeadm uses NodeName as the --hostname-override parameter, so this needs to be the name of the machine + NodeName: driver.MachineName(cc, n), + CRISocket: r.SocketPath(), + ImageRepository: k8s.ImageRepository, + ComponentOptions: componentOpts, + FeatureArgs: kubeadmFeatureArgs, + NoTaintMaster: false, // That does not work with k8s 1.12+ + DNSDomain: k8s.DNSDomain, + NodeIP: n.IP, // NOTE: If set to an specific VM IP, things may break if the IP changes on host restart // For multi-node, we may need to figure out an alternate strategy, like DNS or hosts files - ControlPlaneAddress: "localhost", + ControlPlaneAddress: cp.IP, } if k8s.ServiceCIDR != "" { diff --git a/pkg/minikube/bootstrapper/bsutil/kubeadm_test.go b/pkg/minikube/bootstrapper/bsutil/kubeadm_test.go index 4c366bd96bf5..a2b53c3ea177 100644 --- a/pkg/minikube/bootstrapper/bsutil/kubeadm_test.go +++ b/pkg/minikube/bootstrapper/bsutil/kubeadm_test.go @@ -108,7 +108,7 @@ func TestGenerateKubeadmYAMLDNS(t *testing.T) { shouldErr bool cfg config.ClusterConfig }{ - {"dns", "docker", false, config.ClusterConfig{KubernetesConfig: config.KubernetesConfig{DNSDomain: "1.1.1.1"}}}, + {"dns", "docker", 
false, config.ClusterConfig{Name: "mk", KubernetesConfig: config.KubernetesConfig{DNSDomain: "1.1.1.1"}}}, } for _, version := range versions { for _, tc := range tests { @@ -129,7 +129,7 @@ func TestGenerateKubeadmYAMLDNS(t *testing.T) { cfg.KubernetesConfig.KubernetesVersion = version + ".0" cfg.KubernetesConfig.ClusterName = "kubernetes" - got, err := GenerateKubeadmYAML(cfg, runtime, cfg.Nodes[0]) + got, err := GenerateKubeadmYAML(cfg, cfg.Nodes[0], runtime) if err != nil && !tc.shouldErr { t.Fatalf("got unexpected error generating config: %v", err) } @@ -174,15 +174,15 @@ func TestGenerateKubeadmYAML(t *testing.T) { shouldErr bool cfg config.ClusterConfig }{ - {"default", "docker", false, config.ClusterConfig{}}, - {"containerd", "containerd", false, config.ClusterConfig{}}, - {"crio", "crio", false, config.ClusterConfig{}}, - {"options", "docker", false, config.ClusterConfig{KubernetesConfig: config.KubernetesConfig{ExtraOptions: extraOpts}}}, - {"crio-options-gates", "crio", false, config.ClusterConfig{KubernetesConfig: config.KubernetesConfig{ExtraOptions: extraOpts, FeatureGates: "a=b"}}}, - {"unknown-component", "docker", true, config.ClusterConfig{KubernetesConfig: config.KubernetesConfig{ExtraOptions: config.ExtraOptionSlice{config.ExtraOption{Component: "not-a-real-component", Key: "killswitch", Value: "true"}}}}}, - {"containerd-api-port", "containerd", false, config.ClusterConfig{Nodes: []config.Node{{Port: 12345}}}}, - {"containerd-pod-network-cidr", "containerd", false, config.ClusterConfig{KubernetesConfig: config.KubernetesConfig{ExtraOptions: extraOptsPodCidr}}}, - {"image-repository", "docker", false, config.ClusterConfig{KubernetesConfig: config.KubernetesConfig{ImageRepository: "test/repo"}}}, + {"default", "docker", false, config.ClusterConfig{Name: "mk"}}, + {"containerd", "containerd", false, config.ClusterConfig{Name: "mk"}}, + {"crio", "crio", false, config.ClusterConfig{Name: "mk"}}, + {"options", "docker", false, 
config.ClusterConfig{Name: "mk", KubernetesConfig: config.KubernetesConfig{ExtraOptions: extraOpts}}}, + {"crio-options-gates", "crio", false, config.ClusterConfig{Name: "mk", KubernetesConfig: config.KubernetesConfig{ExtraOptions: extraOpts, FeatureGates: "a=b"}}}, + {"unknown-component", "docker", true, config.ClusterConfig{Name: "mk", KubernetesConfig: config.KubernetesConfig{ExtraOptions: config.ExtraOptionSlice{config.ExtraOption{Component: "not-a-real-component", Key: "killswitch", Value: "true"}}}}}, + {"containerd-api-port", "containerd", false, config.ClusterConfig{Name: "mk", Nodes: []config.Node{{Port: 12345}}}}, + {"containerd-pod-network-cidr", "containerd", false, config.ClusterConfig{Name: "mk", KubernetesConfig: config.KubernetesConfig{ExtraOptions: extraOptsPodCidr}}}, + {"image-repository", "docker", false, config.ClusterConfig{Name: "mk", KubernetesConfig: config.KubernetesConfig{ImageRepository: "test/repo"}}}, } for _, version := range versions { for _, tc := range tests { @@ -210,7 +210,7 @@ func TestGenerateKubeadmYAML(t *testing.T) { cfg.KubernetesConfig.KubernetesVersion = version + ".0" cfg.KubernetesConfig.ClusterName = "kubernetes" - got, err := GenerateKubeadmYAML(cfg, runtime, cfg.Nodes[0]) + got, err := GenerateKubeadmYAML(cfg, cfg.Nodes[0], runtime) if err != nil && !tc.shouldErr { t.Fatalf("got unexpected error generating config: %v", err) } diff --git a/pkg/minikube/bootstrapper/bsutil/kubelet.go b/pkg/minikube/bootstrapper/bsutil/kubelet.go index 8ec9d01fc671..ce161b41daea 100644 --- a/pkg/minikube/bootstrapper/bsutil/kubelet.go +++ b/pkg/minikube/bootstrapper/bsutil/kubelet.go @@ -26,6 +26,7 @@ import ( "k8s.io/minikube/pkg/minikube/bootstrapper/images" "k8s.io/minikube/pkg/minikube/config" "k8s.io/minikube/pkg/minikube/cruntime" + "k8s.io/minikube/pkg/minikube/driver" "k8s.io/minikube/pkg/util" ) @@ -60,7 +61,7 @@ func extraKubeletOpts(mc config.ClusterConfig, nc config.Node, r cruntime.Manage extraOpts["node-ip"] = cp.IP } if 
nc.Name != "" { - extraOpts["hostname-override"] = nc.Name + extraOpts["hostname-override"] = driver.MachineName(mc, nc) } pauseImage := images.Pause(version, k8s.ImageRepository) diff --git a/pkg/minikube/bootstrapper/bsutil/kubelet_test.go b/pkg/minikube/bootstrapper/bsutil/kubelet_test.go index 3902af11987d..052b9937dddc 100644 --- a/pkg/minikube/bootstrapper/bsutil/kubelet_test.go +++ b/pkg/minikube/bootstrapper/bsutil/kubelet_test.go @@ -37,6 +37,7 @@ func TestGenerateKubeletConfig(t *testing.T) { { description: "old docker", cfg: config.ClusterConfig{ + Name: "minikube", KubernetesConfig: config.KubernetesConfig{ KubernetesVersion: constants.OldestKubernetesVersion, ContainerRuntime: "docker", @@ -62,6 +63,7 @@ ExecStart=/var/lib/minikube/binaries/v1.11.10/kubelet --allow-privileged=true -- { description: "newest cri runtime", cfg: config.ClusterConfig{ + Name: "minikube", KubernetesConfig: config.KubernetesConfig{ KubernetesVersion: constants.NewestKubernetesVersion, ContainerRuntime: "cri-o", @@ -87,6 +89,7 @@ ExecStart=/var/lib/minikube/binaries/v1.18.0-rc.1/kubelet --authorization-mode=W { description: "default containerd runtime", cfg: config.ClusterConfig{ + Name: "minikube", KubernetesConfig: config.KubernetesConfig{ KubernetesVersion: constants.DefaultKubernetesVersion, ContainerRuntime: "containerd", @@ -112,6 +115,7 @@ ExecStart=/var/lib/minikube/binaries/v1.18.0-rc.1/kubelet --authorization-mode=W { description: "default containerd runtime with IP override", cfg: config.ClusterConfig{ + Name: "minikube", KubernetesConfig: config.KubernetesConfig{ KubernetesVersion: constants.DefaultKubernetesVersion, ContainerRuntime: "containerd", @@ -144,6 +148,7 @@ ExecStart=/var/lib/minikube/binaries/v1.18.0-rc.1/kubelet --authorization-mode=W { description: "docker with custom image repository", cfg: config.ClusterConfig{ + Name: "minikube", KubernetesConfig: config.KubernetesConfig{ KubernetesVersion: constants.DefaultKubernetesVersion, ContainerRuntime: 
"docker", diff --git a/pkg/minikube/bootstrapper/bsutil/ops.go b/pkg/minikube/bootstrapper/bsutil/ops.go index bf855a9210e1..d364aa074862 100644 --- a/pkg/minikube/bootstrapper/bsutil/ops.go +++ b/pkg/minikube/bootstrapper/bsutil/ops.go @@ -47,7 +47,7 @@ func AdjustResourceLimits(c command.Runner) error { return nil } -// ExistingConfig checks if there are config files from possible previous kubernets cluster +// ExistingConfig checks if there are config files from possible previous kubernetes cluster func ExistingConfig(c command.Runner) error { args := append([]string{"ls"}, expectedRemoteArtifacts...) _, err := c.RunCmd(exec.Command("sudo", args...)) diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.11/containerd-api-port.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.11/containerd-api-port.yaml index 7d94020c6fc4..ae79c8aa7a47 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.11/containerd-api-port.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.11/containerd-api-port.yaml @@ -4,7 +4,7 @@ noTaintMaster: true api: advertiseAddress: 1.1.1.1 bindPort: 12345 - controlPlaneEndpoint: localhost + controlPlaneEndpoint: 1.1.1.1 kubernetesVersion: v1.11.0 certificatesDir: /var/lib/minikube/certs networking: diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.11/containerd-pod-network-cidr.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.11/containerd-pod-network-cidr.yaml index f66eec734e9a..a8ce3c8dc760 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.11/containerd-pod-network-cidr.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.11/containerd-pod-network-cidr.yaml @@ -4,7 +4,7 @@ noTaintMaster: true api: advertiseAddress: 1.1.1.1 bindPort: 8443 - controlPlaneEndpoint: localhost + controlPlaneEndpoint: 1.1.1.1 kubernetesVersion: v1.11.0 certificatesDir: /var/lib/minikube/certs networking: diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.11/containerd.yaml 
b/pkg/minikube/bootstrapper/bsutil/testdata/v1.11/containerd.yaml index f66eec734e9a..a8ce3c8dc760 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.11/containerd.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.11/containerd.yaml @@ -4,7 +4,7 @@ noTaintMaster: true api: advertiseAddress: 1.1.1.1 bindPort: 8443 - controlPlaneEndpoint: localhost + controlPlaneEndpoint: 1.1.1.1 kubernetesVersion: v1.11.0 certificatesDir: /var/lib/minikube/certs networking: diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.11/crio-options-gates.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.11/crio-options-gates.yaml index 30b1986325b8..1a4d370e84bf 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.11/crio-options-gates.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.11/crio-options-gates.yaml @@ -4,7 +4,7 @@ noTaintMaster: true api: advertiseAddress: 1.1.1.1 bindPort: 8443 - controlPlaneEndpoint: localhost + controlPlaneEndpoint: 1.1.1.1 kubernetesVersion: v1.11.0 certificatesDir: /var/lib/minikube/certs networking: diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.11/crio.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.11/crio.yaml index 4693643125c1..e179fbf4e38f 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.11/crio.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.11/crio.yaml @@ -4,7 +4,7 @@ noTaintMaster: true api: advertiseAddress: 1.1.1.1 bindPort: 8443 - controlPlaneEndpoint: localhost + controlPlaneEndpoint: 1.1.1.1 kubernetesVersion: v1.11.0 certificatesDir: /var/lib/minikube/certs networking: diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.11/default.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.11/default.yaml index 5c2861101e27..68429da7bc8e 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.11/default.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.11/default.yaml @@ -4,7 +4,7 @@ noTaintMaster: true api: advertiseAddress: 1.1.1.1 bindPort: 8443 - 
controlPlaneEndpoint: localhost + controlPlaneEndpoint: 1.1.1.1 kubernetesVersion: v1.11.0 certificatesDir: /var/lib/minikube/certs networking: diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.11/image-repository.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.11/image-repository.yaml index 7d383865f809..651706493cb8 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.11/image-repository.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.11/image-repository.yaml @@ -4,7 +4,7 @@ noTaintMaster: true api: advertiseAddress: 1.1.1.1 bindPort: 8443 - controlPlaneEndpoint: localhost + controlPlaneEndpoint: 1.1.1.1 kubernetesVersion: v1.11.0 certificatesDir: /var/lib/minikube/certs networking: diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.11/options.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.11/options.yaml index 26fbfead4b36..5b192e1cfd43 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.11/options.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.11/options.yaml @@ -4,7 +4,7 @@ noTaintMaster: true api: advertiseAddress: 1.1.1.1 bindPort: 8443 - controlPlaneEndpoint: localhost + controlPlaneEndpoint: 1.1.1.1 kubernetesVersion: v1.11.0 certificatesDir: /var/lib/minikube/certs networking: diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.12/containerd-api-port.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.12/containerd-api-port.yaml index ba34af30dfda..adf230658d53 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.12/containerd-api-port.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.12/containerd-api-port.yaml @@ -22,9 +22,9 @@ kind: ClusterConfiguration apiServerExtraArgs: enable-admission-plugins: "Initializers,NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes +clusterName: mk apiServerCertSANs: 
["127.0.0.1", "localhost", "1.1.1.1"] -controlPlaneEndpoint: localhost:12345 +controlPlaneEndpoint: 1.1.1.1:12345 etcd: local: dataDir: /var/lib/minikube/etcd diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.12/containerd-pod-network-cidr.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.12/containerd-pod-network-cidr.yaml index 0d821692e5a6..300ee2825f4e 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.12/containerd-pod-network-cidr.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.12/containerd-pod-network-cidr.yaml @@ -22,9 +22,9 @@ kind: ClusterConfiguration apiServerExtraArgs: enable-admission-plugins: "Initializers,NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes +clusterName: mk apiServerCertSANs: ["127.0.0.1", "localhost", "1.1.1.1"] -controlPlaneEndpoint: localhost:8443 +controlPlaneEndpoint: 1.1.1.1:8443 etcd: local: dataDir: /var/lib/minikube/etcd diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.12/containerd.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.12/containerd.yaml index 8ac889649ff7..9866d944d95f 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.12/containerd.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.12/containerd.yaml @@ -22,9 +22,9 @@ kind: ClusterConfiguration apiServerExtraArgs: enable-admission-plugins: "Initializers,NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes +clusterName: mk apiServerCertSANs: ["127.0.0.1", "localhost", "1.1.1.1"] -controlPlaneEndpoint: localhost:8443 +controlPlaneEndpoint: 1.1.1.1:8443 etcd: local: dataDir: /var/lib/minikube/etcd diff --git 
a/pkg/minikube/bootstrapper/bsutil/testdata/v1.12/crio-options-gates.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.12/crio-options-gates.yaml index 5fb536a9f584..c8e2fbb46a33 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.12/crio-options-gates.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.12/crio-options-gates.yaml @@ -30,9 +30,9 @@ schedulerExtraArgs: feature-gates: "a=b" scheduler-name: "mini-scheduler" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes +clusterName: mk apiServerCertSANs: ["127.0.0.1", "localhost", "1.1.1.1"] -controlPlaneEndpoint: localhost:8443 +controlPlaneEndpoint: 1.1.1.1:8443 etcd: local: dataDir: /var/lib/minikube/etcd diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.12/crio.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.12/crio.yaml index a2e258468b93..834021df9419 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.12/crio.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.12/crio.yaml @@ -22,9 +22,9 @@ kind: ClusterConfiguration apiServerExtraArgs: enable-admission-plugins: "Initializers,NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes +clusterName: mk apiServerCertSANs: ["127.0.0.1", "localhost", "1.1.1.1"] -controlPlaneEndpoint: localhost:8443 +controlPlaneEndpoint: 1.1.1.1:8443 etcd: local: dataDir: /var/lib/minikube/etcd diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.12/default.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.12/default.yaml index 6db434545383..3c8b8b41a825 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.12/default.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.12/default.yaml @@ -22,9 +22,9 @@ kind: ClusterConfiguration apiServerExtraArgs: enable-admission-plugins: 
"Initializers,NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes +clusterName: mk apiServerCertSANs: ["127.0.0.1", "localhost", "1.1.1.1"] -controlPlaneEndpoint: localhost:8443 +controlPlaneEndpoint: 1.1.1.1:8443 etcd: local: dataDir: /var/lib/minikube/etcd diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.12/dns.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.12/dns.yaml index e0b60901ab6c..d6154f4ecd7e 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.12/dns.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.12/dns.yaml @@ -22,9 +22,9 @@ kind: ClusterConfiguration apiServerExtraArgs: enable-admission-plugins: "Initializers,NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes +clusterName: mk apiServerCertSANs: ["127.0.0.1", "localhost", "1.1.1.1"] -controlPlaneEndpoint: localhost:8443 +controlPlaneEndpoint: 1.1.1.1:8443 etcd: local: dataDir: /var/lib/minikube/etcd diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.12/image-repository.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.12/image-repository.yaml index 595bd0c94ce6..e9dd51d81158 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.12/image-repository.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.12/image-repository.yaml @@ -23,9 +23,9 @@ imageRepository: test/repo apiServerExtraArgs: enable-admission-plugins: "Initializers,NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes +clusterName: mk 
apiServerCertSANs: ["127.0.0.1", "localhost", "1.1.1.1"] -controlPlaneEndpoint: localhost:8443 +controlPlaneEndpoint: 1.1.1.1:8443 etcd: local: dataDir: /var/lib/minikube/etcd diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.12/options.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.12/options.yaml index 04237f4db1e4..a49db3c29f83 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.12/options.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.12/options.yaml @@ -27,9 +27,9 @@ controllerManagerExtraArgs: schedulerExtraArgs: scheduler-name: "mini-scheduler" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes +clusterName: mk apiServerCertSANs: ["127.0.0.1", "localhost", "1.1.1.1"] -controlPlaneEndpoint: localhost:8443 +controlPlaneEndpoint: 1.1.1.1:8443 etcd: local: dataDir: /var/lib/minikube/etcd diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.13/containerd-api-port.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.13/containerd-api-port.yaml index e4e9c885b261..8d90c3e2123d 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.13/containerd-api-port.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.13/containerd-api-port.yaml @@ -22,9 +22,9 @@ kind: ClusterConfiguration apiServerExtraArgs: enable-admission-plugins: "Initializers,NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes +clusterName: mk apiServerCertSANs: ["127.0.0.1", "localhost", "1.1.1.1"] -controlPlaneEndpoint: localhost:12345 +controlPlaneEndpoint: 1.1.1.1:12345 etcd: local: dataDir: /var/lib/minikube/etcd diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.13/containerd-pod-network-cidr.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.13/containerd-pod-network-cidr.yaml index ee58cf2201e7..1788a1adb84c 100644 --- 
a/pkg/minikube/bootstrapper/bsutil/testdata/v1.13/containerd-pod-network-cidr.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.13/containerd-pod-network-cidr.yaml @@ -22,9 +22,9 @@ kind: ClusterConfiguration apiServerExtraArgs: enable-admission-plugins: "Initializers,NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes +clusterName: mk apiServerCertSANs: ["127.0.0.1", "localhost", "1.1.1.1"] -controlPlaneEndpoint: localhost:8443 +controlPlaneEndpoint: 1.1.1.1:8443 etcd: local: dataDir: /var/lib/minikube/etcd diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.13/containerd.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.13/containerd.yaml index a7193076798f..770f46cc0f67 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.13/containerd.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.13/containerd.yaml @@ -22,9 +22,9 @@ kind: ClusterConfiguration apiServerExtraArgs: enable-admission-plugins: "Initializers,NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes +clusterName: mk apiServerCertSANs: ["127.0.0.1", "localhost", "1.1.1.1"] -controlPlaneEndpoint: localhost:8443 +controlPlaneEndpoint: 1.1.1.1:8443 etcd: local: dataDir: /var/lib/minikube/etcd diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.13/crio-options-gates.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.13/crio-options-gates.yaml index be69a16ec749..326912679e26 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.13/crio-options-gates.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.13/crio-options-gates.yaml @@ -30,9 +30,9 @@ schedulerExtraArgs: feature-gates: "a=b" scheduler-name: 
"mini-scheduler" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes +clusterName: mk apiServerCertSANs: ["127.0.0.1", "localhost", "1.1.1.1"] -controlPlaneEndpoint: localhost:8443 +controlPlaneEndpoint: 1.1.1.1:8443 etcd: local: dataDir: /var/lib/minikube/etcd diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.13/crio.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.13/crio.yaml index c195ffc2baf5..08646f704f04 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.13/crio.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.13/crio.yaml @@ -22,9 +22,9 @@ kind: ClusterConfiguration apiServerExtraArgs: enable-admission-plugins: "Initializers,NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes +clusterName: mk apiServerCertSANs: ["127.0.0.1", "localhost", "1.1.1.1"] -controlPlaneEndpoint: localhost:8443 +controlPlaneEndpoint: 1.1.1.1:8443 etcd: local: dataDir: /var/lib/minikube/etcd diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.13/default.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.13/default.yaml index f7fc9b519938..25d166e0dcd3 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.13/default.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.13/default.yaml @@ -22,9 +22,9 @@ kind: ClusterConfiguration apiServerExtraArgs: enable-admission-plugins: "Initializers,NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes +clusterName: mk apiServerCertSANs: ["127.0.0.1", "localhost", "1.1.1.1"] -controlPlaneEndpoint: localhost:8443 +controlPlaneEndpoint: 1.1.1.1:8443 etcd: local: dataDir: /var/lib/minikube/etcd diff --git 
a/pkg/minikube/bootstrapper/bsutil/testdata/v1.13/dns.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.13/dns.yaml index d9bb198b8f70..eb057faf768b 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.13/dns.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.13/dns.yaml @@ -22,9 +22,9 @@ kind: ClusterConfiguration apiServerExtraArgs: enable-admission-plugins: "Initializers,NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes +clusterName: mk apiServerCertSANs: ["127.0.0.1", "localhost", "1.1.1.1"] -controlPlaneEndpoint: localhost:8443 +controlPlaneEndpoint: 1.1.1.1:8443 etcd: local: dataDir: /var/lib/minikube/etcd diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.13/image-repository.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.13/image-repository.yaml index 0a1e7bab7b8a..d828d7200605 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.13/image-repository.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.13/image-repository.yaml @@ -23,9 +23,9 @@ imageRepository: test/repo apiServerExtraArgs: enable-admission-plugins: "Initializers,NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes +clusterName: mk apiServerCertSANs: ["127.0.0.1", "localhost", "1.1.1.1"] -controlPlaneEndpoint: localhost:8443 +controlPlaneEndpoint: 1.1.1.1:8443 etcd: local: dataDir: /var/lib/minikube/etcd diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.13/options.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.13/options.yaml index 3aa0b7475436..5fe5d326bc5e 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.13/options.yaml +++ 
b/pkg/minikube/bootstrapper/bsutil/testdata/v1.13/options.yaml @@ -27,9 +27,9 @@ controllerManagerExtraArgs: schedulerExtraArgs: scheduler-name: "mini-scheduler" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes +clusterName: mk apiServerCertSANs: ["127.0.0.1", "localhost", "1.1.1.1"] -controlPlaneEndpoint: localhost:8443 +controlPlaneEndpoint: 1.1.1.1:8443 etcd: local: dataDir: /var/lib/minikube/etcd diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.14/containerd-api-port.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.14/containerd-api-port.yaml index 741ad12afb4c..64efcf3938cf 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.14/containerd-api-port.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.14/containerd-api-port.yaml @@ -24,8 +24,8 @@ apiServer: extraArgs: enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes -controlPlaneEndpoint: localhost:12345 +clusterName: mk +controlPlaneEndpoint: 1.1.1.1:12345 dns: type: CoreDNS etcd: diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.14/containerd-pod-network-cidr.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.14/containerd-pod-network-cidr.yaml index 54abf0579361..6ef28c1c8d69 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.14/containerd-pod-network-cidr.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.14/containerd-pod-network-cidr.yaml @@ -24,8 +24,8 @@ apiServer: extraArgs: enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes -controlPlaneEndpoint: localhost:8443 +clusterName: mk +controlPlaneEndpoint: 1.1.1.1:8443 dns: 
type: CoreDNS etcd: diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.14/containerd.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.14/containerd.yaml index df4740aaeb35..97b406559393 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.14/containerd.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.14/containerd.yaml @@ -24,8 +24,8 @@ apiServer: extraArgs: enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes -controlPlaneEndpoint: localhost:8443 +clusterName: mk +controlPlaneEndpoint: 1.1.1.1:8443 dns: type: CoreDNS etcd: diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.14/crio-options-gates.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.14/crio-options-gates.yaml index 513e1f803a20..cf8a3e472861 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.14/crio-options-gates.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.14/crio-options-gates.yaml @@ -34,8 +34,8 @@ scheduler: feature-gates: "a=b" scheduler-name: "mini-scheduler" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes -controlPlaneEndpoint: localhost:8443 +clusterName: mk +controlPlaneEndpoint: 1.1.1.1:8443 dns: type: CoreDNS etcd: diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.14/crio.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.14/crio.yaml index 1053c5c42f6d..3ef27c9b9f81 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.14/crio.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.14/crio.yaml @@ -24,8 +24,8 @@ apiServer: extraArgs: enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes 
-controlPlaneEndpoint: localhost:8443 +clusterName: mk +controlPlaneEndpoint: 1.1.1.1:8443 dns: type: CoreDNS etcd: diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.14/default.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.14/default.yaml index 117c9070bfdc..746eb9fb7da9 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.14/default.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.14/default.yaml @@ -24,8 +24,8 @@ apiServer: extraArgs: enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes -controlPlaneEndpoint: localhost:8443 +clusterName: mk +controlPlaneEndpoint: 1.1.1.1:8443 dns: type: CoreDNS etcd: diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.14/dns.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.14/dns.yaml index 67c0df83a32c..a4e256775666 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.14/dns.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.14/dns.yaml @@ -24,8 +24,8 @@ apiServer: extraArgs: enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes -controlPlaneEndpoint: localhost:8443 +clusterName: mk +controlPlaneEndpoint: 1.1.1.1:8443 dns: type: CoreDNS etcd: diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.14/image-repository.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.14/image-repository.yaml index c720ebac423b..aedd2a9047bb 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.14/image-repository.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.14/image-repository.yaml @@ -25,8 +25,8 @@ apiServer: extraArgs: enable-admission-plugins: 
"NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes -controlPlaneEndpoint: localhost:8443 +clusterName: mk +controlPlaneEndpoint: 1.1.1.1:8443 dns: type: CoreDNS etcd: diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.14/options.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.14/options.yaml index 35aa4982b2d8..81980c953d28 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.14/options.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.14/options.yaml @@ -31,8 +31,8 @@ scheduler: extraArgs: scheduler-name: "mini-scheduler" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes -controlPlaneEndpoint: localhost:8443 +clusterName: mk +controlPlaneEndpoint: 1.1.1.1:8443 dns: type: CoreDNS etcd: diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.15/containerd-api-port.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.15/containerd-api-port.yaml index 304806142685..4e6bbead954f 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.15/containerd-api-port.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.15/containerd-api-port.yaml @@ -24,8 +24,8 @@ apiServer: extraArgs: enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes -controlPlaneEndpoint: localhost:12345 +clusterName: mk +controlPlaneEndpoint: 1.1.1.1:12345 dns: type: CoreDNS etcd: diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.15/containerd-pod-network-cidr.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.15/containerd-pod-network-cidr.yaml index 3a180ccafe9f..9a9a5c60f675 100644 --- 
a/pkg/minikube/bootstrapper/bsutil/testdata/v1.15/containerd-pod-network-cidr.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.15/containerd-pod-network-cidr.yaml @@ -24,8 +24,8 @@ apiServer: extraArgs: enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes -controlPlaneEndpoint: localhost:8443 +clusterName: mk +controlPlaneEndpoint: 1.1.1.1:8443 dns: type: CoreDNS etcd: diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.15/containerd.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.15/containerd.yaml index 75a083a4ced9..cacacc7e43ae 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.15/containerd.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.15/containerd.yaml @@ -24,8 +24,8 @@ apiServer: extraArgs: enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes -controlPlaneEndpoint: localhost:8443 +clusterName: mk +controlPlaneEndpoint: 1.1.1.1:8443 dns: type: CoreDNS etcd: diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.15/crio-options-gates.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.15/crio-options-gates.yaml index 587faaf4dec5..c78edc01199b 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.15/crio-options-gates.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.15/crio-options-gates.yaml @@ -34,8 +34,8 @@ scheduler: feature-gates: "a=b" scheduler-name: "mini-scheduler" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes -controlPlaneEndpoint: localhost:8443 +clusterName: mk +controlPlaneEndpoint: 1.1.1.1:8443 dns: type: CoreDNS etcd: diff --git 
a/pkg/minikube/bootstrapper/bsutil/testdata/v1.15/crio.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.15/crio.yaml index 680b24fe8d68..47db96b5c266 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.15/crio.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.15/crio.yaml @@ -24,8 +24,8 @@ apiServer: extraArgs: enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes -controlPlaneEndpoint: localhost:8443 +clusterName: mk +controlPlaneEndpoint: 1.1.1.1:8443 dns: type: CoreDNS etcd: diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.15/default.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.15/default.yaml index 4ac52544314a..d68ef1b1f2c4 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.15/default.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.15/default.yaml @@ -24,8 +24,8 @@ apiServer: extraArgs: enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes -controlPlaneEndpoint: localhost:8443 +clusterName: mk +controlPlaneEndpoint: 1.1.1.1:8443 dns: type: CoreDNS etcd: diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.15/dns.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.15/dns.yaml index 2403f960638d..1e79a74a1c99 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.15/dns.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.15/dns.yaml @@ -24,8 +24,8 @@ apiServer: extraArgs: enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: 
/var/lib/minikube/certs -clusterName: kubernetes -controlPlaneEndpoint: localhost:8443 +clusterName: mk +controlPlaneEndpoint: 1.1.1.1:8443 dns: type: CoreDNS etcd: diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.15/image-repository.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.15/image-repository.yaml index 9e3d3e5088ee..f11df32d8b5a 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.15/image-repository.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.15/image-repository.yaml @@ -25,8 +25,8 @@ apiServer: extraArgs: enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes -controlPlaneEndpoint: localhost:8443 +clusterName: mk +controlPlaneEndpoint: 1.1.1.1:8443 dns: type: CoreDNS etcd: diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.15/options.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.15/options.yaml index cf7d8c29647e..d277ac59e637 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.15/options.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.15/options.yaml @@ -31,8 +31,8 @@ scheduler: extraArgs: scheduler-name: "mini-scheduler" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes -controlPlaneEndpoint: localhost:8443 +clusterName: mk +controlPlaneEndpoint: 1.1.1.1:8443 dns: type: CoreDNS etcd: diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.16/containerd-api-port.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.16/containerd-api-port.yaml index 2f1d050a40cf..758f7b2f6277 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.16/containerd-api-port.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.16/containerd-api-port.yaml @@ -24,8 +24,8 @@ apiServer: extraArgs: enable-admission-plugins: 
"NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes -controlPlaneEndpoint: localhost:12345 +clusterName: mk +controlPlaneEndpoint: 1.1.1.1:12345 dns: type: CoreDNS etcd: diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.16/containerd-pod-network-cidr.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.16/containerd-pod-network-cidr.yaml index ad749f03cca7..15802a185979 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.16/containerd-pod-network-cidr.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.16/containerd-pod-network-cidr.yaml @@ -24,8 +24,8 @@ apiServer: extraArgs: enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes -controlPlaneEndpoint: localhost:8443 +clusterName: mk +controlPlaneEndpoint: 1.1.1.1:8443 dns: type: CoreDNS etcd: diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.16/containerd.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.16/containerd.yaml index ddc2d7cf74cf..0876e3bdde02 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.16/containerd.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.16/containerd.yaml @@ -24,8 +24,8 @@ apiServer: extraArgs: enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes -controlPlaneEndpoint: localhost:8443 +clusterName: mk +controlPlaneEndpoint: 1.1.1.1:8443 dns: type: CoreDNS etcd: diff --git 
a/pkg/minikube/bootstrapper/bsutil/testdata/v1.16/crio-options-gates.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.16/crio-options-gates.yaml index adbc88e1d773..6ca53c67e982 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.16/crio-options-gates.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.16/crio-options-gates.yaml @@ -34,8 +34,8 @@ scheduler: feature-gates: "a=b" scheduler-name: "mini-scheduler" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes -controlPlaneEndpoint: localhost:8443 +clusterName: mk +controlPlaneEndpoint: 1.1.1.1:8443 dns: type: CoreDNS etcd: diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.16/crio.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.16/crio.yaml index d401b50e81bd..0b87277ba28e 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.16/crio.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.16/crio.yaml @@ -24,8 +24,8 @@ apiServer: extraArgs: enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes -controlPlaneEndpoint: localhost:8443 +clusterName: mk +controlPlaneEndpoint: 1.1.1.1:8443 dns: type: CoreDNS etcd: diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.16/default.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.16/default.yaml index bf4ee2a96a9b..765a4b2398b6 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.16/default.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.16/default.yaml @@ -24,8 +24,8 @@ apiServer: extraArgs: enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes -controlPlaneEndpoint: localhost:8443 
+clusterName: mk +controlPlaneEndpoint: 1.1.1.1:8443 dns: type: CoreDNS etcd: diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.16/dns.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.16/dns.yaml index 9b464ae19458..1105d6fc3cff 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.16/dns.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.16/dns.yaml @@ -24,8 +24,8 @@ apiServer: extraArgs: enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes -controlPlaneEndpoint: localhost:8443 +clusterName: mk +controlPlaneEndpoint: 1.1.1.1:8443 dns: type: CoreDNS etcd: diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.16/image-repository.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.16/image-repository.yaml index 140db5ca327e..5b78859ead0d 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.16/image-repository.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.16/image-repository.yaml @@ -25,8 +25,8 @@ apiServer: extraArgs: enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes -controlPlaneEndpoint: localhost:8443 +clusterName: mk +controlPlaneEndpoint: 1.1.1.1:8443 dns: type: CoreDNS etcd: diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.16/options.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.16/options.yaml index c7623c0e0ff6..cb4d15968330 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.16/options.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.16/options.yaml @@ -31,8 +31,8 @@ scheduler: extraArgs: scheduler-name: "mini-scheduler" certificatesDir: /var/lib/minikube/certs 
-clusterName: kubernetes -controlPlaneEndpoint: localhost:8443 +clusterName: mk +controlPlaneEndpoint: 1.1.1.1:8443 dns: type: CoreDNS etcd: diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.17/containerd-api-port.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.17/containerd-api-port.yaml index cc6ffe6f0b82..a4c4222b5bc3 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.17/containerd-api-port.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.17/containerd-api-port.yaml @@ -24,8 +24,8 @@ apiServer: extraArgs: enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes -controlPlaneEndpoint: localhost:12345 +clusterName: mk +controlPlaneEndpoint: 1.1.1.1:12345 dns: type: CoreDNS etcd: diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.17/containerd-pod-network-cidr.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.17/containerd-pod-network-cidr.yaml index e2c056197741..cd1a400c07e2 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.17/containerd-pod-network-cidr.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.17/containerd-pod-network-cidr.yaml @@ -24,8 +24,8 @@ apiServer: extraArgs: enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes -controlPlaneEndpoint: localhost:8443 +clusterName: mk +controlPlaneEndpoint: 1.1.1.1:8443 dns: type: CoreDNS etcd: diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.17/containerd.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.17/containerd.yaml index 966a8bd99389..fe006e5d53f9 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.17/containerd.yaml +++ 
b/pkg/minikube/bootstrapper/bsutil/testdata/v1.17/containerd.yaml @@ -24,8 +24,8 @@ apiServer: extraArgs: enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes -controlPlaneEndpoint: localhost:8443 +clusterName: mk +controlPlaneEndpoint: 1.1.1.1:8443 dns: type: CoreDNS etcd: diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.17/crio-options-gates.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.17/crio-options-gates.yaml index e8cbd19ca935..74ec1286eb56 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.17/crio-options-gates.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.17/crio-options-gates.yaml @@ -34,8 +34,8 @@ scheduler: feature-gates: "a=b" scheduler-name: "mini-scheduler" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes -controlPlaneEndpoint: localhost:8443 +clusterName: mk +controlPlaneEndpoint: 1.1.1.1:8443 dns: type: CoreDNS etcd: diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.17/crio.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.17/crio.yaml index 7be737204491..3588d336df94 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.17/crio.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.17/crio.yaml @@ -24,8 +24,8 @@ apiServer: extraArgs: enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes -controlPlaneEndpoint: localhost:8443 +clusterName: mk +controlPlaneEndpoint: 1.1.1.1:8443 dns: type: CoreDNS etcd: diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.17/default.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.17/default.yaml index 
92d300e3167b..7651466f86ce 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.17/default.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.17/default.yaml @@ -24,8 +24,8 @@ apiServer: extraArgs: enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes -controlPlaneEndpoint: localhost:8443 +clusterName: mk +controlPlaneEndpoint: 1.1.1.1:8443 dns: type: CoreDNS etcd: diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.17/dns.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.17/dns.yaml index aaed488d7bf8..779cceeb46e6 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.17/dns.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.17/dns.yaml @@ -24,8 +24,8 @@ apiServer: extraArgs: enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes -controlPlaneEndpoint: localhost:8443 +clusterName: mk +controlPlaneEndpoint: 1.1.1.1:8443 dns: type: CoreDNS etcd: diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.17/image-repository.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.17/image-repository.yaml index 29539e671c47..a37e2dda8616 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.17/image-repository.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.17/image-repository.yaml @@ -25,8 +25,8 @@ apiServer: extraArgs: enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes -controlPlaneEndpoint: 
localhost:8443 +clusterName: mk +controlPlaneEndpoint: 1.1.1.1:8443 dns: type: CoreDNS etcd: diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.17/options.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.17/options.yaml index 34ceceafab19..c940653ab58a 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.17/options.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.17/options.yaml @@ -31,8 +31,8 @@ scheduler: extraArgs: scheduler-name: "mini-scheduler" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes -controlPlaneEndpoint: localhost:8443 +clusterName: mk +controlPlaneEndpoint: 1.1.1.1:8443 dns: type: CoreDNS etcd: diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.18/containerd-api-port.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.18/containerd-api-port.yaml index 8b43a8ff90a6..4360299921c7 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.18/containerd-api-port.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.18/containerd-api-port.yaml @@ -24,8 +24,8 @@ apiServer: extraArgs: enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes -controlPlaneEndpoint: localhost:12345 +clusterName: mk +controlPlaneEndpoint: 1.1.1.1:12345 dns: type: CoreDNS etcd: diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.18/containerd-pod-network-cidr.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.18/containerd-pod-network-cidr.yaml index df0718542eca..d714225d46bd 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.18/containerd-pod-network-cidr.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.18/containerd-pod-network-cidr.yaml @@ -24,8 +24,8 @@ apiServer: extraArgs: enable-admission-plugins: 
"NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes -controlPlaneEndpoint: localhost:8443 +clusterName: mk +controlPlaneEndpoint: 1.1.1.1:8443 dns: type: CoreDNS etcd: diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.18/containerd.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.18/containerd.yaml index 5ad344a4ae89..64161c1b0ae6 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.18/containerd.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.18/containerd.yaml @@ -24,8 +24,8 @@ apiServer: extraArgs: enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes -controlPlaneEndpoint: localhost:8443 +clusterName: mk +controlPlaneEndpoint: 1.1.1.1:8443 dns: type: CoreDNS etcd: diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.18/crio-options-gates.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.18/crio-options-gates.yaml index f0b4a1886973..66dfb8c7eaf5 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.18/crio-options-gates.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.18/crio-options-gates.yaml @@ -34,8 +34,8 @@ scheduler: feature-gates: "a=b" scheduler-name: "mini-scheduler" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes -controlPlaneEndpoint: localhost:8443 +clusterName: mk +controlPlaneEndpoint: 1.1.1.1:8443 dns: type: CoreDNS etcd: diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.18/crio.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.18/crio.yaml index 87cd69c53cff..862711d0c97d 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.18/crio.yaml +++ 
b/pkg/minikube/bootstrapper/bsutil/testdata/v1.18/crio.yaml @@ -24,8 +24,8 @@ apiServer: extraArgs: enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes -controlPlaneEndpoint: localhost:8443 +clusterName: mk +controlPlaneEndpoint: 1.1.1.1:8443 dns: type: CoreDNS etcd: diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.18/default.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.18/default.yaml index 9e2c74b129a7..6e3eefc76dbf 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.18/default.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.18/default.yaml @@ -24,8 +24,8 @@ apiServer: extraArgs: enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes -controlPlaneEndpoint: localhost:8443 +clusterName: mk +controlPlaneEndpoint: 1.1.1.1:8443 dns: type: CoreDNS etcd: diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.18/dns.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.18/dns.yaml index 5c654c39cba1..c463026f5fd6 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.18/dns.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.18/dns.yaml @@ -24,8 +24,8 @@ apiServer: extraArgs: enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes -controlPlaneEndpoint: localhost:8443 +clusterName: mk +controlPlaneEndpoint: 1.1.1.1:8443 dns: type: CoreDNS etcd: diff --git 
a/pkg/minikube/bootstrapper/bsutil/testdata/v1.18/image-repository.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.18/image-repository.yaml index afcdcea56521..55ca1f34211b 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.18/image-repository.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.18/image-repository.yaml @@ -25,8 +25,8 @@ apiServer: extraArgs: enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes -controlPlaneEndpoint: localhost:8443 +clusterName: mk +controlPlaneEndpoint: 1.1.1.1:8443 dns: type: CoreDNS etcd: diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.18/options.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.18/options.yaml index 014ce83eefa3..9049b3f987ed 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.18/options.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.18/options.yaml @@ -31,8 +31,8 @@ scheduler: extraArgs: scheduler-name: "mini-scheduler" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes -controlPlaneEndpoint: localhost:8443 +clusterName: mk +controlPlaneEndpoint: 1.1.1.1:8443 dns: type: CoreDNS etcd: diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/containerd-api-port.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/containerd-api-port.yaml index 74fd2313e6f5..c04d7a50114d 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/containerd-api-port.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/containerd-api-port.yaml @@ -24,8 +24,8 @@ apiServer: extraArgs: enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes 
-controlPlaneEndpoint: localhost:12345 +clusterName: mk +controlPlaneEndpoint: 1.1.1.1:12345 dns: type: CoreDNS etcd: diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/containerd-pod-network-cidr.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/containerd-pod-network-cidr.yaml index 79de1f51237a..980c7e94e7e3 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/containerd-pod-network-cidr.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/containerd-pod-network-cidr.yaml @@ -24,8 +24,8 @@ apiServer: extraArgs: enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes -controlPlaneEndpoint: localhost:8443 +clusterName: mk +controlPlaneEndpoint: 1.1.1.1:8443 dns: type: CoreDNS etcd: diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/containerd.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/containerd.yaml index 64df35076514..0e28179d6b8f 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/containerd.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/containerd.yaml @@ -24,8 +24,8 @@ apiServer: extraArgs: enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes -controlPlaneEndpoint: localhost:8443 +clusterName: mk +controlPlaneEndpoint: 1.1.1.1:8443 dns: type: CoreDNS etcd: diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/crio-options-gates.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/crio-options-gates.yaml index be8c0aec3c5a..326f80a61d4a 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/crio-options-gates.yaml +++ 
b/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/crio-options-gates.yaml @@ -34,8 +34,8 @@ scheduler: feature-gates: "a=b" scheduler-name: "mini-scheduler" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes -controlPlaneEndpoint: localhost:8443 +clusterName: mk +controlPlaneEndpoint: 1.1.1.1:8443 dns: type: CoreDNS etcd: diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/crio.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/crio.yaml index f0f0d6318db8..72ea86b7bc58 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/crio.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/crio.yaml @@ -24,8 +24,8 @@ apiServer: extraArgs: enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes -controlPlaneEndpoint: localhost:8443 +clusterName: mk +controlPlaneEndpoint: 1.1.1.1:8443 dns: type: CoreDNS etcd: diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/default.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/default.yaml index cfe2ec33ded1..aa884e40d1b4 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/default.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/default.yaml @@ -24,8 +24,8 @@ apiServer: extraArgs: enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes -controlPlaneEndpoint: localhost:8443 +clusterName: mk +controlPlaneEndpoint: 1.1.1.1:8443 dns: type: CoreDNS etcd: diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/dns.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/dns.yaml index dbbd63b757e5..b06f44e6b6bf 100644 --- 
a/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/dns.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/dns.yaml @@ -24,8 +24,8 @@ apiServer: extraArgs: enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes -controlPlaneEndpoint: localhost:8443 +clusterName: mk +controlPlaneEndpoint: 1.1.1.1:8443 dns: type: CoreDNS etcd: diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/image-repository.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/image-repository.yaml index 2abe0945cf09..7d8f48ca3dd3 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/image-repository.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/image-repository.yaml @@ -25,8 +25,8 @@ apiServer: extraArgs: enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes -controlPlaneEndpoint: localhost:8443 +clusterName: mk +controlPlaneEndpoint: 1.1.1.1:8443 dns: type: CoreDNS etcd: diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/options.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/options.yaml index c35c0ea5883d..0adff142ebaa 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/options.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/options.yaml @@ -31,8 +31,8 @@ scheduler: extraArgs: scheduler-name: "mini-scheduler" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes -controlPlaneEndpoint: localhost:8443 +clusterName: mk +controlPlaneEndpoint: 1.1.1.1:8443 dns: type: CoreDNS etcd: diff --git a/pkg/minikube/bootstrapper/certs.go b/pkg/minikube/bootstrapper/certs.go index 652392cb6522..6146fbc6345e 
100644 --- a/pkg/minikube/bootstrapper/certs.go +++ b/pkg/minikube/bootstrapper/certs.go @@ -121,8 +121,10 @@ func SetupCerts(cmd command.Runner, k8s config.KubernetesConfig, n config.Node) return errors.Wrap(err, "encoding kubeconfig") } - kubeCfgFile := assets.NewMemoryAsset(data, vmpath.GuestPersistentDir, "kubeconfig", "0644") - copyableFiles = append(copyableFiles, kubeCfgFile) + if n.ControlPlane { + kubeCfgFile := assets.NewMemoryAsset(data, vmpath.GuestPersistentDir, "kubeconfig", "0644") + copyableFiles = append(copyableFiles, kubeCfgFile) + } for _, f := range copyableFiles { if err := cmd.Copy(f); err != nil { diff --git a/pkg/minikube/bootstrapper/kubeadm/kubeadm.go b/pkg/minikube/bootstrapper/kubeadm/kubeadm.go index 39618cf8a2b6..1c0268d77923 100644 --- a/pkg/minikube/bootstrapper/kubeadm/kubeadm.go +++ b/pkg/minikube/bootstrapper/kubeadm/kubeadm.go @@ -165,6 +165,13 @@ func (k *Bootstrapper) StartCluster(cfg config.ClusterConfig) error { glog.Infof("StartCluster complete in %s", time.Since(start)) }() + // Remove admin.conf from any previous run + c := exec.Command("/bin/bash", "-c", "sudo rm -f /etc/kubernetes/admin.conf") + _, err = k.c.RunCmd(c) + if err != nil { + return errors.Wrap(err, "deleting admin.conf") + } + version, err := util.ParseKubernetesVersion(cfg.KubernetesConfig.KubernetesVersion) if err != nil { return errors.Wrap(err, "parsing kubernetes version") @@ -202,7 +209,7 @@ func (k *Bootstrapper) StartCluster(cfg config.ClusterConfig) error { } - c := exec.Command("/bin/bash", "-c", fmt.Sprintf("%s init --config %s %s --ignore-preflight-errors=%s", bsutil.InvokeKubeadm(cfg.KubernetesConfig.KubernetesVersion), bsutil.KubeadmYamlPath, extraFlags, strings.Join(ignore, ","))) + c = exec.Command("/bin/bash", "-c", fmt.Sprintf("%s init --config %s %s --ignore-preflight-errors=%s", bsutil.InvokeKubeadm(cfg.KubernetesConfig.KubernetesVersion), bsutil.KubeadmYamlPath, extraFlags, strings.Join(ignore, ","))) rr, err := k.c.RunCmd(c) if err != 
nil { return errors.Wrapf(err, "init failed. output: %q", rr.Output()) @@ -252,35 +259,37 @@ func (k *Bootstrapper) client(ip string, port int) (*kubernetes.Clientset, error return c, err } -// WaitForCluster blocks until the cluster appears to be healthy -func (k *Bootstrapper) WaitForCluster(cfg config.ClusterConfig, timeout time.Duration) error { +// WaitForNode blocks until the node appears to be healthy +func (k *Bootstrapper) WaitForNode(cfg config.ClusterConfig, n config.Node, timeout time.Duration) error { start := time.Now() out.T(out.Waiting, "Waiting for cluster to come online ...") - cp, err := config.PrimaryControlPlane(&cfg) - if err != nil { - return err - } + cr, err := cruntime.New(cruntime.Config{Type: cfg.KubernetesConfig.ContainerRuntime, Runner: k.c}) if err != nil { return err } - if err := kverify.WaitForAPIServerProcess(cr, k, k.c, start, timeout); err != nil { - return err + if n.ControlPlane { + if err := kverify.WaitForAPIServerProcess(cr, k, k.c, start, timeout); err != nil { + return err + } } - ip := cp.IP - port := cp.Port + ip := n.IP + port := n.Port if driver.IsKIC(cfg.Driver) { ip = oci.DefaultBindIPV4 - port, err = oci.ForwardedPort(cfg.Driver, cfg.Name, port) + p, err := oci.ForwardedPort(cfg.Driver, driver.MachineName(cfg, n), port) if err != nil { return errors.Wrapf(err, "get host-bind port %d for container %s", port, cfg.Name) } + port = p } - if err := kverify.WaitForHealthyAPIServer(cr, k, k.c, start, ip, port, timeout); err != nil { - return err + if n.ControlPlane { + if err := kverify.WaitForHealthyAPIServer(cr, k, k.c, start, ip, port, timeout); err != nil { + return err + } } c, err := k.client(ip, port) @@ -345,37 +354,76 @@ func (k *Bootstrapper) restartCluster(cfg config.ClusterConfig) error { return errors.Wrap(err, "apiserver healthz") } - for _, n := range cfg.Nodes { - ip := n.IP - port := n.Port - if driver.IsKIC(cfg.Driver) { - ip = oci.DefaultBindIPV4 - port, err = oci.ForwardedPort(cfg.Driver, cfg.Name, 
port) - if err != nil { - return errors.Wrapf(err, "get host-bind port %d for container %s", port, cfg.Name) - } - } - client, err := k.client(ip, port) + cp, err := config.PrimaryControlPlane(&cfg) + if err != nil { + return errors.Wrap(err, "getting control plane") + } + ip := cp.IP + port := cp.Port + if driver.IsKIC(cfg.Driver) { + ip = oci.DefaultBindIPV4 + port, err = oci.ForwardedPort(cfg.Driver, driver.MachineName(cfg, cp), port) if err != nil { - return errors.Wrap(err, "getting k8s client") + return errors.Wrapf(err, "get host-bind port %d for container %s", port, driver.MachineName(cfg, cp)) } + } + client, err := k.client(ip, port) + if err != nil { + return errors.Wrap(err, "getting k8s client") + } - if err := kverify.WaitForSystemPods(cr, k, k.c, client, time.Now(), kconst.DefaultControlPlaneTimeout); err != nil { - return errors.Wrap(err, "system pods") - } + if err := kverify.WaitForSystemPods(cr, k, k.c, client, time.Now(), kconst.DefaultControlPlaneTimeout); err != nil { + return errors.Wrap(err, "system pods") + } - // Explicitly re-enable kubeadm addons (proxy, coredns) so that they will check for IP or configuration changes. - if rr, err := k.c.RunCmd(exec.Command("/bin/bash", "-c", fmt.Sprintf("%s phase addon all --config %s", baseCmd, bsutil.KubeadmYamlPath))); err != nil { - return errors.Wrapf(err, fmt.Sprintf("addon phase cmd:%q", rr.Command())) - } + // Explicitly re-enable kubeadm addons (proxy, coredns) so that they will check for IP or configuration changes. 
+ if rr, err := k.c.RunCmd(exec.Command("/bin/bash", "-c", fmt.Sprintf("%s phase addon all --config %s", baseCmd, bsutil.KubeadmYamlPath))); err != nil { + return errors.Wrapf(err, fmt.Sprintf("addon phase cmd:%q", rr.Command())) + } - if err := bsutil.AdjustResourceLimits(k.c); err != nil { - glog.Warningf("unable to adjust resource limits: %v", err) - } + if err := bsutil.AdjustResourceLimits(k.c); err != nil { + glog.Warningf("unable to adjust resource limits: %v", err) + } + return nil +} + +// JoinCluster adds a node to an existing cluster +func (k *Bootstrapper) JoinCluster(cc config.ClusterConfig, n config.Node, joinCmd string) error { + start := time.Now() + glog.Infof("JoinCluster: %+v", cc) + defer func() { + glog.Infof("JoinCluster complete in %s", time.Since(start)) + }() + + // Join the master by specifying its token + joinCmd = fmt.Sprintf("%s --v=10 --node-name=%s", joinCmd, driver.MachineName(cc, n)) + out, err := k.c.RunCmd(exec.Command("/bin/bash", "-c", joinCmd)) + if err != nil { + return errors.Wrapf(err, "cmd failed: %s\n%+v\n", joinCmd, out) + } + + if _, err := k.c.RunCmd(exec.Command("/bin/bash", "-c", "sudo systemctl daemon-reload && sudo systemctl enable kubelet && sudo systemctl start kubelet")); err != nil { + return errors.Wrap(err, "starting kubelet") } + return nil } +// GenerateToken creates a token and returns the appropriate kubeadm join command to run +func (k *Bootstrapper) GenerateToken(cc config.ClusterConfig) (string, error) { + tokenCmd := exec.Command("/bin/bash", "-c", fmt.Sprintf("%s token create --print-join-command --ttl=0", bsutil.InvokeKubeadm(cc.KubernetesConfig.KubernetesVersion))) + r, err := k.c.RunCmd(tokenCmd) + if err != nil { + return "", errors.Wrap(err, "generating bootstrap token") + } + + joinCmd := r.Stdout.String() + joinCmd = strings.Replace(joinCmd, "kubeadm", bsutil.InvokeKubeadm(cc.KubernetesConfig.KubernetesVersion), 1) + joinCmd = fmt.Sprintf("%s --ignore-preflight-errors=all", 
strings.TrimSpace(joinCmd)) + + return joinCmd, nil +} + // DeleteCluster removes the components that were started earlier func (k *Bootstrapper) DeleteCluster(k8s config.KubernetesConfig) error { version, err := util.ParseKubernetesVersion(k8s.KubernetesVersion) @@ -400,7 +448,7 @@ func (k *Bootstrapper) SetupCerts(k8s config.KubernetesConfig, n config.Node) er return bootstrapper.SetupCerts(k.c, k8s, n) } -// UpdateCluster updates the cluster +// UpdateCluster updates the cluster. func (k *Bootstrapper) UpdateCluster(cfg config.ClusterConfig) error { images, err := images.Kubeadm(cfg.KubernetesConfig.ImageRepository, cfg.KubernetesConfig.KubernetesVersion) if err != nil { @@ -418,14 +466,24 @@ func (k *Bootstrapper) UpdateCluster(cfg config.ClusterConfig) error { return errors.Wrap(err, "runtime") } - // TODO: multiple nodes - kubeadmCfg, err := bsutil.GenerateKubeadmYAML(cfg, r, cfg.Nodes[0]) + for _, n := range cfg.Nodes { + err := k.UpdateNode(cfg, n, r) + if err != nil { + return errors.Wrap(err, "updating node") + } + } + + return nil +} + +// UpdateNode updates a node. 
+func (k *Bootstrapper) UpdateNode(cfg config.ClusterConfig, n config.Node, r cruntime.Manager) error { + kubeadmCfg, err := bsutil.GenerateKubeadmYAML(cfg, n, r) if err != nil { return errors.Wrap(err, "generating kubeadm cfg") } - // TODO: multiple nodes - kubeletCfg, err := bsutil.NewKubeletConfig(cfg, cfg.Nodes[0], r) + kubeletCfg, err := bsutil.NewKubeletConfig(cfg, n, r) if err != nil { return errors.Wrap(err, "generating kubelet config") } diff --git a/pkg/minikube/config/config.go b/pkg/minikube/config/config.go index 4a52268ce5e5..f11816070c01 100644 --- a/pkg/minikube/config/config.go +++ b/pkg/minikube/config/config.go @@ -52,6 +52,14 @@ const ( var ( // ErrKeyNotFound is the error returned when a key doesn't exist in the config file ErrKeyNotFound = errors.New("specified key could not be found in config") + // DockerEnv contains the environment variables + DockerEnv []string + // DockerOpt contains the option parameters + DockerOpt []string + // ExtraOptions contains extra options (if any) + ExtraOptions ExtraOptionSlice + // AddonList contains the list of addons + AddonList []string ) // ErrNotExist is the error returned when a config does not exist diff --git a/pkg/minikube/config/node.go b/pkg/minikube/config/node.go deleted file mode 100644 index 1c6f050159e2..000000000000 --- a/pkg/minikube/config/node.go +++ /dev/null @@ -1,36 +0,0 @@ -/* -Copyright 2019 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package config - -// AddNode adds a new node config to an existing cluster. -func AddNode(cc *ClusterConfig, name string, controlPlane bool, k8sVersion string, profileName string) error { - node := Node{ - Name: name, - Worker: true, - } - - if controlPlane { - node.ControlPlane = true - } - - if k8sVersion != "" { - node.KubernetesVersion = k8sVersion - } - - cc.Nodes = append(cc.Nodes, node) - return SaveProfile(profileName, cc) -} diff --git a/pkg/minikube/config/profile.go b/pkg/minikube/config/profile.go index c146a8ac424d..bfb6298c71f2 100644 --- a/pkg/minikube/config/profile.go +++ b/pkg/minikube/config/profile.go @@ -111,6 +111,23 @@ func CreateEmptyProfile(name string, miniHome ...string) error { return SaveProfile(name, cfg, miniHome...) } +// SaveNode saves a node to a cluster +func SaveNode(cfg *ClusterConfig, node *Node) error { + update := false + for i, n := range cfg.Nodes { + if n.Name == node.Name { + cfg.Nodes[i] = *node + update = true + break + } + } + + if !update { + cfg.Nodes = append(cfg.Nodes, *node) + } + return SaveProfile(viper.GetString(ProfileName), cfg) +} + // SaveProfile creates an profile out of the cfg and stores in $MINIKUBE_HOME/profiles//config.json func SaveProfile(name string, cfg *ClusterConfig, miniHome ...string) error { data, err := json.MarshalIndent(cfg, "", " ") diff --git a/pkg/minikube/driver/driver.go b/pkg/minikube/driver/driver.go index 2dce6350cde7..b6106474d8d1 100644 --- a/pkg/minikube/driver/driver.go +++ b/pkg/minikube/driver/driver.go @@ -112,7 +112,7 @@ func IsMock(name string) bool { // IsVM checks if the driver is a VM func IsVM(name string) bool { - if IsKIC(name) || IsMock(name) || BareMetal(name) { + if IsKIC(name) || BareMetal(name) { return false } return true @@ -234,5 +234,13 @@ func MachineName(cc config.ClusterConfig, n config.Node) string { if len(cc.Nodes) == 1 || n.ControlPlane { return cc.Name } - return fmt.Sprintf("%s-%s", cc.Name, n.Name) + return fmt.Sprintf("%s---%s", cc.Name, 
n.Name) +} + +// ClusterNameFromMachine retrieves the cluster name embedded in the machine name +func ClusterNameFromMachine(name string) (string, string) { + if strings.Contains(name, "---") { + return strings.Split(name, "---")[0], strings.Split(name, "---")[1] + } + return name, name } diff --git a/pkg/minikube/machine/cluster_test.go b/pkg/minikube/machine/cluster_test.go index 0c32c8642a28..a058b905c19b 100644 --- a/pkg/minikube/machine/cluster_test.go +++ b/pkg/minikube/machine/cluster_test.go @@ -108,6 +108,7 @@ func TestCreateHost(t *testing.T) { func TestStartHostExists(t *testing.T) { RegisterMockDriver(t) api := tests.NewMockAPI(t) + // Create an initial host. ih, err := createHost(api, defaultClusterConfig, config.Node{Name: "minikube"}) if err != nil { @@ -129,7 +130,7 @@ func TestStartHostExists(t *testing.T) { n := config.Node{Name: ih.Name} // This should pass without calling Create because the host exists already. - h, err := StartHost(api, mc, n) + h, _, err := StartHost(api, mc, n) if err != nil { t.Fatalf("Error starting host: %v", err) } @@ -163,7 +164,7 @@ func TestStartHostErrMachineNotExist(t *testing.T) { n := config.Node{Name: h.Name} // This should pass with creating host, while machine does not exist. - h, err = StartHost(api, mc, n) + h, _, err = StartHost(api, mc, n) if err != nil { if err != ErrorMachineNotExist { t.Fatalf("Error starting host: %v", err) @@ -173,8 +174,10 @@ func TestStartHostErrMachineNotExist(t *testing.T) { mc.Name = h.Name n.Name = h.Name + n.Name = h.Name + // Second call. This should pass without calling Create because the host exists already. 
- h, err = StartHost(api, mc, n) + h, _, err = StartHost(api, mc, n) if err != nil { t.Fatalf("Error starting host: %v", err) } @@ -207,7 +210,7 @@ func TestStartStoppedHost(t *testing.T) { mc := defaultClusterConfig mc.Name = h.Name n := config.Node{Name: h.Name} - h, err = StartHost(api, mc, n) + h, _, err = StartHost(api, mc, n) if err != nil { t.Fatal("Error starting host.") } @@ -235,7 +238,7 @@ func TestStartHost(t *testing.T) { md := &tests.MockDetector{Provisioner: &tests.MockProvisioner{}} provision.SetDetector(md) - h, err := StartHost(api, defaultClusterConfig, config.Node{Name: "minikube"}) + h, _, err := StartHost(api, defaultClusterConfig, config.Node{Name: "minikube"}) if err != nil { t.Fatal("Error starting host.") } @@ -269,7 +272,7 @@ func TestStartHostConfig(t *testing.T) { DockerOpt: []string{"param=value"}, } - h, err := StartHost(api, cfg, config.Node{Name: "minikube"}) + h, _, err := StartHost(api, cfg, config.Node{Name: "minikube"}) if err != nil { t.Fatal("Error starting host.") } @@ -422,16 +425,19 @@ func TestCreateSSHShell(t *testing.T) { t.Fatalf("Error starting ssh server: %v", err) } + m := viper.GetString("profile") + d := &tests.MockDriver{ Port: port, CurrentState: state.Running, BaseDriver: drivers.BaseDriver{ - IPAddress: "127.0.0.1", - SSHKeyPath: "", + IPAddress: "127.0.0.1", + SSHKeyPath: "", + MachineName: m, }, T: t, } - api.Hosts[viper.GetString("profile")] = &host.Host{Driver: d} + api.Hosts[m] = &host.Host{Driver: d} cc := defaultClusterConfig cc.Name = viper.GetString("profile") diff --git a/pkg/minikube/machine/fix.go b/pkg/minikube/machine/fix.go index 25f921cd5a9a..6820fe9efb59 100644 --- a/pkg/minikube/machine/fix.go +++ b/pkg/minikube/machine/fix.go @@ -25,7 +25,6 @@ import ( "time" "github.com/docker/machine/drivers/virtualbox" - "github.com/docker/machine/libmachine" "github.com/docker/machine/libmachine/host" "github.com/docker/machine/libmachine/provision" diff --git a/pkg/minikube/machine/start.go 
b/pkg/minikube/machine/start.go index 73c982fa809f..368af5b3af96 100644 --- a/pkg/minikube/machine/start.go +++ b/pkg/minikube/machine/start.go @@ -62,28 +62,32 @@ var ( ) // StartHost starts a host VM. -func StartHost(api libmachine.API, cfg config.ClusterConfig, n config.Node) (*host.Host, error) { +func StartHost(api libmachine.API, cfg config.ClusterConfig, n config.Node) (*host.Host, bool, error) { + machineName := driver.MachineName(cfg, n) + // Prevent machine-driver boot races, as well as our own certificate race - releaser, err := acquireMachinesLock(cfg.Name) + releaser, err := acquireMachinesLock(machineName) if err != nil { - return nil, errors.Wrap(err, "boot lock") + return nil, false, errors.Wrap(err, "boot lock") } start := time.Now() defer func() { - glog.Infof("releasing machines lock for %q, held for %s", cfg.Name, time.Since(start)) + glog.Infof("releasing machines lock for %q, held for %s", machineName, time.Since(start)) releaser.Release() }() - exists, err := api.Exists(cfg.Name) + exists, err := api.Exists(machineName) if err != nil { - return nil, errors.Wrapf(err, "exists: %s", cfg.Name) + return nil, false, errors.Wrapf(err, "exists: %s", machineName) } if !exists { - glog.Infof("Provisioning new machine with config: %+v", cfg) - return createHost(api, cfg, n) + glog.Infof("Provisioning new machine with config: %+v %+v", cfg, n) + h, err := createHost(api, cfg, n) + return h, exists, err } glog.Infoln("Skipping create...Using existing machine configuration") - return fixHost(api, cfg, n) + h, err := fixHost(api, cfg, n) + return h, exists, err } func engineOptions(cfg config.ClusterConfig) *engine.Options { @@ -98,7 +102,7 @@ func engineOptions(cfg config.ClusterConfig) *engine.Options { } func createHost(api libmachine.API, cfg config.ClusterConfig, n config.Node) (*host.Host, error) { - glog.Infof("createHost starting for %q (driver=%q)", cfg.Name, cfg.Driver) + glog.Infof("createHost starting for %q (driver=%q)", n.Name, cfg.Driver) 
start := time.Now() defer func() { glog.Infof("createHost completed in %s", time.Since(start)) }() diff --git a/pkg/minikube/node/cache.go b/pkg/minikube/node/cache.go index 293424fb8d47..f1b3ac8f321b 100644 --- a/pkg/minikube/node/cache.go +++ b/pkg/minikube/node/cache.go @@ -35,8 +35,13 @@ import ( "k8s.io/minikube/pkg/minikube/out" ) -// beginCacheKubernetesImages caches images required for kubernetes version in the background -func beginCacheKubernetesImages(g *errgroup.Group, imageRepository string, k8sVersion, cRuntime string) { +const ( + cacheImages = "cache-images" + cacheImageConfigKey = "cache" +) + +// beginCacheKubernetesImages caches images required for kubernetes version in the background +func beginCacheKubernetesImages(g *errgroup.Group, imageRepository string, k8sVersion string, cRuntime string) { if download.PreloadExists(k8sVersion, cRuntime) { glog.Info("Caching tarball of preloaded images") err := download.Preload(k8sVersion, cRuntime) @@ -47,7 +52,7 @@ func beginCacheKubernetesImages(g *errgroup.Group, imageRepository string, k8sVe glog.Warningf("Error downloading preloaded artifacts will continue without preload: %v", err) } - if !viper.GetBool("cache-images") { + if !viper.GetBool(cacheImages) { return } @@ -56,6 +61,7 @@ func beginCacheKubernetesImages(g *errgroup.Group, imageRepository string, k8sVe }) } +// handleDownloadOnly caches appropriate binaries and images func handleDownloadOnly(cacheGroup, kicGroup *errgroup.Group, k8sVersion string) { // If --download-only, complete the remaining downloads and exit. 
if !viper.GetBool("download-only") { @@ -92,7 +98,7 @@ func doCacheBinaries(k8sVersion string) error { return machine.CacheBinariesForBootstrapper(k8sVersion, viper.GetString(cmdcfg.Bootstrapper)) } -// beginDownloadKicArtifacts downloads the kic image + preload tarball, returns true if preload is available +// beginDownloadKicArtifacts downloads the kic image + preload tarball in the background func beginDownloadKicArtifacts(g *errgroup.Group) { glog.Info("Beginning downloading kic artifacts") g.Go(func() error { @@ -101,6 +107,7 @@ func beginDownloadKicArtifacts(g *errgroup.Group) { }) } +// waitDownloadKicArtifacts blocks until the required artifacts for KIC are downloaded. func waitDownloadKicArtifacts(g *errgroup.Group) { if err := g.Wait(); err != nil { glog.Errorln("Error downloading kic artifacts: ", err) @@ -109,7 +116,7 @@ func waitDownloadKicArtifacts(g *errgroup.Group) { glog.Info("Successfully downloaded all kic artifacts") } -// waitCacheRequiredImages blocks until the required images are all cached. +// waitCacheRequiredImages blocks until the required images are all cached. func waitCacheRequiredImages(g *errgroup.Group) { if !viper.GetBool(cacheImages) { return @@ -132,6 +139,19 @@ func saveImagesToTarFromConfig() error { return image.SaveToDir(images, constants.ImageCacheDir) } +// CacheAndLoadImagesInConfig loads the images currently in the config file +// called by 'start' and 'cache reload' commands. 
+func CacheAndLoadImagesInConfig() error { + images, err := imagesInConfigFile() + if err != nil { + return err + } + if len(images) == 0 { + return nil + } + return machine.CacheAndLoadImages(images) +} + func imagesInConfigFile() ([]string, error) { configFile, err := config.ReadConfig(localpath.ConfigFile()) if err != nil { @@ -146,16 +166,3 @@ func imagesInConfigFile() ([]string, error) { } return []string{}, nil } - -// CacheAndLoadImagesInConfig loads the images currently in the config file -// called by 'start' and 'cache reload' commands. -func CacheAndLoadImagesInConfig() error { - images, err := imagesInConfigFile() - if err != nil { - return err - } - if len(images) == 0 { - return nil - } - return machine.CacheAndLoadImages(images) -} diff --git a/pkg/minikube/node/config.go b/pkg/minikube/node/config.go index b29867f1b6ae..ef0f66dc1265 100644 --- a/pkg/minikube/node/config.go +++ b/pkg/minikube/node/config.go @@ -18,165 +18,33 @@ package node import ( "fmt" - "net" "os" "os/exec" "path/filepath" "strconv" - "github.com/blang/semver" - "github.com/docker/machine/libmachine" - "github.com/docker/machine/libmachine/host" "github.com/golang/glog" - "github.com/pkg/errors" "github.com/spf13/viper" - cmdcfg "k8s.io/minikube/cmd/minikube/cmd/config" - "k8s.io/minikube/pkg/drivers/kic/oci" - "k8s.io/minikube/pkg/minikube/bootstrapper" - "k8s.io/minikube/pkg/minikube/cluster" "k8s.io/minikube/pkg/minikube/config" "k8s.io/minikube/pkg/minikube/constants" "k8s.io/minikube/pkg/minikube/cruntime" - "k8s.io/minikube/pkg/minikube/driver" "k8s.io/minikube/pkg/minikube/exit" - "k8s.io/minikube/pkg/minikube/kubeconfig" "k8s.io/minikube/pkg/minikube/localpath" - "k8s.io/minikube/pkg/minikube/machine" "k8s.io/minikube/pkg/minikube/out" "k8s.io/minikube/pkg/util/lock" ) -var ( - // DockerEnv contains the environment variables - DockerEnv []string - // DockerOpt contains the option parameters - DockerOpt []string - // ExtraOptions contains extra options (if any) - 
ExtraOptions config.ExtraOptionSlice - // AddonList contains the list of addons - AddonList []string -) - -// configureRuntimes does what needs to happen to get a runtime going. -func configureRuntimes(runner cruntime.CommandRunner, drvName string, k8s config.KubernetesConfig, kv semver.Version) cruntime.Manager { - co := cruntime.Config{ - Type: viper.GetString(containerRuntime), - Runner: runner, ImageRepository: k8s.ImageRepository, - KubernetesVersion: kv, - } - cr, err := cruntime.New(co) - if err != nil { - exit.WithError("Failed runtime", err) - } - - disableOthers := true - if driver.BareMetal(drvName) { - disableOthers = false - } - - // Preload is overly invasive for bare metal, and caching is not meaningful. KIC handled elsewhere. - if driver.IsVM(drvName) { - if err := cr.Preload(k8s); err != nil { - switch err.(type) { - case *cruntime.ErrISOFeature: - out.T(out.Tip, "Existing disk is missing new features ({{.error}}). To upgrade, run 'minikube delete'", out.V{"error": err}) - default: - glog.Warningf("%s preload failed: %v, falling back to caching images", cr.Name(), err) - } - - if err := machine.CacheImagesForBootstrapper(k8s.ImageRepository, k8s.KubernetesVersion, viper.GetString(cmdcfg.Bootstrapper)); err != nil { - exit.WithError("Failed to cache images", err) - } - } - } - - err = cr.Enable(disableOthers) - if err != nil { - exit.WithError("Failed to enable container runtime", err) - } - - return cr -} - func showVersionInfo(k8sVersion string, cr cruntime.Manager) { version, _ := cr.Version() out.T(cr.Style(), "Preparing Kubernetes {{.k8sVersion}} on {{.runtime}} {{.runtimeVersion}} ...", out.V{"k8sVersion": k8sVersion, "runtime": cr.Name(), "runtimeVersion": version}) - for _, v := range DockerOpt { + for _, v := range config.DockerOpt { out.T(out.Option, "opt {{.docker_option}}", out.V{"docker_option": v}) } - for _, v := range DockerEnv { + for _, v := range config.DockerEnv { out.T(out.Option, "env {{.docker_env}}", out.V{"docker_env": v}) } 
} -// setupKubeAdm adds any requested files into the VM before Kubernetes is started -func setupKubeAdm(mAPI libmachine.API, cfg config.ClusterConfig, node config.Node) bootstrapper.Bootstrapper { - bs, err := cluster.Bootstrapper(mAPI, viper.GetString(cmdcfg.Bootstrapper), cfg, node) - if err != nil { - exit.WithError("Failed to get bootstrapper", err) - } - for _, eo := range ExtraOptions { - out.T(out.Option, "{{.extra_option_component_name}}.{{.key}}={{.value}}", out.V{"extra_option_component_name": eo.Component, "key": eo.Key, "value": eo.Value}) - } - // Loads cached images, generates config files, download binaries - if err := bs.UpdateCluster(cfg); err != nil { - exit.WithError("Failed to update cluster", err) - } - if err := bs.SetupCerts(cfg.KubernetesConfig, node); err != nil { - exit.WithError("Failed to setup certs", err) - } - return bs -} - -func setupKubeconfig(h *host.Host, cc *config.ClusterConfig, n *config.Node, clusterName string) (*kubeconfig.Settings, error) { - addr, err := apiServerURL(*h, *cc, *n) - if err != nil { - exit.WithError("Failed to get api server URL", err) - } - - kcs := &kubeconfig.Settings{ - ClusterName: clusterName, - ClusterServerAddress: addr, - ClientCertificate: localpath.MakeMiniPath("client.crt"), - ClientKey: localpath.MakeMiniPath("client.key"), - CertificateAuthority: localpath.MakeMiniPath("ca.crt"), - KeepContext: viper.GetBool(keepContext), - EmbedCerts: viper.GetBool(embedCerts), - } - - kcs.SetPath(kubeconfig.PathFromEnv()) - if err := kubeconfig.Update(kcs); err != nil { - return kcs, err - } - return kcs, nil -} - -// apiServerURL returns a URL to end user can reach to the api server -func apiServerURL(h host.Host, cc config.ClusterConfig, n config.Node) (string, error) { - hostname := "" - port := n.Port - var err error - if driver.IsKIC(h.DriverName) { - // for kic drivers we use 127.0.0.1 instead of node IP, - // because of Docker on MacOs limitations for reaching to container's IP. 
- hostname = oci.DefaultBindIPV4 - port, err = oci.ForwardedPort(h.DriverName, h.Name, port) - if err != nil { - return "", errors.Wrap(err, "host port binding") - } - } else { - hostname, err = h.Driver.GetIP() - if err != nil { - return "", errors.Wrap(err, "get ip") - } - } - - if cc.KubernetesConfig.APIServerName != constants.APIServerName { - hostname = cc.KubernetesConfig.APIServerName - } - return fmt.Sprintf("https://" + net.JoinHostPort(hostname, strconv.Itoa(port))), nil -} - // configureMounts configures any requested filesystem mounts func configureMounts() { if !viper.GetBool(createMount) { diff --git a/pkg/minikube/node/machine.go b/pkg/minikube/node/machine.go deleted file mode 100644 index 483131515af5..000000000000 --- a/pkg/minikube/node/machine.go +++ /dev/null @@ -1,187 +0,0 @@ -/* -Copyright 2020 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package node - -import ( - "fmt" - "net" - "os" - "os/exec" - "strings" - "time" - - "github.com/docker/machine/libmachine" - "github.com/docker/machine/libmachine/host" - "github.com/golang/glog" - "github.com/spf13/viper" - "k8s.io/minikube/pkg/minikube/bootstrapper/images" - "k8s.io/minikube/pkg/minikube/command" - "k8s.io/minikube/pkg/minikube/config" - "k8s.io/minikube/pkg/minikube/driver" - "k8s.io/minikube/pkg/minikube/exit" - "k8s.io/minikube/pkg/minikube/machine" - "k8s.io/minikube/pkg/minikube/out" - "k8s.io/minikube/pkg/minikube/proxy" - "k8s.io/minikube/pkg/util/retry" -) - -func startMachine(cfg *config.ClusterConfig, node *config.Node) (runner command.Runner, preExists bool, machineAPI libmachine.API, host *host.Host) { - m, err := machine.NewAPIClient() - if err != nil { - exit.WithError("Failed to get machine client", err) - } - host, preExists = startHost(m, *cfg, *node) - runner, err = machine.CommandRunner(host) - if err != nil { - exit.WithError("Failed to get command runner", err) - } - - ip := validateNetwork(host, runner) - - // Bypass proxy for minikube's vm host ip - err = proxy.ExcludeIP(ip) - if err != nil { - out.ErrT(out.FailureType, "Failed to set NO_PROXY Env. Please use `export NO_PROXY=$NO_PROXY,{{.ip}}`.", out.V{"ip": ip}) - } - // Save IP to configuration file for subsequent use - node.IP = ip - - if err := Save(cfg, node); err != nil { - exit.WithError("Failed to save config", err) - } - - return runner, preExists, m, host -} - -// startHost starts a new minikube host using a VM or None -func startHost(api libmachine.API, mc config.ClusterConfig, n config.Node) (*host.Host, bool) { - exists, err := api.Exists(mc.Name) - if err != nil { - exit.WithError("Failed to check if machine exists", err) - } - - host, err := machine.StartHost(api, mc, n) - if err != nil { - exit.WithError("Unable to start VM. 
Please investigate and run 'minikube delete' if possible", err) - } - return host, exists -} - -// validateNetwork tries to catch network problems as soon as possible -func validateNetwork(h *host.Host, r command.Runner) string { - ip, err := h.Driver.GetIP() - if err != nil { - exit.WithError("Unable to get VM IP address", err) - } - - optSeen := false - warnedOnce := false - for _, k := range proxy.EnvVars { - if v := os.Getenv(k); v != "" { - if !optSeen { - out.T(out.Internet, "Found network options:") - optSeen = true - } - out.T(out.Option, "{{.key}}={{.value}}", out.V{"key": k, "value": v}) - ipExcluded := proxy.IsIPExcluded(ip) // Skip warning if minikube ip is already in NO_PROXY - k = strings.ToUpper(k) // for http_proxy & https_proxy - if (k == "HTTP_PROXY" || k == "HTTPS_PROXY") && !ipExcluded && !warnedOnce { - out.WarningT("You appear to be using a proxy, but your NO_PROXY environment does not include the minikube IP ({{.ip_address}}). Please see {{.documentation_url}} for more details", out.V{"ip_address": ip, "documentation_url": "https://minikube.sigs.k8s.io/docs/reference/networking/proxy/"}) - warnedOnce = true - } - } - } - - if !driver.BareMetal(h.Driver.DriverName()) && !driver.IsKIC(h.Driver.DriverName()) { - trySSH(h, ip) - } - - tryLookup(r) - tryRegistry(r) - return ip -} - -func trySSH(h *host.Host, ip string) { - if viper.GetBool("force") { - return - } - - sshAddr := net.JoinHostPort(ip, "22") - - dial := func() (err error) { - d := net.Dialer{Timeout: 3 * time.Second} - conn, err := d.Dial("tcp", sshAddr) - if err != nil { - out.WarningT("Unable to verify SSH connectivity: {{.error}}. 
Will retry...", out.V{"error": err}) - return err - } - _ = conn.Close() - return nil - } - - if err := retry.Expo(dial, time.Second, 13*time.Second); err != nil { - exit.WithCodeT(exit.IO, `minikube is unable to connect to the VM: {{.error}} - - This is likely due to one of two reasons: - - - VPN or firewall interference - - {{.hypervisor}} network configuration issue - - Suggested workarounds: - - - Disable your local VPN or firewall software - - Configure your local VPN or firewall to allow access to {{.ip}} - - Restart or reinstall {{.hypervisor}} - - Use an alternative --driver - - Use --force to override this connectivity check - `, out.V{"error": err, "hypervisor": h.Driver.DriverName(), "ip": ip}) - } -} - -func tryLookup(r command.Runner) { - // DNS check - if rr, err := r.RunCmd(exec.Command("nslookup", "-type=ns", "kubernetes.io")); err != nil { - glog.Infof("%s failed: %v which might be okay will retry nslookup without query type", rr.Args, err) - // will try with without query type for ISOs with different busybox versions. - if _, err = r.RunCmd(exec.Command("nslookup", "kubernetes.io")); err != nil { - glog.Warningf("nslookup failed: %v", err) - // try with the older "host" command, instead of the newer "nslookup" - if _, err = r.RunCmd(exec.Command("host", "kubernetes.io")); err != nil { - out.WarningT("Node may be unable to resolve external DNS records") - } - } - } -} -func tryRegistry(r command.Runner) { - // Try an HTTPS connection to the image repository - proxy := os.Getenv("HTTPS_PROXY") - opts := []string{"-sS"} - if proxy != "" && !strings.HasPrefix(proxy, "localhost") && !strings.HasPrefix(proxy, "127.0") { - opts = append([]string{"-x", proxy}, opts...) 
- } - - repo := viper.GetString(imageRepository) - if repo == "" { - repo = images.DefaultKubernetesRepo - } - - opts = append(opts, fmt.Sprintf("https://%s/", repo)) - if rr, err := r.RunCmd(exec.Command("curl", opts...)); err != nil { - glog.Warningf("%s failed: %v", rr.Args, err) - out.WarningT("VM is unable to access {{.repository}}, you may need to configure a proxy or set --image-repository", out.V{"repository": repo}) - } -} diff --git a/pkg/minikube/node/node.go b/pkg/minikube/node/node.go index e92bad65b597..97e9d2f2042e 100644 --- a/pkg/minikube/node/node.go +++ b/pkg/minikube/node/node.go @@ -18,6 +18,7 @@ package node import ( "errors" + "fmt" "github.com/spf13/viper" "k8s.io/minikube/pkg/minikube/config" @@ -25,48 +26,22 @@ import ( "k8s.io/minikube/pkg/minikube/machine" ) +// TODO: Share these between cluster and node packages const ( - imageRepository = "image-repository" - cacheImages = "cache-images" - waitUntilHealthy = "wait" - cacheImageConfigKey = "cache" - containerRuntime = "container-runtime" - embedCerts = "embed-certs" - keepContext = "keep-context" - mountString = "mount-string" - createMount = "mount" - waitTimeout = "wait-timeout" + mountString = "mount-string" + createMount = "mount" ) // Add adds a new node config to an existing cluster. 
-func Add(cc *config.ClusterConfig, name string, controlPlane bool, worker bool, k8sVersion string, profileName string) (*config.Node, error) { - n := config.Node{ - Name: name, - Worker: true, - } - - if controlPlane { - n.ControlPlane = true - } - - if worker { - n.Worker = true - } +func Add(cc *config.ClusterConfig, n config.Node) error { - if k8sVersion != "" { - n.KubernetesVersion = k8sVersion - } else { - n.KubernetesVersion = cc.KubernetesConfig.KubernetesVersion - } - - cc.Nodes = append(cc.Nodes, n) - err := config.SaveProfile(profileName, cc) + err := config.SaveNode(cc, &n) if err != nil { - return nil, err + return err } - _, err = Start(*cc, n, false, nil) - return &n, err + Start(*cc, n, nil, false) + return nil } // Delete stops and deletes the given node from the given cluster @@ -117,3 +92,8 @@ func Save(cfg *config.ClusterConfig, node *config.Node) error { } return config.SaveProfile(viper.GetString(config.ProfileName), cfg) } + +// Name returns the appropriate name for the node given the current number of nodes +func Name(index int) string { + return fmt.Sprintf("m%02d", index) +} diff --git a/pkg/minikube/node/start.go b/pkg/minikube/node/start.go index a3c5eee92bee..b46469837eda 100644 --- a/pkg/minikube/node/start.go +++ b/pkg/minikube/node/start.go @@ -17,103 +17,417 @@ limitations under the License. 
package node import ( + "fmt" + "net" "os" + "os/exec" + "strconv" + "strings" + "time" + "github.com/blang/semver" + "github.com/docker/machine/libmachine" + "github.com/docker/machine/libmachine/host" + "github.com/golang/glog" + "github.com/pkg/errors" "github.com/spf13/viper" "golang.org/x/sync/errgroup" + cmdcfg "k8s.io/minikube/cmd/minikube/cmd/config" "k8s.io/minikube/pkg/addons" + "k8s.io/minikube/pkg/drivers/kic/oci" + "k8s.io/minikube/pkg/minikube/bootstrapper" + "k8s.io/minikube/pkg/minikube/bootstrapper/images" + "k8s.io/minikube/pkg/minikube/cluster" + "k8s.io/minikube/pkg/minikube/command" "k8s.io/minikube/pkg/minikube/config" + "k8s.io/minikube/pkg/minikube/constants" + "k8s.io/minikube/pkg/minikube/cruntime" "k8s.io/minikube/pkg/minikube/driver" "k8s.io/minikube/pkg/minikube/exit" "k8s.io/minikube/pkg/minikube/kubeconfig" "k8s.io/minikube/pkg/minikube/localpath" "k8s.io/minikube/pkg/minikube/logs" + "k8s.io/minikube/pkg/minikube/machine" "k8s.io/minikube/pkg/minikube/out" + "k8s.io/minikube/pkg/minikube/proxy" "k8s.io/minikube/pkg/util" + "k8s.io/minikube/pkg/util/retry" ) -// Start spins up a guest and starts the kubernetes node. -func Start(mc config.ClusterConfig, n config.Node, primary bool, existingAddons map[string]bool) (*kubeconfig.Settings, error) { - k8sVersion := mc.KubernetesConfig.KubernetesVersion - driverName := mc.Driver +const ( + waitTimeout = "wait-timeout" + waitUntilHealthy = "wait" + embedCerts = "embed-certs" + keepContext = "keep-context" + imageRepository = "image-repository" + containerRuntime = "container-runtime" +) - // If using kic, make sure we download the kic base image +// Start spins up a guest and starts the kubernetes node. 
+func Start(cc config.ClusterConfig, n config.Node, existingAddons map[string]bool, apiServer bool) *kubeconfig.Settings { var kicGroup errgroup.Group - if driver.IsKIC(driverName) { + if driver.IsKIC(cc.Driver) { beginDownloadKicArtifacts(&kicGroup) } var cacheGroup errgroup.Group - // Adding a second layer of cache does not make sense for the none driver - if !driver.BareMetal(driverName) { - beginCacheKubernetesImages(&cacheGroup, mc.KubernetesConfig.ImageRepository, k8sVersion, mc.KubernetesConfig.ContainerRuntime) + if !driver.BareMetal(cc.Driver) { + beginCacheKubernetesImages(&cacheGroup, cc.KubernetesConfig.ImageRepository, n.KubernetesVersion, cc.KubernetesConfig.ContainerRuntime) } // Abstraction leakage alert: startHost requires the config to be saved, to satistfy pkg/provision/buildroot. - // Hence, saveProfile must be called before startHost, and again afterwards when we know the IP. - if err := config.SaveProfile(viper.GetString(config.ProfileName), &mc); err != nil { + // Hence, saveConfig must be called before startHost, and again afterwards when we know the IP. + if err := config.SaveProfile(viper.GetString(config.ProfileName), &cc); err != nil { exit.WithError("Failed to save config", err) } - // exits here in case of --download-only option. 
- handleDownloadOnly(&cacheGroup, &kicGroup, k8sVersion) + handleDownloadOnly(&cacheGroup, &kicGroup, n.KubernetesVersion) waitDownloadKicArtifacts(&kicGroup) - mRunner, preExists, machineAPI, host := startMachine(&mc, &n) + mRunner, preExists, machineAPI, host := startMachine(&cc, &n) defer machineAPI.Close() // wait for preloaded tarball to finish downloading before configuring runtimes waitCacheRequiredImages(&cacheGroup) - sv, err := util.ParseKubernetesVersion(mc.KubernetesConfig.KubernetesVersion) + sv, err := util.ParseKubernetesVersion(n.KubernetesVersion) if err != nil { - return nil, err + exit.WithError("Failed to parse kubernetes version", err) } // configure the runtime (docker, containerd, crio) - cr := configureRuntimes(mRunner, driverName, mc.KubernetesConfig, sv) - showVersionInfo(k8sVersion, cr) + cr := configureRuntimes(mRunner, cc.Driver, cc.KubernetesConfig, sv) + showVersionInfo(n.KubernetesVersion, cr) - //TODO(sharifelgamal): Part out the cluster-wide operations, perhaps using the "primary" param + var bs bootstrapper.Bootstrapper + var kubeconfig *kubeconfig.Settings + if apiServer { + // Must be written before bootstrap, otherwise health checks may flake due to stale IP + kubeconfig, err = setupKubeconfig(host, &cc, &n, cc.Name) + if err != nil { + exit.WithError("Failed to setup kubeconfig", err) + } - // Must be written before bootstrap, otherwise health checks may flake due to stale IP - kubeconfig, err := setupKubeconfig(host, &mc, &n, mc.Name) - if err != nil { - exit.WithError("Failed to setup kubeconfig", err) - } + // setup kubeadm (must come after setupKubeconfig) + bs = setupKubeAdm(machineAPI, cc, n) + err = bs.StartCluster(cc) + if err != nil { + exit.WithLogEntries("Error starting cluster", err, logs.FindProblems(cr, bs, mRunner)) + } + } else { + bs, err = cluster.Bootstrapper(machineAPI, viper.GetString(cmdcfg.Bootstrapper), cc, n) + if err != nil { + exit.WithError("Failed to get bootstrapper", err) + } - // setup kubeadm 
(must come after setupKubeconfig) - bs := setupKubeAdm(machineAPI, mc, n) + if err = bs.SetupCerts(cc.KubernetesConfig, n); err != nil { + exit.WithError("setting up certs", err) + } - // pull images or restart cluster - out.T(out.Launch, "Launching Kubernetes ... ") - if err := bs.StartCluster(mc); err != nil { - exit.WithLogEntries("Error starting cluster", err, logs.FindProblems(cr, bs, mRunner)) } + configureMounts() + if err := CacheAndLoadImagesInConfig(); err != nil { + out.T(out.FailureType, "Unable to load cached images from config file.") + } + // enable addons, both old and new! if existingAddons != nil { - addons.Start(viper.GetString(config.ProfileName), existingAddons, AddonList) + addons.Start(viper.GetString(config.ProfileName), existingAddons, config.AddonList) } - if err = CacheAndLoadImagesInConfig(); err != nil { - out.T(out.FailureType, "Unable to load cached images from config file.") + if apiServer { + // special ops for the none driver, like changing the minikube directory. + // multi-node support doesn't work on the none driver + if cc.Driver == driver.None && len(cc.Nodes) == 1 { + prepareNone() + } + + // Skip pre-existing, because we already waited for health + if viper.GetBool(waitUntilHealthy) && !preExists { + if err := bs.WaitForNode(cc, n, viper.GetDuration(waitTimeout)); err != nil { + exit.WithError("Wait failed", err) + } + } + } else { + if err := bs.UpdateNode(cc, n, cr); err != nil { + exit.WithError("Updating node", err) + } + + cp, err := config.PrimaryControlPlane(&cc) + if err != nil { + exit.WithError("Getting primary control plane", err) + } + cpBs, err := cluster.Bootstrapper(machineAPI, viper.GetString(cmdcfg.Bootstrapper), cc, cp) + if err != nil { + exit.WithError("Getting bootstrapper", err) + } + + joinCmd, err := cpBs.GenerateToken(cc) + if err != nil { + exit.WithError("generating join token", err) + } + + if err = bs.JoinCluster(cc, n, joinCmd); err != nil { + exit.WithError("joining cluster", err) + } + } + + return kubeconfig + 
+} + +// configureRuntimes does what needs to happen to get a runtime going. +func configureRuntimes(runner cruntime.CommandRunner, drvName string, k8s config.KubernetesConfig, kv semver.Version) cruntime.Manager { + co := cruntime.Config{ + Type: viper.GetString(containerRuntime), + Runner: runner, ImageRepository: k8s.ImageRepository, + KubernetesVersion: kv, + } + cr, err := cruntime.New(co) + if err != nil { + exit.WithError("Failed runtime", err) + } + + disableOthers := true + if driver.BareMetal(drvName) { + disableOthers = false + } + + // Preload is overly invasive for bare metal, and caching is not meaningful. KIC handled elsewhere. + if driver.IsVM(drvName) { + if err := cr.Preload(k8s); err != nil { + switch err.(type) { + case *cruntime.ErrISOFeature: + out.T(out.Tip, "Existing disk is missing new features ({{.error}}). To upgrade, run 'minikube delete'", out.V{"error": err}) + default: + glog.Warningf("%s preload failed: %v, falling back to caching images", cr.Name(), err) + } + + if err := machine.CacheImagesForBootstrapper(k8s.ImageRepository, k8s.KubernetesVersion, viper.GetString(cmdcfg.Bootstrapper)); err != nil { + exit.WithError("Failed to cache images", err) + } + } + } + + err = cr.Enable(disableOthers) + if err != nil { + exit.WithError("Failed to enable container runtime", err) + } + + return cr +} + +// setupKubeAdm creates the bootstrapper, updates the cluster, and sets up certs before Kubernetes is started +func setupKubeAdm(mAPI libmachine.API, cfg config.ClusterConfig, n config.Node) bootstrapper.Bootstrapper { + bs, err := cluster.Bootstrapper(mAPI, viper.GetString(cmdcfg.Bootstrapper), cfg, n) + if err != nil { + exit.WithError("Failed to get bootstrapper", err) + } + for _, eo := range config.ExtraOptions { + out.T(out.Option, "{{.extra_option_component_name}}.{{.key}}={{.value}}", out.V{"extra_option_component_name": eo.Component, "key": eo.Key, "value": eo.Value}) + } + // Loads cached images, generates config files, downloads binaries + if err := 
bs.UpdateCluster(cfg); err != nil { + exit.WithError("Failed to update cluster", err) + } + if err := bs.SetupCerts(cfg.KubernetesConfig, n); err != nil { + exit.WithError("Failed to setup certs", err) + } + return bs +} + +func setupKubeconfig(h *host.Host, cc *config.ClusterConfig, n *config.Node, clusterName string) (*kubeconfig.Settings, error) { + addr, err := apiServerURL(*h, *cc, *n) + if err != nil { + exit.WithError("Failed to get API Server URL", err) + } + + if cc.KubernetesConfig.APIServerName != constants.APIServerName { + addr = strings.Replace(addr, n.IP, cc.KubernetesConfig.APIServerName, -1) + } + kcs := &kubeconfig.Settings{ + ClusterName: clusterName, + ClusterServerAddress: addr, + ClientCertificate: localpath.MakeMiniPath("client.crt"), + ClientKey: localpath.MakeMiniPath("client.key"), + CertificateAuthority: localpath.MakeMiniPath("ca.crt"), + KeepContext: viper.GetBool(keepContext), + EmbedCerts: viper.GetBool(embedCerts), } - // special ops for none , like change minikube directory. - if driverName == driver.None { - prepareNone() + kcs.SetPath(kubeconfig.PathFromEnv()) + if err := kubeconfig.Update(kcs); err != nil { + return kcs, err } + return kcs, nil +} - // Skip pre-existing, because we already waited for health - if viper.GetBool(waitUntilHealthy) && !preExists { - if err := bs.WaitForCluster(mc, viper.GetDuration(waitTimeout)); err != nil { - exit.WithError("Wait failed", err) +func apiServerURL(h host.Host, cc config.ClusterConfig, n config.Node) (string, error) { + hostname := "" + port := n.Port + var err error + if driver.IsKIC(h.DriverName) { + // for kic drivers we use 127.0.0.1 instead of node IP, + // because of limitations of Docker on macOS in reaching the container's IP.
+ hostname = oci.DefaultBindIPV4 + port, err = oci.ForwardedPort(h.DriverName, h.Name, port) + if err != nil { + return "", errors.Wrap(err, "host port binding") } + } else { + hostname, err = h.Driver.GetIP() + if err != nil { + return "", errors.Wrap(err, "get ip") + } + } + + if cc.KubernetesConfig.APIServerName != constants.APIServerName { + hostname = cc.KubernetesConfig.APIServerName + } + return fmt.Sprintf("https://" + net.JoinHostPort(hostname, strconv.Itoa(port))), nil +} + +// StartMachine starts a VM +func startMachine(cfg *config.ClusterConfig, node *config.Node) (runner command.Runner, preExists bool, machineAPI libmachine.API, host *host.Host) { + m, err := machine.NewAPIClient() + if err != nil { + exit.WithError("Failed to get machine client", err) + } + host, preExists = startHost(m, *cfg, *node) + runner, err = machine.CommandRunner(host) + if err != nil { + exit.WithError("Failed to get command runner", err) + } + + ip := validateNetwork(host, runner) + + // Bypass proxy for minikube's vm host ip + err = proxy.ExcludeIP(ip) + if err != nil { + out.ErrT(out.FailureType, "Failed to set NO_PROXY Env. Please use `export NO_PROXY=$NO_PROXY,{{.ip}}`.", out.V{"ip": ip}) + } + + // Save IP to config file for subsequent use + node.IP = ip + err = config.SaveNode(cfg, node) + if err != nil { + exit.WithError("saving node", err) + } + + return runner, preExists, m, host +} + +// startHost starts a new minikube host using a VM or None +func startHost(api libmachine.API, mc config.ClusterConfig, n config.Node) (*host.Host, bool) { + host, exists, err := machine.StartHost(api, mc, n) + if err != nil { + exit.WithError("Unable to start VM. 
Please investigate and run 'minikube delete' if possible", err) } + return host, exists +} - return kubeconfig, nil +// validateNetwork tries to catch network problems as soon as possible +func validateNetwork(h *host.Host, r command.Runner) string { + ip, err := h.Driver.GetIP() + if err != nil { + exit.WithError("Unable to get VM IP address", err) + } + + optSeen := false + warnedOnce := false + for _, k := range proxy.EnvVars { + if v := os.Getenv(k); v != "" { + if !optSeen { + out.T(out.Internet, "Found network options:") + optSeen = true + } + out.T(out.Option, "{{.key}}={{.value}}", out.V{"key": k, "value": v}) + ipExcluded := proxy.IsIPExcluded(ip) // Skip warning if minikube ip is already in NO_PROXY + k = strings.ToUpper(k) // for http_proxy & https_proxy + if (k == "HTTP_PROXY" || k == "HTTPS_PROXY") && !ipExcluded && !warnedOnce { + out.WarningT("You appear to be using a proxy, but your NO_PROXY environment does not include the minikube IP ({{.ip_address}}). Please see {{.documentation_url}} for more details", out.V{"ip_address": ip, "documentation_url": "https://minikube.sigs.k8s.io/docs/reference/networking/proxy/"}) + warnedOnce = true + } + } + } + + if !driver.BareMetal(h.Driver.DriverName()) && !driver.IsKIC(h.Driver.DriverName()) { + trySSH(h, ip) + } + + tryLookup(r) + tryRegistry(r) + return ip +} + +func trySSH(h *host.Host, ip string) { + if viper.GetBool("force") { + return + } + + sshAddr := net.JoinHostPort(ip, "22") + + dial := func() (err error) { + d := net.Dialer{Timeout: 3 * time.Second} + conn, err := d.Dial("tcp", sshAddr) + if err != nil { + out.WarningT("Unable to verify SSH connectivity: {{.error}}. 
Will retry...", out.V{"error": err}) + return err + } + _ = conn.Close() + return nil + } + + if err := retry.Expo(dial, time.Second, 13*time.Second); err != nil { + exit.WithCodeT(exit.IO, `minikube is unable to connect to the VM: {{.error}} + + This is likely due to one of two reasons: + + - VPN or firewall interference + - {{.hypervisor}} network configuration issue + + Suggested workarounds: + + - Disable your local VPN or firewall software + - Configure your local VPN or firewall to allow access to {{.ip}} + - Restart or reinstall {{.hypervisor}} + - Use an alternative --vm-driver + - Use --force to override this connectivity check + `, out.V{"error": err, "hypervisor": h.Driver.DriverName(), "ip": ip}) + } +} + +func tryLookup(r command.Runner) { + // DNS check + if rr, err := r.RunCmd(exec.Command("nslookup", "kubernetes.io", "-type=ns")); err != nil { + glog.Infof("%s failed: %v which might be okay will retry nslookup without query type", rr.Args, err) + // will try with without query type for ISOs with different busybox versions. + if _, err = r.RunCmd(exec.Command("nslookup", "kubernetes.io")); err != nil { + glog.Warningf("nslookup failed: %v", err) + out.WarningT("Node may be unable to resolve external DNS records") + } + } +} +func tryRegistry(r command.Runner) { + // Try an HTTPS connection to the image repository + proxy := os.Getenv("HTTPS_PROXY") + opts := []string{"-sS"} + if proxy != "" && !strings.HasPrefix(proxy, "localhost") && !strings.HasPrefix(proxy, "127.0") { + opts = append([]string{"-x", proxy}, opts...) 
+ } + + repo := viper.GetString(imageRepository) + if repo == "" { + repo = images.DefaultKubernetesRepo + } + + opts = append(opts, fmt.Sprintf("https://%s/", repo)) + if rr, err := r.RunCmd(exec.Command("curl", opts...)); err != nil { + glog.Warningf("%s failed: %v", rr.Args, err) + out.WarningT("VM is unable to access {{.repository}}, you may need to configure a proxy or set --image-repository", out.V{"repository": repo}) + } } // prepareNone prepares the user and host for the joy of the "none" driver diff --git a/pkg/provision/provision.go b/pkg/provision/provision.go index 52fb131960d4..acad46c3ac06 100644 --- a/pkg/provision/provision.go +++ b/pkg/provision/provision.go @@ -39,6 +39,7 @@ import ( "k8s.io/minikube/pkg/minikube/assets" "k8s.io/minikube/pkg/minikube/command" "k8s.io/minikube/pkg/minikube/config" + "k8s.io/minikube/pkg/minikube/driver" "k8s.io/minikube/pkg/minikube/sshutil" ) @@ -195,7 +196,8 @@ func setRemoteAuthOptions(p provision.Provisioner) auth.Options { } func setContainerRuntimeOptions(name string, p miniProvisioner) error { - c, err := config.Load(name) + cluster, _ := driver.ClusterNameFromMachine(name) + c, err := config.Load(cluster) if err != nil { return errors.Wrap(err, "getting cluster config") }