diff --git a/Godeps/Godeps.json b/Godeps/Godeps.json index 02b2e533089a..82ff8628b778 100644 --- a/Godeps/Godeps.json +++ b/Godeps/Godeps.json @@ -1,11 +1,15 @@ { "ImportPath": "k8s.io/minikube", "GoVersion": "go1.9", - "GodepVersion": "v79", + "GodepVersion": "v74", "Packages": [ - "./..." + "github.com/elgs/gostrgen" ], "Deps": [ + { + "ImportPath": "github.com/elgs/gostrgen", + "Rev": "9d61ae07eeaeb54ce2100022b1fd3ea020093d09" + }, { "ImportPath": "bitbucket.org/ww/goautoneg", "Comment": "null-5", diff --git a/Makefile b/Makefile index 7558918b4b09..e12f9b9a0384 100755 --- a/Makefile +++ b/Makefile @@ -140,7 +140,7 @@ ifeq ($(IN_DOCKER),1) $(MAKE) minikube_iso else docker run --rm --workdir /mnt --volume $(CURDIR):/mnt $(ISO_DOCKER_EXTRA_ARGS) \ - --user $(shell id -u):$(shell id -g) --env HOME=/tmp --env IN_DOCKER=1 \ + --env HOME=/tmp --env IN_DOCKER=1 \ $(ISO_BUILD_IMAGE) /usr/bin/make out/minikube.iso endif diff --git a/cmd/minikube/cmd/delete.go b/cmd/minikube/cmd/delete.go index 54ec6abe187b..f4d4cadaf82f 100644 --- a/cmd/minikube/cmd/delete.go +++ b/cmd/minikube/cmd/delete.go @@ -49,7 +49,7 @@ associated files.`, } defer api.Close() - if err = cluster.DeleteHost(api); err != nil { + if err = cluster.DeleteHost(pkg_config.GetMachineName(), api); err != nil { fmt.Println("Errors occurred deleting machine: ", err) os.Exit(1) } diff --git a/cmd/minikube/cmd/env.go b/cmd/minikube/cmd/env.go index d5a0cf4ad8cc..75441836bb6a 100644 --- a/cmd/minikube/cmd/env.go +++ b/cmd/minikube/cmd/env.go @@ -146,7 +146,7 @@ func generateUsageHint(userShell string) string { func shellCfgSet(api libmachine.API) (*ShellConfig, error) { - envMap, err := cluster.GetHostDockerEnv(api) + envMap, err := cluster.GetHostDockerEnv(config.GetMachineName(), api) if err != nil { return nil, err } diff --git a/cmd/minikube/cmd/node/add.go b/cmd/minikube/cmd/node/add.go new file mode 100644 index 000000000000..f8c95c6afba0 --- /dev/null +++ b/cmd/minikube/cmd/node/add.go @@ -0,0 +1,57 @@ +package node + +import ( + "fmt" + "os" + + "github.com/golang/glog" + "github.com/spf13/cobra" + "github.com/spf13/viper" + + "k8s.io/minikube/cmd/minikube/profile" + cmdutil "k8s.io/minikube/cmd/util" + "k8s.io/minikube/pkg/minikube" + cfg "k8s.io/minikube/pkg/minikube/config" +) + +func NewCmdAdd() *cobra.Command { + return &cobra.Command{ + Use: "add ", + Short: "Adds a node to the cluster", + Long: "Adds a node to the cluster", + Run: add, + } +} + +func add(cmd *cobra.Command, args []string) { + // TODO Make clusterName into `--cluster=` flag + clusterName := viper.GetString(cfg.MachineProfile) + + nodeName := "" + if len(args) > 0 { + nodeName = args[0] + } + + cfg, err := profile.LoadConfigFromFile(clusterName) + if err != nil { + glog.Errorln("Error loading profile config: ", err) + cmdutil.MaybeReportErrorAndExit(err) + } + + if nodeName == "" { + nodeName = fmt.Sprintf("node-%d", len(cfg.Nodes)+1) + } + + node := minikube.NodeConfig{ + Name: nodeName, + } + + cfg.Nodes = append(cfg.Nodes, node) + + if err := profile.SaveConfig(clusterName, cfg); err != nil { + glog.Errorln("Error saving profile cluster configuration: ", err) + os.Exit(1) + } + + fmt.Println("Added node: ", node.Name) +} diff --git a/cmd/minikube/cmd/node/list.go b/cmd/minikube/cmd/node/list.go new file mode 100644 index 000000000000..16911f040e97 --- /dev/null +++ b/cmd/minikube/cmd/node/list.go @@ -0,0 +1,68 @@ +package node + +import ( + "fmt" + "os" + + "github.com/golang/glog" + "github.com/spf13/cobra" + + "k8s.io/minikube/cmd/minikube/profile" + 
cmdutil "k8s.io/minikube/cmd/util" + "k8s.io/minikube/pkg/minikube" + "k8s.io/minikube/pkg/minikube/machine" + "k8s.io/minikube/pkg/minikube/node" +) + +func NewCmdList() *cobra.Command { + return &cobra.Command{ + Use: "list", + Short: "Lists all nodes", + Long: "Lists all nodes", + Run: list, + } +} + +func list(cmd *cobra.Command, args []string) { + configs, err := profile.LoadClusterConfigs() + if err != nil { + glog.Errorln("Error loading cluster configs: ", err) + cmdutil.MaybeReportErrorAndExit(err) + } + + api, err := machine.NewAPIClient() + if err != nil { + glog.Errorf("Error getting client: %s\n", err) + os.Exit(1) + } + defer api.Close() + + fmt.Printf("%-20s %-20s %-16s %-20s\n", "CLUSTER", "NODE", "IP", "STATUS") + + nodesFound := false + for _, c := range configs { + for _, nc := range c.Nodes { + nodesFound = true + n := node.NewNode(nc, c.MachineConfig, c.ClusterName, api) + status, err := n.Status() + if err != nil { + status = minikube.NodeStatus("Error: " + err.Error()) + } + + ip := "" + if status == minikube.StatusRunning { + ip, err = n.IP() + if err != nil { + glog.Errorf("Error getting IP address for node %s: %s", nc.Name, err) + cmdutil.MaybeReportErrorAndExit(err) + } + } + + fmt.Printf("%-20s %-20s %-16s %-20s\n", c.ClusterName, nc.Name, ip, status) + } + } + + if !nodesFound { + fmt.Println("No nodes found.") + } +} diff --git a/cmd/minikube/cmd/node/node.go b/cmd/minikube/cmd/node/node.go new file mode 100644 index 000000000000..2459c6a8bac0 --- /dev/null +++ b/cmd/minikube/cmd/node/node.go @@ -0,0 +1,50 @@ +package node + +import ( + "fmt" + "os" + + "github.com/pkg/errors" + "github.com/spf13/cobra" + + "k8s.io/minikube/cmd/minikube/profile" + "k8s.io/minikube/pkg/minikube" +) + +const internalErrorCode = -1 + +func NewCmdNode() *cobra.Command { + cmd := &cobra.Command{ + Use: "node SUBCOMMAND [flags]", + Short: "Control a minikube cluster's nodes", + Long: `Control a cluster's nodes using subcommands like "minikube node add "`, + Run: func(cmd *cobra.Command, args []string) { + cmd.Help() + }, + } + cmd.AddCommand(NewCmdAdd()) + cmd.AddCommand(NewCmdList()) + cmd.AddCommand(NewCmdRemove()) + cmd.AddCommand(NewCmdSsh()) + cmd.AddCommand(NewCmdStart()) + return cmd +} + +func getMachineName(clusterName string, node minikube.NodeConfig) string { + return fmt.Sprintf("%s-%s", clusterName, node.Name) +} + +func getNode(clusterName, nodeName string) (minikube.NodeConfig, error) { + cfg, err := profile.LoadConfigFromFile(clusterName) + if err != nil && !os.IsNotExist(err) { + return minikube.NodeConfig{}, errors.Errorf("Error loading profile config: %s", err) + } + + for _, node := range cfg.Nodes { + if node.Name == nodeName { + return node, nil + } + } + + return minikube.NodeConfig{}, errors.Errorf("Node not found in cluster. 
cluster: %s node: %s", clusterName, nodeName) +} diff --git a/cmd/minikube/cmd/node/remove.go b/cmd/minikube/cmd/node/remove.go new file mode 100644 index 000000000000..8f08f2c155cb --- /dev/null +++ b/cmd/minikube/cmd/node/remove.go @@ -0,0 +1,77 @@ +package node + +import ( + "os" + + "github.com/golang/glog" + "github.com/spf13/cobra" + "github.com/spf13/viper" + + "k8s.io/minikube/cmd/minikube/profile" + cmdutil "k8s.io/minikube/cmd/util" + "k8s.io/minikube/pkg/minikube/cluster" + cfg "k8s.io/minikube/pkg/minikube/config" + "k8s.io/minikube/pkg/minikube/machine" +) + +func NewCmdRemove() *cobra.Command { + return &cobra.Command{ + Use: "remove ", + Short: "Removes a node from the cluster", + Long: "Removes a node from the cluster", + Run: remove, + } +} + +func remove(cmd *cobra.Command, args []string) { + clusterName := viper.GetString(cfg.MachineProfile) + + if len(args) == 0 || args[0] == "" { + glog.Error("node_name is required.") + os.Exit(1) + } + + nodeName := args[0] + + cfg, err := profile.LoadConfigFromFile(clusterName) + if err != nil && !os.IsNotExist(err) { + glog.Errorln("Error loading profile config: ", err) + cmdutil.MaybeReportErrorAndExit(err) + } + api, err := machine.NewAPIClient() + if err != nil { + glog.Errorf("Error getting client: %s\n", err) + os.Exit(1) + } + defer api.Close() + + for i, node := range cfg.Nodes { + if node.Name == nodeName { + machineName := getMachineName(clusterName, node) + exists, err := api.Exists(machineName) + if err != nil { + glog.Errorln("Error removing node: ", err) + os.Exit(1) + } + + if exists { + if err := cluster.DeleteHost(machineName, api); err != nil { + glog.Errorln("Error removing node: ", err) + os.Exit(1) + } + } + + cfg.Nodes = append(cfg.Nodes[:i], cfg.Nodes[i+1:]...) + break + + } else if i == len(cfg.Nodes)-1 { + glog.Errorln("Node not found: ", nodeName) + os.Exit(1) + } + } + + if err := profile.SaveConfig(clusterName, cfg); err != nil { + glog.Errorln("Error saving profile cluster configuration: ", err) + os.Exit(1) + } +} diff --git a/cmd/minikube/cmd/node/ssh.go b/cmd/minikube/cmd/node/ssh.go new file mode 100644 index 000000000000..bd7d4f528299 --- /dev/null +++ b/cmd/minikube/cmd/node/ssh.go @@ -0,0 +1,65 @@ +package node + +import ( + "fmt" + "os" + + "github.com/golang/glog" + "github.com/pkg/errors" + "github.com/spf13/cobra" + "github.com/spf13/viper" + + "k8s.io/minikube/pkg/minikube/cluster" + cfg "k8s.io/minikube/pkg/minikube/config" + "k8s.io/minikube/pkg/minikube/machine" +) + +func NewCmdSsh() *cobra.Command { + return &cobra.Command{ + Use: "ssh", + Short: "Log into or run a command on a machine with SSH; similar to 'docker-machine ssh'", + Long: "Log into or run a command on a machine with SSH; similar to 'docker-machine ssh'.", + Run: ssh, + } +} + +func ssh(cmd *cobra.Command, args []string) { + clusterName := viper.GetString(cfg.MachineProfile) + + if len(args) == 0 || args[0] == "" { + glog.Error("node_name is required.") + os.Exit(1) + } + + nodeName := args[0] + args = args[1:] + + node, err := getNode(clusterName, nodeName) + if err != nil { + glog.Error("Error loading node: ", err) + os.Exit(1) + } + + api, err := machine.NewAPIClient() + if err != nil { + fmt.Fprintf(os.Stderr, "Error getting client: %s\n", err) + os.Exit(1) + } + defer api.Close() + + machineName := getMachineName(clusterName, node) + host, err := cluster.CheckIfApiExistsAndLoadByName(machineName, api) + if err != nil { + fmt.Fprintf(os.Stderr, "Error getting host: %s\n", err) + os.Exit(1) + } + if 
host.Driver.DriverName() == "none" { + fmt.Println(`'none' driver does not support 'minikube ssh' command`) + os.Exit(0) + } + err = cluster.CreateSSHShellByName(machineName, api, args) + if err != nil { + glog.Errorln(errors.Wrap(err, "Error attempting to ssh/run-ssh-command")) + os.Exit(1) + } +} diff --git a/cmd/minikube/cmd/node/start.go b/cmd/minikube/cmd/node/start.go new file mode 100644 index 000000000000..7fdfd47d0dbe --- /dev/null +++ b/cmd/minikube/cmd/node/start.go @@ -0,0 +1,81 @@ +package node + +import ( + "fmt" + "os" + + "github.com/golang/glog" + "github.com/spf13/cobra" + "github.com/spf13/viper" + + "k8s.io/minikube/cmd/minikube/profile" + cmdutil "k8s.io/minikube/cmd/util" + "k8s.io/minikube/pkg/minikube/bootstrapper/kubeadm" + cfg "k8s.io/minikube/pkg/minikube/config" + "k8s.io/minikube/pkg/minikube/machine" + "k8s.io/minikube/pkg/minikube/node" +) + +func NewCmdStart() *cobra.Command { + return &cobra.Command{ + Use: "start [node_name [node_name] ...]", + Short: "Starts nodes", + Long: "Starts nodes", + Run: startNode, + } +} + +func startNode(cmd *cobra.Command, args []string) { + nodeNames := args + clusterName := viper.GetString(cfg.MachineProfile) + + cfg, err := profile.LoadConfigFromFile(clusterName) + if err != nil { + glog.Errorln("Error loading profile config: ", err) + cmdutil.MaybeReportErrorAndExit(err) + } + + fmt.Println("Starting nodes...") + + api, err := machine.NewAPIClient() + if err != nil { + glog.Errorf("Error getting client: %s\n", err) + os.Exit(1) + } + defer api.Close() + + for _, nodeCfg := range cfg.Nodes { + name := nodeCfg.Name + if isExcluded(nodeNames, name) { + continue + } + + fmt.Printf("Starting node: %s\n", name) + + n := node.NewNode(nodeCfg, cfg.MachineConfig, clusterName, api) + if err := n.Start(); err != nil { + glog.Errorln("Error starting node machine: ", err) + cmdutil.MaybeReportErrorAndExit(err) + } + + b := kubeadm.NewWorkerBootstrapper(cfg.KubernetesConfig, os.Stdout) + if err := b.Bootstrap(n); err != nil { + glog.Errorln("Error bootstrapping node: ", err) + cmdutil.MaybeReportErrorAndExit(err) + } + fmt.Printf("Node %s started and configured.\n", n.Name()) + } +} + +func isExcluded(nodeNames []string, nodeName string) bool { + return len(nodeNames) > 0 && !contains(nodeNames, nodeName) +} + +func contains(s []string, v string) bool { + for _, str := range s { + if str == v { + return true + } + } + return false +} diff --git a/cmd/minikube/cmd/root.go b/cmd/minikube/cmd/root.go index 96c613a93980..e363971b28d3 100755 --- a/cmd/minikube/cmd/root.go +++ b/cmd/minikube/cmd/root.go @@ -32,6 +32,7 @@ import ( "github.com/spf13/pflag" "github.com/spf13/viper" configCmd "k8s.io/minikube/cmd/minikube/cmd/config" + nodeCmd "k8s.io/minikube/cmd/minikube/cmd/node" "k8s.io/minikube/cmd/util" "k8s.io/minikube/pkg/minikube/bootstrapper" "k8s.io/minikube/pkg/minikube/bootstrapper/kubeadm" @@ -125,12 +126,13 @@ func setFlagsUsingViper() { } func init() { - RootCmd.PersistentFlags().StringP(config.MachineProfile, "p", constants.DefaultMachineName, `The name of the minikube VM being used. + RootCmd.PersistentFlags().StringP(config.MachineProfile, "p", constants.DefaultMachineName, `The name of the minikube VM being used. 
This can be modified to allow for multiple minikube instances to be run independently`) RootCmd.PersistentFlags().StringP(configCmd.Bootstrapper, "b", constants.DefaultClusterBootstrapper, "The name of the cluster bootstrapper that will set up the kubernetes cluster.") RootCmd.AddCommand(configCmd.ConfigCmd) RootCmd.AddCommand(configCmd.AddonsCmd) RootCmd.AddCommand(configCmd.ProfileCmd) + RootCmd.AddCommand(nodeCmd.NewCmdNode()) pflag.CommandLine.AddGoFlagSet(goflag.CommandLine) viper.BindPFlags(RootCmd.PersistentFlags()) diff --git a/cmd/minikube/cmd/start.go b/cmd/minikube/cmd/start.go index 34f19280f57f..4567b5b09a88 100644 --- a/cmd/minikube/cmd/start.go +++ b/cmd/minikube/cmd/start.go @@ -17,7 +17,6 @@ limitations under the License. package cmd import ( - "encoding/json" "fmt" "io/ioutil" "os" @@ -29,10 +28,12 @@ import ( "github.com/blang/semver" "github.com/docker/machine/libmachine/host" + "github.com/elgs/gostrgen" "github.com/golang/glog" "github.com/spf13/cobra" "github.com/spf13/viper" cmdcfg "k8s.io/minikube/cmd/minikube/cmd/config" + "k8s.io/minikube/cmd/minikube/profile" cmdutil "k8s.io/minikube/cmd/util" "k8s.io/minikube/pkg/minikube/bootstrapper" "k8s.io/minikube/pkg/minikube/cluster" @@ -100,6 +101,8 @@ func runStart(cmd *cobra.Command, args []string) { if shouldCacheImages { go machine.CacheImagesForBootstrapper(k8sVersion, clusterBootstrapper) } + + // NOTE Instantiate docker-machine API api, err := machine.NewAPIClient() if err != nil { fmt.Fprintf(os.Stderr, "Error getting client: %s\n", err) @@ -107,11 +110,6 @@ func runStart(cmd *cobra.Command, args []string) { } defer api.Close() - exists, err := api.Exists(cfg.GetMachineName()) - if err != nil { - glog.Exitf("checking if machine exists: %s", err) - } - diskSize := viper.GetString(humanReadableDiskSize) diskSizeMB := pkgutil.CalculateDiskSizeInMB(diskSize) @@ -126,7 +124,9 @@ func runStart(cmd *cobra.Command, args []string) { validateK8sVersion(k8sVersion) } + // NOTE Create machine config config := cluster.MachineConfig{ + MachineName: cfg.GetMachineName(), MinikubeISO: viper.GetString(isoURL), Memory: viper.GetInt(memory), CPUs: viper.GetInt(cpus), @@ -147,6 +147,13 @@ func runStart(cmd *cobra.Command, args []string) { UUID: viper.GetString(uuid), } + // NOTE Check if machine exists + exists, err := api.Exists(config.MachineName) + if err != nil { + glog.Exitf("checking if machine exists: %s", err) + } + + // NOTE Start machine fmt.Printf("Starting local Kubernetes %s cluster...\n", viper.GetString(kubernetesVersion)) fmt.Println("Starting VM...") var host *host.Host @@ -163,6 +170,7 @@ func runStart(cmd *cobra.Command, args []string) { cmdutil.MaybeReportErrorAndExit(err) } + // NOTE Get machine IP fmt.Println("Getting VM IP address...") ip, err := host.Driver.GetIP() if err != nil { @@ -172,8 +180,10 @@ func runStart(cmd *cobra.Command, args []string) { selectedKubernetesVersion := viper.GetString(kubernetesVersion) + // NOTE Load cluster cfg // Load profile cluster config from file - cc, err := loadConfigFromFile(viper.GetString(cfg.MachineProfile)) + profileName := viper.GetString(cfg.MachineProfile) + cc, err := profile.LoadConfigFromFile(profileName) if err != nil && !os.IsNotExist(err) { glog.Errorln("Error loading profile config: ", err) } @@ -195,6 +205,11 @@ func runStart(cmd *cobra.Command, args []string) { } } + token, err := genBootstrapToken() + if err != nil { + glog.Exitf("Error generating bootstrap token: ", err) + } + kubernetesConfig := bootstrapper.KubernetesConfig{ KubernetesVersion: 
selectedKubernetesVersion, NodeIP: ip, @@ -207,8 +222,10 @@ ServiceCIDR: pkgutil.DefaultServiceCIDR, ExtraOptions: extraOptions, ShouldLoadCachedImages: shouldCacheImages, + BootstrapToken: token, } + // NOTE Get bootstrapper k8sBootstrapper, err := GetClusterBootstrapper(api, clusterBootstrapper) if err != nil { glog.Exitf("Error getting cluster bootstrapper: %s", err) @@ -216,14 +233,16 @@ // Write profile cluster configuration to file clusterConfig := cluster.Config{ + ClusterName: profileName, MachineConfig: config, KubernetesConfig: kubernetesConfig, } - if err := saveConfig(clusterConfig); err != nil { + if err := profile.SaveConfig(profileName, clusterConfig); err != nil { glog.Errorln("Error saving profile cluster configuration: ", err) } + // NOTE Configure machine fmt.Println("Moving files into cluster...") if err := k8sBootstrapper.UpdateCluster(kubernetesConfig); err != nil { glog.Errorln("Error updating cluster: ", err) @@ -327,10 +346,10 @@ You will need to move the files to the appropriate location and then set the cor sudo mv /root/.kube $HOME/.kube # this will write over any previous configuration sudo chown -R $USER $HOME/.kube sudo chgrp -R $USER $HOME/.kube - + sudo mv /root/.minikube $HOME/.minikube # this will write over any previous configuration sudo chown -R $USER $HOME/.minikube - sudo chgrp -R $USER $HOME/.minikube + sudo chgrp -R $USER $HOME/.minikube This can also be done automatically by setting the env var CHANGE_MINIKUBE_NONE_USER=true`) } @@ -396,72 +415,15 @@ func init() { RootCmd.AddCommand(startCmd) } -// saveConfig saves profile cluster configuration in -// $MINIKUBE_HOME/profiles//config.json -func saveConfig(clusterConfig cluster.Config) error { - data, err := json.MarshalIndent(clusterConfig, "", " ") - if err != nil { - return err - } - - profileConfigFile := constants.GetProfileFile(viper.GetString(cfg.MachineProfile)) - - if err := os.MkdirAll(filepath.Dir(profileConfigFile), 0700); err != nil { - return err - } - - if err := saveConfigToFile(data, profileConfigFile); err != nil { - return err - } - - return nil -} - -func saveConfigToFile(data []byte, file string) error { - if _, err := os.Stat(file); os.IsNotExist(err) { - return ioutil.WriteFile(file, data, 0600) - } - - tmpfi, err := ioutil.TempFile(filepath.Dir(file), "config.json.tmp") +func genBootstrapToken() (string, error) { + first, err := gostrgen.RandGen(6, gostrgen.Lower|gostrgen.Digit, "", "") if err != nil { - return err - } - defer os.Remove(tmpfi.Name()) - - if err = ioutil.WriteFile(tmpfi.Name(), data, 0600); err != nil { - return err - } - - if err = tmpfi.Close(); err != nil { - return err - } - - if err = os.Remove(file); err != nil { - return err - } - - if err = os.Rename(tmpfi.Name(), file); err != nil { - return err - } - return nil -} - -func loadConfigFromFile(profile string) (cluster.Config, error) { - var cc cluster.Config - - profileConfigFile := constants.GetProfileFile(profile) - - if _, err := os.Stat(profileConfigFile); os.IsNotExist(err) { - return cc, err + return "", err } - data, err := ioutil.ReadFile(profileConfigFile) + second, err := gostrgen.RandGen(16, gostrgen.Lower|gostrgen.Digit, "", "") if err != nil { - return cc, err - } - - if err := json.Unmarshal(data, &cc); err != nil { - return cc, err + return "", err } - return cc, nil + return fmt.Sprintf("%s.%s", first, second), nil } diff --git a/cmd/minikube/cmd/start_node.go 
b/cmd/minikube/cmd/start_node.go new file mode 100644 index 000000000000..2a1c03473bcb --- /dev/null +++ b/cmd/minikube/cmd/start_node.go @@ -0,0 +1,28 @@ +package cmd + +import ( + "fmt" + + "github.com/docker/machine/libmachine" + "github.com/golang/glog" + "k8s.io/minikube/pkg/minikube/cluster" +) + +func startNodes(api libmachine.API, masterIP string, baseConfig cluster.Config, count int) error { + for i := 0; i < count; i++ { + name := fmt.Sprintf("%s-%d", baseConfig.MachineConfig.MachineName, i+1) + newConfig := newConfig(baseConfig.MachineConfig, name) + glog.Infoln("Creating machine: %s", name) + _, err := cluster.StartHost(api, newConfig) + if err != nil { + return err + } + } + + return nil +} + +func newConfig(baseConfig cluster.MachineConfig, machineName string) cluster.MachineConfig { + baseConfig.MachineName = machineName + return baseConfig +} diff --git a/cmd/minikube/cmd/status.go b/cmd/minikube/cmd/status.go index 6c74d7031e4a..0d020843384e 100755 --- a/cmd/minikube/cmd/status.go +++ b/cmd/minikube/cmd/status.go @@ -66,7 +66,7 @@ var statusCmd = &cobra.Command{ } defer api.Close() - ms, err := cluster.GetHostStatus(api) + ms, err := cluster.GetHostStatus(config.GetMachineName(), api) if err != nil { glog.Errorln("Error getting machine status:", err) cmdUtil.MaybeReportErrorAndExitWithCode(err, internalErrorCode) diff --git a/cmd/minikube/cmd/stop.go b/cmd/minikube/cmd/stop.go index 416e98259db6..8359f7342433 100644 --- a/cmd/minikube/cmd/stop.go +++ b/cmd/minikube/cmd/stop.go @@ -23,6 +23,7 @@ import ( "github.com/spf13/cobra" cmdUtil "k8s.io/minikube/cmd/util" "k8s.io/minikube/pkg/minikube/cluster" + "k8s.io/minikube/pkg/minikube/config" "k8s.io/minikube/pkg/minikube/machine" ) @@ -41,7 +42,7 @@ itself, leaving all files intact. The cluster can be started again with the "sta } defer api.Close() - if err = cluster.StopHost(api); err != nil { + if err = cluster.StopHost(config.GetMachineName(), api); err != nil { fmt.Println("Error stopping machine: ", err) cmdUtil.MaybeReportErrorAndExit(err) } diff --git a/cmd/minikube/profile/profile.go b/cmd/minikube/profile/profile.go new file mode 100644 index 000000000000..6831800dda44 --- /dev/null +++ b/cmd/minikube/profile/profile.go @@ -0,0 +1,121 @@ +package profile + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "os" + "path/filepath" + + "github.com/pkg/errors" + "github.com/spf13/viper" + "k8s.io/minikube/pkg/minikube/cluster" + cfg "k8s.io/minikube/pkg/minikube/config" + "k8s.io/minikube/pkg/minikube/constants" + pkgutil "k8s.io/minikube/pkg/util" +) + +// SaveConfig saves profile cluster configuration in +// $MINIKUBE_HOME/profiles//config.json +func SaveConfig(profile string, clusterConfig cluster.Config) error { + data, err := json.MarshalIndent(clusterConfig, "", " ") + if err != nil { + return err + } + + profileConfigFile := constants.GetProfileFile(viper.GetString(cfg.MachineProfile)) + + if err := os.MkdirAll(filepath.Dir(profileConfigFile), 0700); err != nil { + return err + } + + if err := saveConfigToFile(data, profileConfigFile); err != nil { + return err + } + + return nil +} + +func saveConfigToFile(data []byte, file string) error { + if _, err := os.Stat(file); os.IsNotExist(err) { + return ioutil.WriteFile(file, data, 0600) + } + + tmpfi, err := ioutil.TempFile(filepath.Dir(file), "config.json.tmp") + if err != nil { + return err + } + defer os.Remove(tmpfi.Name()) + + if err = ioutil.WriteFile(tmpfi.Name(), data, 0600); err != nil { + return err + } + + if err = tmpfi.Close(); err != nil { + return 
err + } + + if err = os.Remove(file); err != nil { + return err + } + + if err = os.Rename(tmpfi.Name(), file); err != nil { + return err + } + return nil +} + +func LoadConfigFromFile(profile string) (cluster.Config, error) { + var cc cluster.Config + + if profile == "" { + return cc, fmt.Errorf("Profile name cannot be empty.") + } + + profileConfigFile := constants.GetProfileFile(profile) + + if _, err := os.Stat(profileConfigFile); os.IsNotExist(err) { + return cc, err + } + + data, err := ioutil.ReadFile(profileConfigFile) + if err != nil { + return cc, err + } + + if err := json.Unmarshal(data, &cc); err != nil { + return cc, err + } + + cc.MachineConfig.Downloader = pkgutil.DefaultDownloader{} + + return cc, nil +} + +func LoadClusterConfigs() ([]cluster.Config, error) { + files := constants.GetProfileFiles() + + configs := make([]cluster.Config, len(files)) + for i, f := range files { + c, err := loadConfigFromFile(f) + if err != nil { + return []cluster.Config{}, errors.Wrapf(err, "Error loading config from file: %s", f) + } + configs[i] = c + } + + return configs, nil +} + +func loadConfigFromFile(file string) (cluster.Config, error) { + var c cluster.Config + + reader, err := os.Open(file) + defer reader.Close() + if err != nil { + return c, err + } + + err = json.NewDecoder(reader).Decode(&c) + return c, err +} diff --git a/docs/multi-node-prototype.md b/docs/multi-node-prototype.md new file mode 100644 index 000000000000..bf883bdf727d --- /dev/null +++ b/docs/multi-node-prototype.md @@ -0,0 +1,155 @@ +# multi-node prototype + +## Motivation + +This was inspired by the requests in https://github.com/kubernetes/minikube/issues/94. Having multiple Kubernetes nodes in minikube allows one to test and play around with Kubernetes features that are not available in a single-node cluster. + +Some examples: + +* Scheduling behavior based on + * Resource allocation + * Node selector + * Anti-affinity + * Taints + +* Networking layer + * Experimenting with different CNI plugins and configurations + +* Self-healing + * Simulating node failure + +* Developing Kube components + * Use a multi-node cluster to reproduce kube bugs + * Deploy custom binaries built locally? + +## How to use + +* Check out this branch +* Build minikube using `make out/minikube` (see [Build Guide](docs/contributors/build_guide.md) for more info) +* Run custom binary for all operations, eg: + + ```shell + out/minikube start + out/minikube node add node-1 + out/minikube node start + ``` + +## Current setup and limitations + +### Features + +* Pods can communicate across nodes +* Nodes get non-overlapping subnets for pod IPs +* Theoretically, different CNIs can be used + +### Limitations + +* Tested only with this combination: + * `minikube-darwin-amd64` build, based off v0.24.1 revision + * virtualbox driver + * kubernetes v1.8.0 + * flanneld CNI plugin v0.9.1 +* Only works with `kubeadm` bootstrapper (using `kubeadm join` command) +* Code is hacked-in, minimal refactoring done + * A much cleaner refactoring would be needed to implement in a sustainable way +* CNI (in this case, flannel) is deployed manually, out-of-band (see [kube-flannel.yml](demo/kube-flannel.yml)) +* Pods must be started only after CNI is up and running, otherwise they get a host-local IP instead of a cluster-routable one. 
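+
+  As a quick sanity check (an illustrative example only, using the flannel subnet assumed elsewhere in this document), Pod IPs can be compared against the flannel range after a deployment:
+
+  ```shell
+  # Pods should get addresses inside the flannel subnet (10.244.0.0/16 in this setup).
+  # A Pod with an address outside that range was likely started before the CNI plugin was
+  # ready and needs to be deleted so it is recreated with a cluster-routable IP.
+  kubectl get pod -o wide
+  ```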
+ +* Hard-coded pod subnets in CNI config (set up for `flannel`) + * Same subnet must be used in `controller-manager` config _AND_ in `flannel` config + * Currently hard-coded to `10.244.0.0/16` for flannel +* Some components are explicitly told to use `eth1` address, as `eth0` is not externally routable in the Virtualbox config provided by `libmachine`: + * `kubelet` + * `flannel` +* All nodes share the same configuration, including CPU, disk, and memory capacity + +### Bugs + +* kube-dns Pods need to be deleted and re-created after CNI is installed + + This is because they will not be in the right subnet when they are first started. + +* Deleting the cluster with `minikube delete` will not delete node data and will cause subsequent node starts to crash if the nodes have the same name. + + To fix this you must: + + * Remove the nodes one by one with `minikube node remove <node-name>` _before_ deleting the minikube cluster + + _or_ + + * Delete Virtualbox VMs manually + * Delete the machine data folders manually with: + + ```shell + rm -r ~/.minikube/machines/minikube-*/ + ``` + + where your minikube config path is `~/.minikube` and the minikube profile name is `minikube` (the default) + +## Design concerns + +### UX + +* What should commands look like? + + `minikube node [subcommand]` ? + + Where `subcommand` is: + + * add + * remove + * start + * stop + * ssh + * docker-env + * status + * list + +### Drivers + +* Can all drivers (except `none`) support multi-node? Would it require a lot of custom code for each driver? + + From my experience with the Virtualbox setup, the main requirement is that each node have a unique IP that is routable among all nodes. Most network plugins require layer 2 connectivity between the nodes. This should not be a problem in a local VM network. + +### Local docker images + +* A useful pattern with a single node is to be able to run `eval "$(minikube docker-env)"` and push images directly to the local docker node's daemon. With multiple nodes this becomes trickier. + + There may be some clever solutions to this, but the simplest one is probably just some documentation or helper scripts that allow one to run `docker push` (or other commands) against all the nodes in a loop. + +### Networking + +Multi-node networking requires: + +* subnet allocation for each node +* out-of-band network control/routing plane via CNI or out-of-band agents + +For single-node setups, this is not required. + +* What's a good way to accommodate multi-node without increasing complexity for single-node setups? + + Perhaps multi-node networking could be installed by minikube only after a worker node is added, so for single-node setups no extra complexity is introduced? + +* Should minikube be responsible for configuring the network? + + In my opinion, yes, but it should be overrideable by the user. + +* How should we set up networking? CNI plugin, or out-of-band daemon? + + From what I understand, CNI allows the kubelet to set up networking on-demand, whereas an out-of-band daemon needs to hook into docker's configuration and set up `docker0`'s subnet at startup. + + It seems like CNI would be easier to implement, as it can be deployed as a daemonset via the Kube API. + +* How do we ensure that networking is set up before any other pods are deployed? + + A taint might help with this. It could be removed after CNI is installed. + +* How do we transition into multi-node networking from single-node if Pods are already running with node-local subnets? 
+ + This is not an issue if CNI is always installed, and there is no distinction between single-node and multi-node networking. + + +### Bootstrappers + +* Do we support both `localkube` and `kubeadm`? + +kubeadm is designed for multi-node whereas localkube is not. Is it reasonable to say that multi-node is only supported by `kubeadm`? diff --git a/multi-node-demo/demo-magic.sh b/multi-node-demo/demo-magic.sh new file mode 100644 index 000000000000..fed89cfed156 --- /dev/null +++ b/multi-node-demo/demo-magic.sh @@ -0,0 +1,166 @@ +#!/usr/bin/env bash + +############################################################################### +# +# demo-magic.sh +# +# Copyright (c) 2015 Paxton Hare +# +# This script lets you script demos in bash. It runs through your demo script when you press +# ENTER. It simulates typing and runs commands. +# +############################################################################### + +# the speed to "type" the text +TYPE_SPEED=20 + +# no wait after "p" or "pe" +NO_WAIT=false + +# if > 0, will pause for this amount of seconds before automatically proceeding with any p or pe +PROMPT_TIMEOUT=0 + +# handy color vars for pretty prompts +BLACK="\033[0;30m" +BLUE="\033[0;34m" +GREEN="\033[0;32m" +CYAN="\033[0;36m" +RED="\033[0;31m" +PURPLE="\033[0;35m" +BROWN="\033[0;33m" +WHITE="\033[1;37m" +COLOR_RESET="\033[0m" + +## +# prints the script usage +## +function usage() { + echo -e "" + echo -e "Usage: $0 [options]" + echo -e "" + echo -e "\tWhere options is one or more of:" + echo -e "\t-h\tPrints Help text" + echo -e "\t-d\tDebug mode. Disables simulated typing" + echo -e "\t-n\tNo wait" + echo -e "\t-w\tWaits max the given amount of seconds before proceeding with demo (e.g. `-w5`)" + echo -e "" +} + +## +# wait for user to press ENTER +# if $PROMPT_TIMEOUT > 0 this will be used as the max time for proceeding automatically +## +function wait() { + if [[ "$PROMPT_TIMEOUT" == "0" ]]; then + read -rs + else + read -rst "$PROMPT_TIMEOUT" + fi +} + +## +# print command only. Useful for when you want to pretend to run a command +# +# takes 1 param - the string command to print +# +# usage: p "ls -l" +# +## +function p() { + cmd=$1 + + # render the prompt + x=$(PS1="$DEMO_PROMPT" "$BASH" --norc -i &1 | sed -n '${s/^\(.*\)exit$/\1/p;}') + printf "$x" + + # wait for the user to press a key before typing the command + if !($NO_WAIT); then + wait + fi + + if [[ -z $TYPE_SPEED ]]; then + echo -en "\033[0m$cmd" + else + echo -en "\033[0m$cmd" | pv -qL $[$TYPE_SPEED+(-2 + RANDOM%5)]; + fi + + # wait for the user to press a key before moving on + if !($NO_WAIT); then + wait + fi + echo "" +} + +## +# Prints and executes a command +# +# takes 1 parameter - the string command to run +# +# usage: pe "ls -l" +# +## +function pe() { + # print the command + p "$@" + + # execute the command + eval "$@" +} + +## +# Enters script into interactive mode +# +# and allows newly typed commands to be executed within the script +# +# usage : cmd +# +## +function cmd() { + # render the prompt + x=$(PS1="$DEMO_PROMPT" "$BASH" --norc -i &1 | sed -n '${s/^\(.*\)exit$/\1/p;}') + printf "$x\033[0m" + read command + eval "${command}" +} + + +function check_pv() { + command -v pv >/dev/null 2>&1 || { + + echo "" + echo -e "${RED}##############################################################" + echo "# HOLD IT!! I require pv but it's not installed. Aborting." 
>&2; + echo -e "${RED}##############################################################" + echo "" + echo -e "${COLOR_RESET}Installing pv:" + echo "" + echo -e "${BLUE}Mac:${COLOR_RESET} $ brew install pv" + echo "" + echo -e "${BLUE}Other:${COLOR_RESET} http://www.ivarch.com/programs/pv.shtml" + echo -e "${COLOR_RESET}" + exit 1; + } +} + +check_pv +# +# handle some default params +# -h for help +# -d for disabling simulated typing +# +while getopts ":dhnw:" opt; do + case $opt in + h) + usage + exit 1 + ;; + d) + unset TYPE_SPEED + ;; + n) + NO_WAIT=true + ;; + w) + PROMPT_TIMEOUT=$OPTARG + esac +done diff --git a/multi-node-demo/demo.sh b/multi-node-demo/demo.sh new file mode 100755 index 000000000000..c36f5cb77769 --- /dev/null +++ b/multi-node-demo/demo.sh @@ -0,0 +1,75 @@ +#!/usr/bin/env bash +set -e + +cd "$(dirname $0)" || exit + +source demo-magic.sh -n + +DEMO_PROMPT="${CYAN}$ " +MINIKUBE_CMD="../out/minikube" +TYPE_SPEED=15 + +p "# In this demo I will start up a 4-node minikube cluster." +p "# Three workers and one server" +p "" + +p "# Start minikube master ..." +pe "$MINIKUBE_CMD start" + +p "" +pe "$MINIKUBE_CMD node list" + +p "" +p "# Add some nodes ..." +pe "$MINIKUBE_CMD node add" +pe "$MINIKUBE_CMD node add" +pe "$MINIKUBE_CMD node add" + +p "" +pe "$MINIKUBE_CMD node list" + +p "" +p "# Start nodes ..." +pe "$MINIKUBE_CMD node start" + +p "" +pe "$MINIKUBE_CMD node list" + +sleep 10 + +p "" +pe "kubectl get nodes" + +p "" +p "# Installing Pod network ..." +pe "kubectl apply -f kube-flannel.yml" + +p "" +p "# Wait for flannel to start ..." +pe "kubectl --namespace=kube-system rollout status ds/kube-flannel-ds" + +p "" +p "# Deploy our pods ..." +pe "cat hello-deployment.yml" +pe "kubectl apply -f hello-deployment.yml" + +p "" +pe "kubectl rollout status deployment/hello" + +p "" +p "# Deploy our service ..." +pe "cat hello-svc.yml" +pe "kubectl apply -f hello-svc.yml" + +p "" +pe "# Note Pod IPs ..." 
+pe "kubectl get pod -o wide" + +p "" +pe "$MINIKUBE_CMD service list" + +p "" +ip=$($MINIKUBE_CMD ip) +pe "for i in \$(seq 1 10); do curl http://$ip:31000; echo; sleep 0.4; done" + +p "# Yay" diff --git a/multi-node-demo/hello-deployment.yml b/multi-node-demo/hello-deployment.yml new file mode 100644 index 000000000000..09f2d67ab816 --- /dev/null +++ b/multi-node-demo/hello-deployment.yml @@ -0,0 +1,30 @@ +--- +apiVersion: apps/v1beta1 +kind: Deployment +metadata: + name: hello +spec: + replicas: 4 + strategy: + type: RollingUpdate + rollingUpdate: + maxUnavailable: 100% + template: + metadata: + labels: + app: hello + spec: + affinity: + # ⬇⬇⬇ This ensures pods will land on separate hosts + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchExpressions: [{ key: app, operator: In, values: [hello-from] }] + topologyKey: "kubernetes.io/hostname" + containers: + - name: hello-from + image: pbitty/hello-from:latest + ports: + - name: http + containerPort: 80 + terminationGracePeriodSeconds: 1 diff --git a/multi-node-demo/hello-svc.yml b/multi-node-demo/hello-svc.yml new file mode 100644 index 000000000000..9109a339a4a6 --- /dev/null +++ b/multi-node-demo/hello-svc.yml @@ -0,0 +1,14 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: hello +spec: + type: NodePort + selector: + app: hello + ports: + - protocol: TCP + nodePort: 31000 + port: 80 + targetPort: http diff --git a/multi-node-demo/kube-flannel.yml b/multi-node-demo/kube-flannel.yml new file mode 100644 index 000000000000..aabbbdb9e1c9 --- /dev/null +++ b/multi-node-demo/kube-flannel.yml @@ -0,0 +1,140 @@ +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1beta1 +metadata: + name: flannel +rules: + - apiGroups: + - "" + resources: + - pods + verbs: + - get + - apiGroups: + - "" + resources: + - nodes + verbs: + - list + - watch + - apiGroups: + - "" + resources: + - nodes/status + verbs: + - patch +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1beta1 +metadata: + name: flannel +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: flannel +subjects: +- kind: ServiceAccount + name: flannel + namespace: kube-system +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: flannel + namespace: kube-system +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: kube-flannel-cfg + namespace: kube-system + labels: + tier: node + app: flannel +data: + cni-conf.json: | + { + "name": "cbr0", + "type": "flannel", + "delegate": { + "isDefaultGateway": true + } + } + net-conf.json: | + { + "Network": "10.244.0.0/16", + "Backend": { + "Type": "vxlan" + } + } +--- +apiVersion: extensions/v1beta1 +kind: DaemonSet +metadata: + name: kube-flannel-ds + namespace: kube-system + labels: + tier: node + app: flannel +spec: + updateStrategy: + type: RollingUpdate + minReadySeconds: 5 + template: + metadata: + labels: + tier: node + app: flannel + spec: + hostNetwork: true + nodeSelector: + beta.kubernetes.io/arch: amd64 + tolerations: + - key: node-role.kubernetes.io/master + operator: Exists + effect: NoSchedule + serviceAccountName: flannel + initContainers: + - name: install-cni + image: quay.io/coreos/flannel:v0.9.1-amd64 + command: + - cp + args: + - -f + - /etc/kube-flannel/cni-conf.json + - /etc/cni/net.d/10-flannel.conf + volumeMounts: + - name: cni + mountPath: /etc/cni/net.d + - name: flannel-cfg + mountPath: /etc/kube-flannel/ + containers: + - name: kube-flannel + image: quay.io/coreos/flannel:v0.9.1-amd64 + command: [ 
"/opt/bin/flanneld", "--ip-masq", "--kube-subnet-mgr", "--iface", "eth1" ] + securityContext: + privileged: true + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + volumeMounts: + - name: run + mountPath: /run + - name: flannel-cfg + mountPath: /etc/kube-flannel/ + volumes: + - name: run + hostPath: + path: /run + - name: cni + hostPath: + path: /etc/cni/net.d + - name: flannel-cfg + configMap: + name: kube-flannel-cfg diff --git a/pkg/minikube/bootstrapper/bootstrapper.go b/pkg/minikube/bootstrapper/bootstrapper.go index 73ba18fac22f..3cc9cd56b732 100644 --- a/pkg/minikube/bootstrapper/bootstrapper.go +++ b/pkg/minikube/bootstrapper/bootstrapper.go @@ -45,6 +45,8 @@ type KubernetesConfig struct { ExtraOptions util.ExtraOptionSlice ShouldLoadCachedImages bool + + BootstrapToken string } const ( diff --git a/pkg/minikube/bootstrapper/certs.go b/pkg/minikube/bootstrapper/certs.go index f2cdf52f450e..bb20d2714009 100644 --- a/pkg/minikube/bootstrapper/certs.go +++ b/pkg/minikube/bootstrapper/certs.go @@ -28,6 +28,7 @@ import ( "k8s.io/client-go/tools/clientcmd/api" "k8s.io/client-go/tools/clientcmd/api/latest" "k8s.io/minikube/pkg/minikube/assets" + "k8s.io/minikube/pkg/minikube/bootstrapper/runner" "k8s.io/minikube/pkg/minikube/constants" "k8s.io/minikube/pkg/util" "k8s.io/minikube/pkg/util/kubeconfig" @@ -41,7 +42,7 @@ var ( ) // SetupCerts gets the generated credentials required to talk to the APIServer. -func SetupCerts(cmd CommandRunner, k8s KubernetesConfig) error { +func SetupCerts(cmd runner.CommandRunner, k8s KubernetesConfig) error { localPath := constants.GetMinipath() glog.Infof("Setting up certificates for IP: %s\n", k8s.NodeIP) diff --git a/pkg/minikube/bootstrapper/certs_test.go b/pkg/minikube/bootstrapper/certs_test.go index 94e5254d7064..1fa9ccabde80 100644 --- a/pkg/minikube/bootstrapper/certs_test.go +++ b/pkg/minikube/bootstrapper/certs_test.go @@ -21,6 +21,7 @@ import ( "path/filepath" "testing" + "k8s.io/minikube/pkg/minikube/bootstrapper/runner" "k8s.io/minikube/pkg/minikube/constants" "k8s.io/minikube/pkg/minikube/tests" "k8s.io/minikube/pkg/util" @@ -30,7 +31,7 @@ func TestSetupCerts(t *testing.T) { tempDir := tests.MakeTempDir() defer os.RemoveAll(tempDir) - f := NewFakeCommandRunner() + f := runner.NewFakeCommandRunner() k8s := KubernetesConfig{ APIServerName: constants.APIServerName, DNSDomain: constants.ClusterDNSDomain, diff --git a/pkg/minikube/bootstrapper/kubeadm/kubeadm.go b/pkg/minikube/bootstrapper/kubeadm/kubeadm.go index 94dcd34c8d28..60996058b0df 100644 --- a/pkg/minikube/bootstrapper/kubeadm/kubeadm.go +++ b/pkg/minikube/bootstrapper/kubeadm/kubeadm.go @@ -33,6 +33,7 @@ import ( "golang.org/x/sync/errgroup" "k8s.io/minikube/pkg/minikube/assets" "k8s.io/minikube/pkg/minikube/bootstrapper" + "k8s.io/minikube/pkg/minikube/bootstrapper/runner" "k8s.io/minikube/pkg/minikube/config" "k8s.io/minikube/pkg/minikube/constants" "k8s.io/minikube/pkg/minikube/machine" @@ -41,30 +42,52 @@ import ( ) type KubeadmBootstrapper struct { - c bootstrapper.CommandRunner + c runner.CommandRunner + ip string + machineName string } func NewKubeadmBootstrapper(api libmachine.API) (*KubeadmBootstrapper, error) { - h, err := api.Load(config.GetMachineName()) + return NewKubeadmBootstrapperForMachine(config.GetMachineName(), api) +} + +func NewKubeadmBootstrapperForMachine(machineName string, api libmachine.API) (*KubeadmBootstrapper, error) { + h, err := 
api.Load(machineName) if err != nil { return nil, errors.Wrap(err, "getting api client") } - var cmd bootstrapper.CommandRunner + var cmd runner.CommandRunner // The none driver executes commands directly on the host if h.Driver.DriverName() == constants.DriverNone { - cmd = &bootstrapper.ExecRunner{} + cmd = &runner.ExecRunner{} } else { client, err := sshutil.NewSSHClient(h.Driver) if err != nil { return nil, errors.Wrap(err, "getting ssh client") } - cmd = bootstrapper.NewSSHRunner(client) + cmd = runner.NewSSHRunner(client) + } + + ip, err := h.Driver.GetIP() + if err != nil { + return nil, errors.Wrap(err, "getting host IP") } + return &KubeadmBootstrapper{ - c: cmd, + c: cmd, + ip: ip, + machineName: machineName, }, nil } +func NewKubeadmBootstrapperForRunner(machineName, ip string, c runner.CommandRunner) *KubeadmBootstrapper { + return &KubeadmBootstrapper{ + c: c, + ip: ip, + machineName: machineName, + } +} + //TODO(r2d4): This should most likely check the health of the apiserver func (k *KubeadmBootstrapper) GetClusterStatus() (string, error) { statusCmd := `sudo systemctl is-active kubelet &>/dev/null && echo "Running" || echo "Stopped"` @@ -106,7 +129,7 @@ func (k *KubeadmBootstrapper) StartCluster(k8s bootstrapper.KubernetesConfig) er // We use --skip-preflight-checks since we have our own custom addons // that we also stick in /etc/kubernetes/manifests b := bytes.Buffer{} - if err := kubeadmInitTemplate.Execute(&b, struct{ KubeadmConfigFile string }{constants.KubeadmConfigFile}); err != nil { + if err := kubeadmInitTemplate.Execute(&b, struct{ KubeadmConfigFile, Token string }{constants.KubeadmConfigFile, k8s.BootstrapToken}); err != nil { return err } @@ -128,6 +151,31 @@ func (k *KubeadmBootstrapper) StartCluster(k8s bootstrapper.KubernetesConfig) er return nil } +func (k *KubeadmBootstrapper) JoinNode(k8s bootstrapper.KubernetesConfig) error { + // We use --skip-preflight-checks since we have our own custom addons + // that we also stick in /etc/kubernetes/manifests + b := bytes.Buffer{} + if err := kubeadmJoinTemplate.Execute(&b, struct{ Token, ServerAddress string }{k8s.BootstrapToken, fmt.Sprintf("%s:8443", k8s.NodeIP)}); err != nil { + return err + } + + err := k.c.Run(b.String()) + if err != nil { + return errors.Wrapf(err, "kubeadm init error running command: %s", b.String()) + } + + err = k.c.Run(` +sudo systemctl daemon-reload && +sudo systemctl enable kubelet && +sudo systemctl start kubelet +`) + if err != nil { + return errors.Wrap(err, "starting kubelet") + } + + return nil +} + //TODO(r2d4): Split out into shared function between localkube and kubeadm func addAddons(files *[]assets.CopyableFile) error { // add addons to file list @@ -210,7 +258,7 @@ func SetContainerRuntime(cfg map[string]string, runtime string) map[string]strin // NewKubeletConfig generates a new systemd unit containing a configured kubelet // based on the options present in the KubernetesConfig. 
-func NewKubeletConfig(k8s bootstrapper.KubernetesConfig) (string, error) { +func NewKubeletConfig(hostname, ip string, k8s bootstrapper.KubernetesConfig) (string, error) { version, err := ParseKubernetesVersion(k8s.KubernetesVersion) if err != nil { return "", errors.Wrap(err, "parsing kubernetes version") @@ -228,10 +276,14 @@ func NewKubeletConfig(k8s bootstrapper.KubernetesConfig) (string, error) { ExtraOptions string FeatureGates string ContainerRuntime string + Hostname string + NodeIP string }{ ExtraOptions: extraFlags, FeatureGates: k8s.FeatureGates, ContainerRuntime: k8s.ContainerRuntime, + Hostname: hostname, + NodeIP: ip, } if err := kubeletSystemdTemplate.Execute(&b, opts); err != nil { return "", err @@ -243,14 +295,18 @@ func NewKubeletConfig(k8s bootstrapper.KubernetesConfig) (string, error) { func (k *KubeadmBootstrapper) UpdateCluster(cfg bootstrapper.KubernetesConfig) error { if cfg.ShouldLoadCachedImages { // Make best effort to load any cached images - go machine.LoadImages(k.c, constants.GetKubeadmCachedImages(cfg.KubernetesVersion), constants.ImageCacheDir) + glog.Infoln("Loading cached images....") + err := machine.LoadImages(k.c, constants.GetKubeadmCachedImages(cfg.KubernetesVersion), constants.ImageCacheDir) + if err != nil { + glog.Infoln("Could not load all cached images, ignoring and continuing. Error: ", err) + } } kubeadmCfg, err := generateConfig(cfg) if err != nil { return errors.Wrap(err, "generating kubeadm cfg") } - kubeletCfg, err := NewKubeletConfig(cfg) + kubeletCfg, err := NewKubeletConfig("minikube", k.ip, cfg) if err != nil { return errors.Wrap(err, "generating kubelet config") } @@ -305,6 +361,48 @@ sudo systemctl start kubelet return nil } +func (k *KubeadmBootstrapper) UpdateNode(cfg bootstrapper.KubernetesConfig) error { + kubeletCfg, err := NewKubeletConfig(k.machineName, k.ip, cfg) + if err != nil { + return errors.Wrap(err, "generating kubelet config") + } + + files := []assets.CopyableFile{ + assets.NewMemoryAssetTarget([]byte(kubeletService), constants.KubeletServiceFile, "0640"), + assets.NewMemoryAssetTarget([]byte(kubeletCfg), constants.KubeletSystemdConfFile, "0640"), + } + + var g errgroup.Group + for _, bin := range []string{"kubelet", "kubeadm"} { + bin := bin + g.Go(func() error { + path, err := maybeDownloadAndCache(bin, cfg.KubernetesVersion) + if err != nil { + return errors.Wrapf(err, "downloading %s", bin) + } + f, err := assets.NewFileAsset(path, "/usr/bin", bin, "0641") + if err != nil { + return errors.Wrap(err, "making new file asset") + } + if err := k.c.Copy(f); err != nil { + return errors.Wrapf(err, "transferring kubeadm file: %+v", f) + } + return nil + }) + } + if err := g.Wait(); err != nil { + return errors.Wrap(err, "downloading binaries") + } + + for _, f := range files { + if err := k.c.Copy(f); err != nil { + return errors.Wrapf(err, "transferring kubeadm file: %+v", f) + } + } + + return nil +} + func generateConfig(k8s bootstrapper.KubernetesConfig) (string, error) { version, err := ParseKubernetesVersion(k8s.KubernetesVersion) if err != nil { @@ -320,21 +418,26 @@ func generateConfig(k8s bootstrapper.KubernetesConfig) (string, error) { opts := struct { CertDir string ServiceCIDR string + PodsCIDR string AdvertiseAddress string APIServerPort int KubernetesVersion string EtcdDataDir string NodeName string ExtraArgs []ComponentExtraArgs + Token string }{ - CertDir: util.DefaultCertPath, - ServiceCIDR: util.DefaultServiceCIDR, + CertDir: util.DefaultCertPath, + ServiceCIDR: util.DefaultServiceCIDR, + // TODO 
Make configurable + PodsCIDR: "10.244.0.0/16", AdvertiseAddress: k8s.NodeIP, APIServerPort: util.APIServerPort, KubernetesVersion: k8s.KubernetesVersion, EtcdDataDir: "/data", //TODO(r2d4): change to something else persisted NodeName: k8s.NodeName, ExtraArgs: extraComponentConfig, + Token: k8s.BootstrapToken, } b := bytes.Buffer{} diff --git a/pkg/minikube/bootstrapper/kubeadm/kubeadm_test.go b/pkg/minikube/bootstrapper/kubeadm/kubeadm_test.go index d0063dbcef1b..d9093009f222 100644 --- a/pkg/minikube/bootstrapper/kubeadm/kubeadm_test.go +++ b/pkg/minikube/bootstrapper/kubeadm/kubeadm_test.go @@ -46,6 +46,7 @@ kubernetesVersion: v1.8.0 certificatesDir: /var/lib/localkube/certs/ networking: serviceSubnet: 10.96.0.0/12 + podSubnet: 10.244.0.0/16 etcd: dataDir: /data nodeName: minikube @@ -84,6 +85,7 @@ kubernetesVersion: v1.8.0-alpha.0 certificatesDir: /var/lib/localkube/certs/ networking: serviceSubnet: 10.96.0.0/12 + podSubnet: 10.244.0.0/16 etcd: dataDir: /data nodeName: extra-args-minikube @@ -123,6 +125,7 @@ kubernetesVersion: v1.8.0-alpha.0 certificatesDir: /var/lib/localkube/certs/ networking: serviceSubnet: 10.96.0.0/12 + podSubnet: 10.244.0.0/16 etcd: dataDir: /data nodeName: extra-args-minikube @@ -148,6 +151,7 @@ kubernetesVersion: v1.8.0-alpha.0 certificatesDir: /var/lib/localkube/certs/ networking: serviceSubnet: 10.96.0.0/12 + podSubnet: 10.244.0.0/16 etcd: dataDir: /data nodeName: extra-args-minikube @@ -183,6 +187,7 @@ kubernetesVersion: v1.8.0-alpha.0 certificatesDir: /var/lib/localkube/certs/ networking: serviceSubnet: 10.96.0.0/12 + podSubnet: 10.244.0.0/16 etcd: dataDir: /data nodeName: extra-args-minikube diff --git a/pkg/minikube/bootstrapper/kubeadm/templates.go b/pkg/minikube/bootstrapper/kubeadm/templates.go index 6dbd445b0ed9..1baae390cf64 100644 --- a/pkg/minikube/bootstrapper/kubeadm/templates.go +++ b/pkg/minikube/bootstrapper/kubeadm/templates.go @@ -22,6 +22,7 @@ import ( "text/template" ) +// TODO Make template more readable var kubeadmConfigTemplate = template.Must(template.New("kubeadmConfigTemplate").Funcs(template.FuncMap{ "printMapInOrder": printMapInOrder, }).Parse(`apiVersion: kubeadm.k8s.io/v1alpha1 @@ -33,17 +34,18 @@ kubernetesVersion: {{.KubernetesVersion}} certificatesDir: {{.CertDir}} networking: serviceSubnet: {{.ServiceCIDR}} + podSubnet: {{.PodsCIDR}} etcd: dataDir: {{.EtcdDataDir}} nodeName: {{.NodeName}} {{range .ExtraArgs}}{{.Component}}:{{range $i, $val := printMapInOrder .Options ": " }} {{$val}}{{end}} -{{end}}`)) +{{end}}{{if .Token}}token: {{.Token}}{{end}}`)) var kubeletSystemdTemplate = template.Must(template.New("kubeletSystemdTemplate").Parse(` [Service] ExecStart= -ExecStart=/usr/bin/kubelet {{.ExtraOptions}} {{if .FeatureGates}}--feature-gates={{.FeatureGates}}{{end}} +ExecStart=/usr/bin/kubelet --node-ip={{.NodeIP}} --hostname-override={{.Hostname}} {{.ExtraOptions}} {{if .FeatureGates}}--feature-gates={{.FeatureGates}}{{end}} [Install] {{if or (eq .ContainerRuntime "cri-o") (eq .ContainerRuntime "cri")}}Wants=crio.service{{else}}Wants=docker.socket{{end}} @@ -73,6 +75,8 @@ sudo /usr/bin/kubeadm alpha phase etcd local --config {{.KubeadmConfigFile}} var kubeadmInitTemplate = template.Must(template.New("kubeadmInitTemplate").Parse("sudo /usr/bin/kubeadm init --config {{.KubeadmConfigFile}} --skip-preflight-checks")) +var kubeadmJoinTemplate = template.Must(template.New("kubeadmJoinTemplate").Parse("sudo /usr/bin/kubeadm join --token {{.Token}} {{.ServerAddress}}")) + // printMapInOrder sorts the keys and prints the map in order, 
combining key // value pairs with the separator character // diff --git a/pkg/minikube/bootstrapper/kubeadm/versions.go b/pkg/minikube/bootstrapper/kubeadm/versions.go index 55b740d82e0e..7c3595feeaa4 100644 --- a/pkg/minikube/bootstrapper/kubeadm/versions.go +++ b/pkg/minikube/bootstrapper/kubeadm/versions.go @@ -174,7 +174,6 @@ var versionSpecificOpts = []VersionedExtraOption{ NewUnversionedOption(Kubelet, "kubeconfig", "/etc/kubernetes/kubelet.conf"), NewUnversionedOption(Kubelet, "bootstrap-kubeconfig", "/etc/kubernetes/bootstrap-kubelet.conf"), NewUnversionedOption(Kubelet, "require-kubeconfig", "true"), - NewUnversionedOption(Kubelet, "hostname-override", "minikube"), // System pods args NewUnversionedOption(Kubelet, "pod-manifest-path", "/etc/kubernetes/manifests"), @@ -183,6 +182,7 @@ var versionSpecificOpts = []VersionedExtraOption{ // Network args NewUnversionedOption(Kubelet, "cluster-dns", "10.96.0.10"), NewUnversionedOption(Kubelet, "cluster-domain", "cluster.local"), + NewUnversionedOption(Kubelet, "network-plugin", "cni"), // Auth args NewUnversionedOption(Kubelet, "authorization-mode", "Webhook"), diff --git a/pkg/minikube/bootstrapper/kubeadm/worker.go b/pkg/minikube/bootstrapper/kubeadm/worker.go new file mode 100644 index 000000000000..abcc25c2da8b --- /dev/null +++ b/pkg/minikube/bootstrapper/kubeadm/worker.go @@ -0,0 +1,48 @@ +package kubeadm + +import ( + "fmt" + "io" + + "github.com/pkg/errors" + "k8s.io/minikube/pkg/minikube" + "k8s.io/minikube/pkg/minikube/bootstrapper" +) + +type WorkerBootstrapper struct { + config bootstrapper.KubernetesConfig + ui io.Writer +} + +func NewWorkerBootstrapper(c bootstrapper.KubernetesConfig, ui io.Writer) minikube.Bootstrapper { + return &WorkerBootstrapper{config: c, ui: ui} +} + +func (nb *WorkerBootstrapper) Bootstrap(n minikube.Node) error { + ip, err := n.IP() + if err != nil { + return errors.Wrap(err, "Error getting node's IP") + } + + runner, err := n.Runner() + if err != nil { + return errors.Wrap(err, "Error getting node's runner") + } + + b := NewKubeadmBootstrapperForRunner(n.MachineName(), ip, runner) + + fmt.Fprintln(nb.ui, "Moving assets into node...") + if err := b.UpdateNode(nb.config); err != nil { + return errors.Wrap(err, "Error updating node") + } + fmt.Fprintln(nb.ui, "Setting up certs...") + if err := b.SetupCerts(nb.config); err != nil { + return errors.Wrap(err, "Error configuring authentication") + } + + fmt.Fprintln(nb.ui, "Joining node to cluster...") + if err := b.JoinNode(nb.config); err != nil { + return errors.Wrap(err, "Error joining node to cluster") + } + return nil +} diff --git a/pkg/minikube/bootstrapper/localkube/localkube.go b/pkg/minikube/bootstrapper/localkube/localkube.go index a91a811aea7a..a2f948d25d4f 100644 --- a/pkg/minikube/bootstrapper/localkube/localkube.go +++ b/pkg/minikube/bootstrapper/localkube/localkube.go @@ -22,6 +22,7 @@ import ( "k8s.io/minikube/pkg/minikube/assets" "k8s.io/minikube/pkg/minikube/bootstrapper" + "k8s.io/minikube/pkg/minikube/bootstrapper/runner" "k8s.io/minikube/pkg/minikube/config" "k8s.io/minikube/pkg/minikube/constants" "k8s.io/minikube/pkg/minikube/machine" @@ -33,7 +34,7 @@ import ( ) type LocalkubeBootstrapper struct { - cmd bootstrapper.CommandRunner + cmd runner.CommandRunner } func NewLocalkubeBootstrapper(api libmachine.API) (*LocalkubeBootstrapper, error) { @@ -41,16 +42,16 @@ func NewLocalkubeBootstrapper(api libmachine.API) (*LocalkubeBootstrapper, error if err != nil { return nil, errors.Wrap(err, "getting api client") } - var cmd 
bootstrapper.CommandRunner + var cmd runner.CommandRunner // The none driver executes commands directly on the host if h.Driver.DriverName() == constants.DriverNone { - cmd = &bootstrapper.ExecRunner{} + cmd = &runner.ExecRunner{} } else { client, err := sshutil.NewSSHClient(h.Driver) if err != nil { return nil, errors.Wrap(err, "getting ssh client") } - cmd = bootstrapper.NewSSHRunner(client) + cmd = runner.NewSSHRunner(client) } return &LocalkubeBootstrapper{ cmd: cmd, diff --git a/pkg/minikube/bootstrapper/localkube/localkube_test.go b/pkg/minikube/bootstrapper/localkube/localkube_test.go index e7b346919fe6..6cd2ca14a16f 100644 --- a/pkg/minikube/bootstrapper/localkube/localkube_test.go +++ b/pkg/minikube/bootstrapper/localkube/localkube_test.go @@ -20,6 +20,7 @@ import ( "testing" "k8s.io/minikube/pkg/minikube/bootstrapper" + "k8s.io/minikube/pkg/minikube/bootstrapper/runner" "k8s.io/minikube/pkg/minikube/constants" ) @@ -46,7 +47,7 @@ func TestStartCluster(t *testing.T) { for _, test := range cases { t.Run(test.description, func(t *testing.T) { t.Parallel() - f := bootstrapper.NewFakeCommandRunner() + f := runner.NewFakeCommandRunner() f.SetCommandToOutput(map[string]string{test.startCmd: "ok"}) l := LocalkubeBootstrapper{f} err := l.StartCluster(bootstrapper.KubernetesConfig{}) @@ -96,7 +97,7 @@ func TestUpdateCluster(t *testing.T) { for _, test := range cases { t.Run(test.description, func(t *testing.T) { t.Parallel() - f := bootstrapper.NewFakeCommandRunner() + f := runner.NewFakeCommandRunner() l := LocalkubeBootstrapper{f} err := l.UpdateCluster(test.k8s) if err != nil && !test.shouldErr { @@ -149,7 +150,7 @@ func TestGetLocalkubeStatus(t *testing.T) { for _, test := range cases { t.Run(test.description, func(t *testing.T) { t.Parallel() - f := bootstrapper.NewFakeCommandRunner() + f := runner.NewFakeCommandRunner() f.SetCommandToOutput(test.statusCmdMap) l := LocalkubeBootstrapper{f} actualStatus, err := l.GetClusterStatus() @@ -203,7 +204,7 @@ func TestGetHostLogs(t *testing.T) { for _, test := range cases { t.Run(test.description, func(t *testing.T) { t.Parallel() - f := bootstrapper.NewFakeCommandRunner() + f := runner.NewFakeCommandRunner() f.SetCommandToOutput(test.logsCmdMap) l := LocalkubeBootstrapper{f} _, err := l.GetClusterLogs(test.follow) diff --git a/pkg/minikube/bootstrapper/command_runner.go b/pkg/minikube/bootstrapper/runner/command_runner.go similarity index 98% rename from pkg/minikube/bootstrapper/command_runner.go rename to pkg/minikube/bootstrapper/runner/command_runner.go index cb019243276a..466c086f8925 100644 --- a/pkg/minikube/bootstrapper/command_runner.go +++ b/pkg/minikube/bootstrapper/runner/command_runner.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package bootstrapper +package runner import ( "fmt" diff --git a/pkg/minikube/bootstrapper/exec_runner.go b/pkg/minikube/bootstrapper/runner/exec_runner.go similarity index 99% rename from pkg/minikube/bootstrapper/exec_runner.go rename to pkg/minikube/bootstrapper/runner/exec_runner.go index 063748254c30..50f118a363a3 100644 --- a/pkg/minikube/bootstrapper/exec_runner.go +++ b/pkg/minikube/bootstrapper/runner/exec_runner.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package bootstrapper +package runner import ( "io" diff --git a/pkg/minikube/bootstrapper/fake_runner.go b/pkg/minikube/bootstrapper/runner/fake_runner.go similarity index 99% rename from pkg/minikube/bootstrapper/fake_runner.go rename to pkg/minikube/bootstrapper/runner/fake_runner.go index 79aebe6f7a60..ae379ec6b611 100644 --- a/pkg/minikube/bootstrapper/fake_runner.go +++ b/pkg/minikube/bootstrapper/runner/fake_runner.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package bootstrapper +package runner import ( "bytes" diff --git a/pkg/minikube/bootstrapper/ssh_runner.go b/pkg/minikube/bootstrapper/runner/ssh_runner.go similarity index 99% rename from pkg/minikube/bootstrapper/ssh_runner.go rename to pkg/minikube/bootstrapper/runner/ssh_runner.go index 74119ee5c5e7..2ef6238b5243 100644 --- a/pkg/minikube/bootstrapper/ssh_runner.go +++ b/pkg/minikube/bootstrapper/runner/ssh_runner.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package bootstrapper +package runner import ( "fmt" diff --git a/pkg/minikube/cluster/cluster.go b/pkg/minikube/cluster/cluster.go index 9da1fb22e23a..406cb2c7fc10 100644 --- a/pkg/minikube/cluster/cluster.go +++ b/pkg/minikube/cluster/cluster.go @@ -62,9 +62,10 @@ func init() { // StartHost starts a host VM. func StartHost(api libmachine.API, config MachineConfig) (*host.Host, error) { - exists, err := api.Exists(cfg.GetMachineName()) + + exists, err := api.Exists(config.MachineName) if err != nil { - return nil, errors.Wrapf(err, "Error checking if host exists: %s", cfg.GetMachineName()) + return nil, errors.Wrapf(err, "Error checking if host exists: %s", config.MachineName) } if !exists { glog.Infoln("Machine does not exist... provisioning new machine") @@ -74,7 +75,7 @@ func StartHost(api libmachine.API, config MachineConfig) (*host.Host, error) { glog.Infoln("Skipping create...Using existing machine configuration") } - h, err := api.Load(cfg.GetMachineName()) + h, err := api.Load(config.MachineName) if err != nil { return nil, errors.Wrap(err, "Error loading existing host. Please try running [minikube delete], then run [minikube start] again.") } @@ -103,46 +104,46 @@ func StartHost(api libmachine.API, config MachineConfig) (*host.Host, error) { } // StopHost stops the host VM. -func StopHost(api libmachine.API) error { - host, err := api.Load(cfg.GetMachineName()) +func StopHost(name string, api libmachine.API) error { + host, err := api.Load(name) if err != nil { - return errors.Wrapf(err, "Error loading host: %s", cfg.GetMachineName()) + return errors.Wrapf(err, "Error loading host: %s", name) } if err := host.Stop(); err != nil { alreadyInStateError, ok := err.(mcnerror.ErrHostAlreadyInState) if ok && alreadyInStateError.State == state.Stopped { return nil } - return errors.Wrapf(err, "Error stopping host: %s", cfg.GetMachineName()) + return errors.Wrapf(err, "Error stopping host: %s", name) } return nil } // DeleteHost deletes the host VM. -func DeleteHost(api libmachine.API) error { - host, err := api.Load(cfg.GetMachineName()) +func DeleteHost(name string, api libmachine.API) error { + host, err := api.Load(name) if err != nil { return errors.Wrapf(err, "Error deleting host: %s", cfg.GetMachineName()) } m := util.MultiError{} m.Collect(host.Driver.Remove()) - m.Collect(api.Remove(cfg.GetMachineName())) + m.Collect(api.Remove(name)) return m.ToError() } // GetHostStatus gets the status of the host VM. 
-func GetHostStatus(api libmachine.API) (string, error) { - exists, err := api.Exists(cfg.GetMachineName()) +func GetHostStatus(name string, api libmachine.API) (string, error) { + exists, err := api.Exists(name) if err != nil { - return "", errors.Wrapf(err, "Error checking that api exists for: %s", cfg.GetMachineName()) + return "", errors.Wrapf(err, "Error checking that api exists for: %s", name) } if !exists { return state.None.String(), nil } - host, err := api.Load(cfg.GetMachineName()) + host, err := api.Load(name) if err != nil { - return "", errors.Wrapf(err, "Error loading api for: %s", cfg.GetMachineName()) + return "", errors.Wrapf(err, "Error loading api for: %s", name) } s, err := host.Driver.GetState() @@ -152,9 +153,13 @@ func GetHostStatus(api libmachine.API) (string, error) { return s.String(), nil } -// GetHostDriverIP gets the ip address of the current minikube cluster func GetHostDriverIP(api libmachine.API) (net.IP, error) { - host, err := CheckIfApiExistsAndLoad(api) + return GetHostDriverIpByName(cfg.GetMachineName(), api) +} + +// GetHostDriverIP gets the ip address of the current minikube cluster +func GetHostDriverIpByName(name string, api libmachine.API) (net.IP, error) { + host, err := CheckIfApiExistsAndLoadByName(name, api) if err != nil { return nil, err } @@ -181,7 +186,7 @@ func engineOptions(config MachineConfig) *engine.Options { } func createVirtualboxHost(config MachineConfig) drivers.Driver { - d := virtualbox.NewDriver(cfg.GetMachineName(), constants.GetMinipath()) + d := virtualbox.NewDriver(config.MachineName, constants.GetMinipath()) d.Boot2DockerURL = config.Downloader.GetISOFileURI(config.MinikubeISO) d.Memory = config.Memory d.CPU = config.CPUs @@ -262,8 +267,8 @@ To disable this message, run [minikube config set WantShowDriverDeprecationNotif } // GetHostDockerEnv gets the necessary docker env variables to allow the use of docker through minikube's vm -func GetHostDockerEnv(api libmachine.API) (map[string]string, error) { - host, err := CheckIfApiExistsAndLoad(api) +func GetHostDockerEnv(name string, api libmachine.API) (map[string]string, error) { + host, err := CheckIfApiExistsAndLoadByName(name, api) if err != nil { return nil, errors.Wrap(err, "Error checking that api exists and loading it") } @@ -357,23 +362,31 @@ func getIPForInterface(name string) (net.IP, error) { } func CheckIfApiExistsAndLoad(api libmachine.API) (*host.Host, error) { - exists, err := api.Exists(cfg.GetMachineName()) + return CheckIfApiExistsAndLoadByName(cfg.GetMachineName(), api) +} + +func CheckIfApiExistsAndLoadByName(name string, api libmachine.API) (*host.Host, error) { + exists, err := api.Exists(name) if err != nil { - return nil, errors.Wrapf(err, "Error checking that api exists for: %s", cfg.GetMachineName()) + return nil, errors.Wrapf(err, "Error checking that api exists for: %s", name) } if !exists { - return nil, errors.Errorf("Machine does not exist for api.Exists(%s)", cfg.GetMachineName()) + return nil, errors.Errorf("Machine does not exist for api.Exists(%s)", name) } - host, err := api.Load(cfg.GetMachineName()) + host, err := api.Load(name) if err != nil { - return nil, errors.Wrapf(err, "Error loading api for: %s", cfg.GetMachineName()) + return nil, errors.Wrapf(err, "Error loading api for: %s", name) } return host, nil } func CreateSSHShell(api libmachine.API, args []string) error { - host, err := CheckIfApiExistsAndLoad(api) + return CreateSSHShellByName(cfg.GetMachineName(), api, args) +} + +func CreateSSHShellByName(name string, api 
libmachine.API, args []string) error { + host, err := CheckIfApiExistsAndLoadByName(name, api) if err != nil { return errors.Wrap(err, "Error checking if api exist and loading it") } @@ -397,7 +410,7 @@ func CreateSSHShell(api libmachine.API, args []string) error { // EnsureMinikubeRunningOrExit checks that minikube has a status available and that // that the status is `Running`, otherwise it will exit func EnsureMinikubeRunningOrExit(api libmachine.API, exitStatus int) { - s, err := GetHostStatus(api) + s, err := GetHostStatus(cfg.GetMachineName(), api) if err != nil { glog.Errorln("Error getting machine status:", err) os.Exit(1) diff --git a/pkg/minikube/cluster/cluster_test.go b/pkg/minikube/cluster/cluster_test.go index f8729100d78c..89fe3a5bd23c 100644 --- a/pkg/minikube/cluster/cluster_test.go +++ b/pkg/minikube/cluster/cluster_test.go @@ -36,6 +36,7 @@ func (d MockDownloader) GetISOFileURI(isoURL string) string { return "" func (d MockDownloader) CacheMinikubeISOFromURL(isoURL string) error { return nil } var defaultMachineConfig = MachineConfig{ + MachineName: "test-cluster", VMDriver: constants.DefaultVMDriver, MinikubeISO: constants.DefaultIsoUrl, Downloader: MockDownloader{}, @@ -44,7 +45,7 @@ var defaultMachineConfig = MachineConfig{ func TestCreateHost(t *testing.T) { api := tests.NewMockAPI() - exists, _ := api.Exists(config.GetMachineName()) + exists, _ := api.Exists(defaultMachineConfig.MachineName) if exists { t.Fatal("Machine already exists.") } @@ -52,12 +53,12 @@ func TestCreateHost(t *testing.T) { if err != nil { t.Fatalf("Error creating host: %v", err) } - exists, _ = api.Exists(config.GetMachineName()) + exists, _ = api.Exists(defaultMachineConfig.MachineName) if !exists { t.Fatal("Machine does not exist, but should.") } - h, err := api.Load(config.GetMachineName()) + h, err := api.Load(defaultMachineConfig.MachineName) if err != nil { t.Fatalf("Error loading machine: %v", err) } @@ -101,7 +102,7 @@ func TestStartHostExists(t *testing.T) { if err != nil { t.Fatal("Error starting host.") } - if h.Name != config.GetMachineName() { + if h.Name != defaultMachineConfig.MachineName { t.Fatalf("Machine created with incorrect name: %s", h.Name) } if s, _ := h.Driver.GetState(); s != state.Running { @@ -129,7 +130,7 @@ func TestStartStoppedHost(t *testing.T) { if err != nil { t.Fatal("Error starting host.") } - if h.Name != config.GetMachineName() { + if h.Name != defaultMachineConfig.MachineName { t.Fatalf("Machine created with incorrect name: %s", h.Name) } @@ -156,7 +157,7 @@ func TestStartHost(t *testing.T) { if err != nil { t.Fatal("Error starting host.") } - if h.Name != config.GetMachineName() { + if h.Name != defaultMachineConfig.MachineName { t.Fatalf("Machine created with incorrect name: %s", h.Name) } if exists, _ := api.Exists(h.Name); !exists { @@ -207,7 +208,7 @@ func TestStartHostConfig(t *testing.T) { func TestStopHostError(t *testing.T) { api := tests.NewMockAPI() - if err := StopHost(api); err == nil { + if err := StopHost(defaultMachineConfig.MachineName, api); err == nil { t.Fatal("An error should be thrown when stopping non-existing machine.") } } @@ -215,9 +216,13 @@ func TestStopHostError(t *testing.T) { func TestStopHost(t *testing.T) { api := tests.NewMockAPI() h, _ := createHost(api, defaultMachineConfig) - if err := StopHost(api); err != nil { + if err := StopHost("some-other-machine", api); err == nil { t.Fatal("An error should be thrown when stopping non-existing machine.") } + if err := StopHost(defaultMachineConfig.MachineName, api); err != nil 
{ + t.Fatal("An error should not be thrown when stopping existing machine.") + } + if s, _ := h.Driver.GetState(); s != state.Stopped { t.Fatalf("Machine not stopped. Currently in state: %s", s) } @@ -227,7 +232,7 @@ func TestDeleteHost(t *testing.T) { api := tests.NewMockAPI() createHost(api, defaultMachineConfig) - if err := DeleteHost(api); err != nil { + if err := DeleteHost(defaultMachineConfig.MachineName, api); err != nil { t.Fatalf("Unexpected error deleting host: %s", err) } } @@ -240,7 +245,7 @@ func TestDeleteHostErrorDeletingVM(t *testing.T) { h.Driver = d - if err := DeleteHost(api); err == nil { + if err := DeleteHost(defaultMachineConfig.MachineName, api); err == nil { t.Fatal("Expected error deleting host.") } } @@ -250,7 +255,7 @@ func TestDeleteHostErrorDeletingFiles(t *testing.T) { api.RemoveError = true createHost(api, defaultMachineConfig) - if err := DeleteHost(api); err == nil { + if err := DeleteHost(defaultMachineConfig.MachineName, api); err == nil { t.Fatal("Expected error deleting host.") } } @@ -264,13 +269,13 @@ func TestDeleteHostMultipleErrors(t *testing.T) { h.Driver = d - err := DeleteHost(api) + err := DeleteHost(defaultMachineConfig.MachineName, api) if err == nil { t.Fatal("Expected error deleting host, didn't get one.") } - expectedErrors := []string{"Error removing " + config.GetMachineName(), "Error deleting machine"} + expectedErrors := []string{"Error removing " + defaultMachineConfig.MachineName, "Error deleting machine"} for _, expectedError := range expectedErrors { if !strings.Contains(err.Error(), expectedError) { t.Fatalf("Error %s expected to contain: %s.", err, expectedError) @@ -282,7 +287,7 @@ func TestGetHostStatus(t *testing.T) { api := tests.NewMockAPI() checkState := func(expected string) { - s, err := GetHostStatus(api) + s, err := GetHostStatus(defaultMachineConfig.MachineName, api) if err != nil { t.Fatalf("Unexpected error getting status: %s", err) } @@ -296,7 +301,7 @@ func TestGetHostStatus(t *testing.T) { createHost(api, defaultMachineConfig) checkState(state.Running.String()) - StopHost(api) + StopHost(defaultMachineConfig.MachineName, api) checkState(state.Stopped.String()) } @@ -316,7 +321,7 @@ func TestGetHostDockerEnv(t *testing.T) { } h.Driver = d - envMap, err := GetHostDockerEnv(api) + envMap, err := GetHostDockerEnv(defaultMachineConfig.MachineName, api) if err != nil { t.Fatalf("Unexpected error getting env: %s", err) } @@ -349,7 +354,7 @@ func TestGetHostDockerEnvIPv6(t *testing.T) { } h.Driver = d - envMap, err := GetHostDockerEnv(api) + envMap, err := GetHostDockerEnv(defaultMachineConfig.MachineName, api) if err != nil { t.Fatalf("Unexpected error getting env: %s", err) } diff --git a/pkg/minikube/cluster/types.go b/pkg/minikube/cluster/types.go index e535d05f96d6..f17ba9aad2d2 100644 --- a/pkg/minikube/cluster/types.go +++ b/pkg/minikube/cluster/types.go @@ -17,12 +17,14 @@ limitations under the License. package cluster import ( + "k8s.io/minikube/pkg/minikube" "k8s.io/minikube/pkg/minikube/bootstrapper" "k8s.io/minikube/pkg/util" ) // MachineConfig contains the parameters used to start a cluster. 
type MachineConfig struct { + MachineName string MinikubeISO string Memory int CPUs int @@ -45,6 +47,8 @@ type MachineConfig struct { // Config contains machine and k8s config type Config struct { + ClusterName string MachineConfig MachineConfig KubernetesConfig bootstrapper.KubernetesConfig + Nodes []minikube.NodeConfig } diff --git a/pkg/minikube/constants/constants.go b/pkg/minikube/constants/constants.go index bf352badaba3..1f567e1c58e8 100644 --- a/pkg/minikube/constants/constants.go +++ b/pkg/minikube/constants/constants.go @@ -117,6 +117,11 @@ func GetProfileFile(profile string) string { return filepath.Join(GetMinipath(), "profiles", profile, "config.json") } +func GetProfileFiles() []string { + files, _ := filepath.Glob(filepath.Join(GetMinipath(), "profiles", "*", "config.json")) + return files +} + var LocalkubeDownloadURLPrefix = "https://storage.googleapis.com/minikube/k8sReleases/" var LocalkubeLinuxFilename = "localkube-linux-amd64" diff --git a/pkg/minikube/machine/cache_images.go b/pkg/minikube/machine/cache_images.go index dcf4ec447e0d..dbfb516f05e5 100644 --- a/pkg/minikube/machine/cache_images.go +++ b/pkg/minikube/machine/cache_images.go @@ -28,6 +28,7 @@ import ( "k8s.io/minikube/pkg/minikube/assets" "k8s.io/minikube/pkg/minikube/bootstrapper" + "k8s.io/minikube/pkg/minikube/bootstrapper/runner" "k8s.io/minikube/pkg/minikube/config" "k8s.io/minikube/pkg/minikube/constants" "k8s.io/minikube/pkg/minikube/sshutil" @@ -80,8 +81,9 @@ func CacheImages(images []string, cacheDir string) error { return nil } -func LoadImages(cmd bootstrapper.CommandRunner, images []string, cacheDir string) error { +func LoadImages(cmd runner.CommandRunner, images []string, cacheDir string) error { var g errgroup.Group + glog.Infof("Loading cached images: %s", images) for _, image := range images { image := image g.Go(func() error { @@ -118,7 +120,7 @@ func CacheAndLoadImages(images []string) error { if err != nil { return err } - cmdRunner, err := bootstrapper.NewSSHRunner(client), nil + cmdRunner, err := runner.NewSSHRunner(client), nil if err != nil { return err } @@ -190,7 +192,7 @@ func getWindowsVolumeNameCmd(d string) (string, error) { return vname, nil } -func LoadFromCacheBlocking(cmd bootstrapper.CommandRunner, src string) error { +func LoadFromCacheBlocking(cmd runner.CommandRunner, src string) error { glog.Infoln("Loading image from cache at ", src) filename := filepath.Base(src) for { diff --git a/pkg/minikube/machine/client.go b/pkg/minikube/machine/client.go index 56e4906c1434..7cdc577cb33f 100644 --- a/pkg/minikube/machine/client.go +++ b/pkg/minikube/machine/client.go @@ -25,7 +25,7 @@ import ( "path/filepath" "time" - "k8s.io/minikube/pkg/minikube/bootstrapper" + "k8s.io/minikube/pkg/minikube/bootstrapper/runner" "k8s.io/minikube/pkg/minikube/constants" "k8s.io/minikube/pkg/minikube/sshutil" "k8s.io/minikube/pkg/provision" @@ -154,16 +154,16 @@ func (api *LocalClient) Load(name string) (*host.Host, error) { return h, nil } -func GetCommandRunner(h *host.Host) (bootstrapper.CommandRunner, error) { +func GetCommandRunner(h *host.Host) (runner.CommandRunner, error) { if h.DriverName != constants.DriverNone { client, err := sshutil.NewSSHClient(h.Driver) if err != nil { return nil, errors.Wrap(err, "getting ssh client for bootstrapper") } - return bootstrapper.NewSSHRunner(client), nil + return runner.NewSSHRunner(client), nil } - return &bootstrapper.ExecRunner{}, nil + return &runner.ExecRunner{}, nil } func (api *LocalClient) Close() error { diff --git 
a/pkg/minikube/node/node.go b/pkg/minikube/node/node.go new file mode 100644 index 000000000000..124fa8969674 --- /dev/null +++ b/pkg/minikube/node/node.go @@ -0,0 +1,138 @@ +package node + +import ( + "fmt" + + "github.com/docker/machine/libmachine" + "github.com/docker/machine/libmachine/state" + "github.com/pkg/errors" + "k8s.io/minikube/pkg/minikube" + "k8s.io/minikube/pkg/minikube/bootstrapper/runner" + "k8s.io/minikube/pkg/minikube/cluster" + "k8s.io/minikube/pkg/minikube/constants" + "k8s.io/minikube/pkg/minikube/sshutil" + "k8s.io/minikube/pkg/util" +) + +func NewNode( + config minikube.NodeConfig, + baseConfig cluster.MachineConfig, + clusterName string, + api libmachine.API, +) minikube.Node { + return &node{ + api: api, + config: config, + baseConfig: baseConfig, + clusterName: clusterName, + } +} + +type node struct { + api libmachine.API + config minikube.NodeConfig + baseConfig cluster.MachineConfig + clusterName string +} + +func (n *node) Config() minikube.NodeConfig { + return n.config +} + +func (n *node) IP() (string, error) { + host, err := cluster.CheckIfApiExistsAndLoadByName(n.MachineName(), n.api) + if err != nil { + return "", err + } + + ip, err := host.Driver.GetIP() + return ip, errors.Wrap(err, "Error getting IP") +} + +func (n *node) MachineName() string { + return fmt.Sprintf("%s-%s", n.clusterName, n.config.Name) +} + +func (n *node) Name() string { + return n.config.Name +} + +func (n *node) Start() error { + _, err := cluster.StartHost(n.api, n.machineConfig()) + if err != nil { + return err + } + return nil +} + +func (n *node) Stop() error { + return fmt.Errorf("Not implemented yet") +} + +func (n *node) Status() (minikube.NodeStatus, error) { + s, err := n.status() + return s, errors.Wrap(err, "getting node status") +} + +func (n *node) Runner() (runner.CommandRunner, error) { + h, err := n.api.Load(n.MachineName()) + if err != nil { + return nil, errors.Wrap(err, "loading host") + } + + // The none driver executes commands directly on the host + if h.Driver.DriverName() == constants.DriverNone { + return &runner.ExecRunner{}, nil + } + client, err := sshutil.NewSSHClient(h.Driver) + if err != nil { + return nil, errors.Wrap(err, "getting ssh client") + } + return runner.NewSSHRunner(client), nil +} + +func (n *node) machineConfig() cluster.MachineConfig { + cfg := n.baseConfig + cfg.Downloader = util.DefaultDownloader{} + cfg.MachineName = n.MachineName() + return cfg +} + +func (n *node) status() (minikube.NodeStatus, error) { + if exists, err := n.api.Exists(n.MachineName()); err == nil && !exists { + return minikube.StatusNotCreated, nil + } else if err != nil { + return minikube.NodeStatus(""), err + } + + host, err := n.api.Load(n.MachineName()) + if err != nil { + return minikube.NodeStatus(""), err + } + + s, err := host.Driver.GetState() + if err != nil { + return minikube.NodeStatus(""), err + } + + switch s { + case state.Running: + return minikube.StatusRunning, nil + case state.Starting: + return minikube.StatusRunning, nil + case state.Stopping: + return minikube.StatusRunning, nil + case state.Stopped: + return minikube.StatusStopped, nil + case state.Paused: + return minikube.StatusStopped, nil + case state.Saved: + return minikube.StatusStopped, nil + case state.Error: + return minikube.NodeStatus(""), errors.Errorf("Error state %s from libmachine", s) + case state.Timeout: + return minikube.NodeStatus(""), errors.Errorf("Error state %s from libmachine", s) + default: + return minikube.NodeStatus(""), errors.Errorf("Unknown state %s 
from libmachine", s) + } +} diff --git a/pkg/minikube/types.go b/pkg/minikube/types.go new file mode 100644 index 000000000000..4c91423bbdac --- /dev/null +++ b/pkg/minikube/types.go @@ -0,0 +1,32 @@ +package minikube + +import ( + "k8s.io/minikube/pkg/minikube/bootstrapper/runner" +) + +const ( + StatusStopped = "Stopped" + StatusRunning = "Running" + StatusNotCreated = "NotCreated" +) + +type NodeStatus string + +type NodeConfig struct { + Name string +} + +type Node interface { + Config() NodeConfig + Start() error + Stop() error + Status() (NodeStatus, error) + Runner() (runner.CommandRunner, error) + MachineName() string + Name() string + IP() (string, error) +} + +type Bootstrapper interface { + Bootstrap(Node) error +} diff --git a/pkg/provision/buildroot.go b/pkg/provision/buildroot.go index 5534caf187bb..6f5b918b3b65 100755 --- a/pkg/provision/buildroot.go +++ b/pkg/provision/buildroot.go @@ -36,7 +36,7 @@ import ( "github.com/docker/machine/libmachine/swarm" "github.com/pkg/errors" "k8s.io/minikube/pkg/minikube/assets" - "k8s.io/minikube/pkg/minikube/bootstrapper" + "k8s.io/minikube/pkg/minikube/bootstrapper/runner" "k8s.io/minikube/pkg/minikube/sshutil" "k8s.io/minikube/pkg/util" ) @@ -224,7 +224,7 @@ func configureAuth(p *BuildrootProvisioner) error { return errors.Wrap(err, "error getting ip during provisioning") } - execRunner := &bootstrapper.ExecRunner{} + execRunner := &runner.ExecRunner{} hostCerts := map[string]string{ authOptions.CaCertPath: path.Join(authOptions.StorePath, "ca.pem"), authOptions.ClientCertPath: path.Join(authOptions.StorePath, "cert.pem"), @@ -275,7 +275,7 @@ func configureAuth(p *BuildrootProvisioner) error { if err != nil { return errors.Wrap(err, "provisioning: error getting ssh client") } - sshRunner := bootstrapper.NewSSHRunner(sshClient) + sshRunner := runner.NewSSHRunner(sshClient) for src, dst := range remoteCerts { f, err := assets.NewFileAsset(src, path.Dir(dst), filepath.Base(dst), "0640") if err != nil { diff --git a/vendor/github.com/elgs/gostrgen/.gitignore b/vendor/github.com/elgs/gostrgen/.gitignore new file mode 100644 index 000000000000..daf913b1b347 --- /dev/null +++ b/vendor/github.com/elgs/gostrgen/.gitignore @@ -0,0 +1,24 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof diff --git a/vendor/github.com/elgs/gostrgen/README.md b/vendor/github.com/elgs/gostrgen/README.md new file mode 100644 index 000000000000..82458955f291 --- /dev/null +++ b/vendor/github.com/elgs/gostrgen/README.md @@ -0,0 +1,33 @@ +# gostrgen +Random string generator in Golang. + +#Installation +`go get -u github.com/elgs/gostrgen` + +# Sample code +```go +package main + +import ( + "fmt" + "github.com/elgs/gostrgen" +) + +func main() { + + // possible character sets are: + // Lower, Upper, Digit, Punct, LowerUpper, LowerDigit, UpperDigit, LowerUpperDigit, All and None. + // Any of the above can be combine by "|", e.g. LowerUpper is the same as Lower | Upper + + charsToGenerate := 20 + charSet := gostrgen.Lower | gostrgen.Digit + includes := "[]{}<>" // optionally include some additional letters + excludes := "Ol" //exclude big 'O' and small 'l' to avoid confusion with zero and one. 
+ + str, err := gostrgen.RandGen(charsToGenerate, charSet, includes, excludes) + if err != nil { + fmt.Println(err) + } + fmt.Println(str) // zxh9[pvoxbaup32b7s0d +} +``` \ No newline at end of file diff --git a/vendor/github.com/elgs/gostrgen/gostrgen.go b/vendor/github.com/elgs/gostrgen/gostrgen.go new file mode 100644 index 000000000000..8463c970a1bf --- /dev/null +++ b/vendor/github.com/elgs/gostrgen/gostrgen.go @@ -0,0 +1,61 @@ +// gostrgen +package gostrgen + +import ( + "errors" + "math/rand" + "strings" + "time" +) + +const None = 0 +const Lower = 1 << 0 +const Upper = 1 << 1 +const Digit = 1 << 2 +const Punct = 1 << 3 + +const LowerUpper = Lower | Upper +const LowerDigit = Lower | Digit +const UpperDigit = Upper | Digit +const LowerUpperDigit = LowerUpper | Digit +const All = LowerUpperDigit | Punct + +const lower = "abcdefghijklmnopqrstuvwxyz" +const upper = "ABCDEFGHIJKLMNOPQRSTUVWXYZ" +const digit = "0123456789" +const punct = "~!@#$%^&*()_+-=" + +func init() { + rand.Seed(time.Now().UTC().UnixNano()) +} + +func RandGen(size int, set int, include string, exclude string) (string, error) { + all := include + if set&Lower > 0 { + all += lower + } + if set&Upper > 0 { + all += upper + } + if set&Digit > 0 { + all += digit + } + if set&Punct > 0 { + all += punct + } + + lenAll := len(all) + if len(exclude) >= lenAll { + return "", errors.New("Too much to exclude.") + } + buf := make([]byte, size) + for i := 0; i < size; i++ { + b := all[rand.Intn(lenAll)] + if strings.Contains(exclude, string(b)) { + i-- + continue + } + buf[i] = b + } + return string(buf), nil +}
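The patch above introduces the multi-node building blocks (`minikube.NodeConfig`, the `minikube.Node` interface, `node.NewNode`, and `kubeadm.NewWorkerBootstrapper`) but never shows them used together outside the CLI commands. The sketch below is not part of the diff; it is a minimal, hedged illustration of how these pieces could be wired up. The `package main` wrapper, the use of `machine.NewAPIClient` as the libmachine API constructor, and the literal cluster name, node name, and resource values are assumptions for illustration only; the types and constructors themselves come from this patch.

```go
package main

import (
	"fmt"
	"os"

	"k8s.io/minikube/pkg/minikube"
	"k8s.io/minikube/pkg/minikube/bootstrapper"
	"k8s.io/minikube/pkg/minikube/bootstrapper/kubeadm"
	"k8s.io/minikube/pkg/minikube/cluster"
	"k8s.io/minikube/pkg/minikube/constants"
	"k8s.io/minikube/pkg/minikube/machine"
	"k8s.io/minikube/pkg/minikube/node"
)

func main() {
	// Assumed helper for obtaining a libmachine API client; any libmachine.API works here.
	api, err := machine.NewAPIClient()
	if err != nil {
		fmt.Fprintln(os.Stderr, "creating machine client:", err)
		os.Exit(1)
	}
	defer api.Close()

	// Describe the worker node and the cluster-wide machine settings.
	// MachineName and Downloader are filled in by the node itself, which
	// derives the machine name as "<cluster>-<node>".
	nc := minikube.NodeConfig{Name: "node-1"}
	mc := cluster.MachineConfig{
		VMDriver: constants.DefaultVMDriver,
		CPUs:     2,    // illustrative values
		Memory:   2048, // illustrative values
	}

	// Create and start the node's VM ("demo" is a hypothetical cluster name).
	n := node.NewNode(nc, mc, "demo", api)
	if err := n.Start(); err != nil {
		fmt.Fprintln(os.Stderr, "starting node:", err)
		os.Exit(1)
	}

	// Join the freshly started machine to the cluster as a worker. In real use
	// the KubernetesConfig would be loaded from the cluster's profile.
	kcfg := bootstrapper.KubernetesConfig{}
	wb := kubeadm.NewWorkerBootstrapper(kcfg, os.Stdout)
	if err := wb.Bootstrap(n); err != nil {
		fmt.Fprintln(os.Stderr, "bootstrapping node:", err)
		os.Exit(1)
	}

	fmt.Println("worker node joined the cluster")
}
```

The split mirrors the patch's design: `node.Node` only manages the machine (create, start, status, SSH runner), while the worker bootstrapper reuses that runner to push assets, set up certs, and run the kubeadm join, so other bootstrappers can be swapped in behind the same `minikube.Bootstrapper` interface.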