diff --git a/cmd/minikube/cmd/docker-env.go b/cmd/minikube/cmd/docker-env.go
index 256510485686..2a4efb31c0df 100644
--- a/cmd/minikube/cmd/docker-env.go
+++ b/cmd/minikube/cmd/docker-env.go
@@ -280,7 +280,7 @@ var dockerEnvCmd = &cobra.Command{
 			exit.Message(reason.EnvMultiConflict, `The docker-env command is incompatible with multi-node clusters. Use the 'registry' add-on: https://minikube.sigs.k8s.io/docs/handbook/registry/`)
 		}
 
-		if co.Config.KubernetesConfig.ContainerRuntime != "docker" {
+		if co.Config.KubernetesConfig.ContainerRuntime != constants.Docker {
 			exit.Message(reason.Usage, `The docker-env command is only compatible with the "docker" runtime, but this cluster was configured to use the "{{.runtime}}" runtime.`,
 				out.V{"runtime": co.Config.KubernetesConfig.ContainerRuntime})
 		}
diff --git a/cmd/minikube/cmd/podman-env.go b/cmd/minikube/cmd/podman-env.go
index eacf79dd187c..bf0e9229f7eb 100644
--- a/cmd/minikube/cmd/podman-env.go
+++ b/cmd/minikube/cmd/podman-env.go
@@ -170,6 +170,11 @@ var podmanEnvCmd = &cobra.Command{
 			exit.Message(reason.Usage, `The podman-env command is incompatible with multi-node clusters. Use the 'registry' add-on: https://minikube.sigs.k8s.io/docs/handbook/registry/`)
 		}
 
+		if co.Config.KubernetesConfig.ContainerRuntime != constants.CRIO {
+			exit.Message(reason.Usage, `The podman-env command is only compatible with the "crio" runtime, but this cluster was configured to use the "{{.runtime}}" runtime.`,
+				out.V{"runtime": co.Config.KubernetesConfig.ContainerRuntime})
+		}
+
 		r := co.CP.Runner
 		if ok := isPodmanAvailable(r); !ok {
 			exit.Message(reason.EnvPodmanUnavailable, `The podman service within '{{.cluster}}' is not active`, out.V{"cluster": cname})
diff --git a/cmd/minikube/cmd/start.go b/cmd/minikube/cmd/start.go
index 990341c7671c..ac96b53d9292 100644
--- a/cmd/minikube/cmd/start.go
+++ b/cmd/minikube/cmd/start.go
@@ -188,6 +188,7 @@ func runStart(cmd *cobra.Command, args []string) {
 
 	validateSpecifiedDriver(existing)
 	validateKubernetesVersion(existing)
+	validateContainerRuntime(existing)
 
 	ds, alts, specified := selectDriver(existing)
 	if cmd.Flag(kicBaseImage).Changed {
@@ -270,7 +271,7 @@ func runStart(cmd *cobra.Command, args []string) {
 		exit.Error(reason.GuestStart, "failed to start node", err)
 	}
 
-	if err := showKubectlInfo(kubeconfig, starter.Node.KubernetesVersion, starter.Cfg.Name); err != nil {
+	if err := showKubectlInfo(kubeconfig, starter.Node.KubernetesVersion, starter.Node.ContainerRuntime, starter.Cfg.Name); err != nil {
 		klog.Errorf("kubectl info: %v", err)
 	}
 }
@@ -302,7 +303,8 @@ func provisionWithDriver(cmd *cobra.Command, ds registry.DriverState, existing *
 	}
 
 	k8sVersion := getKubernetesVersion(existing)
-	cc, n, err := generateClusterConfig(cmd, existing, k8sVersion, driverName)
+	rtime := getContainerRuntime(existing)
+	cc, n, err := generateClusterConfig(cmd, existing, k8sVersion, rtime, driverName)
 	if err != nil {
 		return node.Starter{}, errors.Wrap(err, "Failed to generate config")
 	}
@@ -381,6 +383,7 @@ func startWithDriver(cmd *cobra.Command, starter node.Starter, existing *config.
 				Worker:            true,
 				ControlPlane:      false,
 				KubernetesVersion: starter.Cfg.KubernetesConfig.KubernetesVersion,
+				ContainerRuntime:  starter.Cfg.KubernetesConfig.ContainerRuntime,
 			}
 			out.Ln("") // extra newline for clarity on the command line
 			err := node.Add(starter.Cfg, n, viper.GetBool(deleteOnFailure))
@@ -439,15 +442,15 @@ func displayEnviron(env []string) {
 	}
 }
 
-func showKubectlInfo(kcs *kubeconfig.Settings, k8sVersion, machineName string) error {
+func showKubectlInfo(kcs *kubeconfig.Settings, k8sVersion, rtime, machineName string) error {
 	if k8sVersion == constants.NoKubernetesVersion {
 		register.Reg.SetStep(register.Done)
 		out.Step(style.Ready, "Done! minikube is ready without Kubernetes!")
 
 		// Runtime message.
 		boxConfig := box.Config{Py: 1, Px: 4, Type: "Round", Color: "Green"}
-		switch viper.GetString(containerRuntime) {
-		case constants.DefaultContainerRuntime:
+		switch rtime {
+		case constants.Docker:
 			out.BoxedWithConfig(boxConfig, style.Tip, "Things to try without Kubernetes ...", `- "minikube ssh" to SSH into minikube's node.
 - "minikube docker-env" to point your docker-cli to the docker inside minikube.
 - "minikube image" to build images without docker.`)
@@ -1183,9 +1186,10 @@ func validateFlags(cmd *cobra.Command, drvName string) {
 			exit.Message(reason.DrvUnsupportedProfile, "The '{{.name}} driver does not support multiple profiles: https://minikube.sigs.k8s.io/docs/reference/drivers/none/", out.V{"name": drvName})
 		}
 
-		runtime := viper.GetString(containerRuntime)
-		if runtime != "docker" {
-			out.WarningT("Using the '{{.runtime}}' runtime with the 'none' driver is an untested configuration!", out.V{"runtime": runtime})
+		// default container runtime varies, starting with Kubernetes 1.24 - assume that only the default container runtime has been tested
+		rtime := viper.GetString(containerRuntime)
+		if rtime != constants.DefaultContainerRuntime && rtime != defaultRuntime(getKubernetesVersion(nil)) {
+			out.WarningT("Using the '{{.runtime}}' runtime with the 'none' driver is an untested configuration!", out.V{"runtime": rtime})
 		}
 
 		// conntrack is required starting with Kubernetes 1.18, include the release candidates for completion
@@ -1285,6 +1289,10 @@ func validateRuntime(rtime string) error {
 	// `crio` is accepted as an alternative spelling to `cri-o`
 	validOptions = append(validOptions, constants.CRIO)
 
+	if rtime == constants.DefaultContainerRuntime {
+		return nil
+	}
+
 	var validRuntime bool
 	for _, option := range validOptions {
 		if rtime == option {
@@ -1308,9 +1316,31 @@ func validateRuntime(rtime string) error {
 	return nil
 }
 
+func getContainerRuntime(old *config.ClusterConfig) string {
+	paramRuntime := viper.GetString(containerRuntime)
+
+	// try to load the old version first if the user didn't specify anything
+	if paramRuntime == constants.DefaultContainerRuntime && old != nil {
+		paramRuntime = old.KubernetesConfig.ContainerRuntime
+	}
+
+	if paramRuntime == constants.DefaultContainerRuntime {
+		k8sVersion := getKubernetesVersion(old)
+		paramRuntime = defaultRuntime(k8sVersion)
+	}
+
+	return paramRuntime
+}
+
+// defaultRuntime returns the default container runtime
+func defaultRuntime(k8sVersion string) string {
+	// minikube default
+	return constants.Docker
+}
+
 // if container runtime is not docker, check that cni is not disabled
 func validateCNI(cmd *cobra.Command, runtime string) {
-	if runtime == "docker" {
+	if runtime == constants.Docker {
 		return
 	}
 	if cmd.Flags().Changed(cniFlag) && strings.ToLower(viper.GetString(cniFlag)) == "false" {
@@ -1458,6 +1488,7 @@ func createNode(cc config.ClusterConfig, kubeNodeName string, existing *config.C
 	if existing != nil {
 		cp, err := config.PrimaryControlPlane(existing)
 		cp.KubernetesVersion = getKubernetesVersion(&cc)
+		cp.ContainerRuntime = getContainerRuntime(&cc)
 		if err != nil {
 			return cc, config.Node{}, err
 		}
@@ -1467,6 +1498,7 @@ func createNode(cc config.ClusterConfig, kubeNodeName string, existing *config.C
 		nodes := []config.Node{}
 		for _, n := range existing.Nodes {
 			n.KubernetesVersion = getKubernetesVersion(&cc)
+			n.ContainerRuntime = getContainerRuntime(&cc)
 			nodes = append(nodes, n)
 		}
 		cc.Nodes = nodes
@@ -1477,6 +1509,7 @@ func createNode(cc config.ClusterConfig, kubeNodeName string, existing *config.C
 	cp := config.Node{
 		Port:              cc.KubernetesConfig.NodePort,
 		KubernetesVersion: getKubernetesVersion(&cc),
+		ContainerRuntime:  getContainerRuntime(&cc),
 		Name:              kubeNodeName,
 		ControlPlane:      true,
 		Worker:            true,
@@ -1573,6 +1606,17 @@ func validateKubernetesVersion(old *config.ClusterConfig) {
 	}
 }
 
+// validateContainerRuntime ensures that the container runtime is reasonable
+func validateContainerRuntime(old *config.ClusterConfig) {
+	if old == nil || old.KubernetesConfig.ContainerRuntime == "" {
+		return
+	}
+
+	if err := validateRuntime(old.KubernetesConfig.ContainerRuntime); err != nil {
+		klog.Errorf("Error parsing old runtime %q: %v", old.KubernetesConfig.ContainerRuntime, err)
+	}
+}
+
 func isBaseImageApplicable(drv string) bool {
 	return registry.IsKIC(drv)
 }
diff --git a/cmd/minikube/cmd/start_flags.go b/cmd/minikube/cmd/start_flags.go
index 85fbd9ff889d..3d8a69ce6853 100644
--- a/cmd/minikube/cmd/start_flags.go
+++ b/cmd/minikube/cmd/start_flags.go
@@ -160,7 +160,7 @@ func initMinikubeFlags() {
 	startCmd.Flags().String(kicBaseImage, kic.BaseImage, "The base image to use for docker/podman drivers. Intended for local development.")
 	startCmd.Flags().Bool(keepContext, false, "This will keep the existing kubectl context and will create a minikube context.")
 	startCmd.Flags().Bool(embedCerts, false, "if true, will embed the certs in kubeconfig.")
-	startCmd.Flags().String(containerRuntime, constants.DefaultContainerRuntime, fmt.Sprintf("The container runtime to be used (%s).", strings.Join(cruntime.ValidRuntimes(), ", ")))
+	startCmd.Flags().String(containerRuntime, constants.DefaultContainerRuntime, fmt.Sprintf("The container runtime to be used. Valid options: %s (default: auto)", strings.Join(cruntime.ValidRuntimes(), ", ")))
 	startCmd.Flags().Bool(createMount, false, "This will start the mount daemon and automatically mount files into minikube.")
 	startCmd.Flags().String(mountString, constants.DefaultMountDir+":/minikube-host", "The argument to pass the minikube mount command on start.")
 	startCmd.Flags().String(mount9PVersion, defaultMount9PVersion, mount9PVersionDescription)
@@ -274,7 +274,7 @@ func ClusterFlagValue() string {
 }
 
 // generateClusterConfig generate a config.ClusterConfig based on flags or existing cluster config
-func generateClusterConfig(cmd *cobra.Command, existing *config.ClusterConfig, k8sVersion string, drvName string) (config.ClusterConfig, config.Node, error) {
+func generateClusterConfig(cmd *cobra.Command, existing *config.ClusterConfig, k8sVersion string, rtime string, drvName string) (config.ClusterConfig, config.Node, error) {
 	var cc config.ClusterConfig
 	if existing != nil {
 		cc = updateExistingConfigFromFlags(cmd, existing)
@@ -286,7 +286,7 @@ func generateClusterConfig(cmd *cobra.Command, existing *config.ClusterConfig, k
 		}
 	} else {
 		klog.Info("no existing cluster config was found, will generate one from the flags ")
-		cc = generateNewConfigFromFlags(cmd, k8sVersion, drvName)
+		cc = generateNewConfigFromFlags(cmd, k8sVersion, rtime, drvName)
 
 		cnm, err := cni.New(&cc)
 		if err != nil {
@@ -444,7 +444,7 @@ func getCNIConfig(cmd *cobra.Command) string {
 }
 
 // generateNewConfigFromFlags generate a config.ClusterConfig based on flags
-func generateNewConfigFromFlags(cmd *cobra.Command, k8sVersion string, drvName string) config.ClusterConfig {
+func generateNewConfigFromFlags(cmd *cobra.Command, k8sVersion string, rtime string, drvName string) config.ClusterConfig {
 	var cc config.ClusterConfig
 
 	// networkPlugin cni deprecation warning
@@ -526,7 +526,7 @@ func generateNewConfigFromFlags(cmd *cobra.Command, k8sVersion string, drvName s
 			APIServerIPs:           apiServerIPs,
 			DNSDomain:              viper.GetString(dnsDomain),
 			FeatureGates:           viper.GetString(featureGates),
-			ContainerRuntime:       viper.GetString(containerRuntime),
+			ContainerRuntime:       rtime,
 			CRISocket:              viper.GetString(criSocket),
 			NetworkPlugin:          chosenNetworkPlugin,
 			ServiceCIDR:            viper.GetString(serviceCIDR),
@@ -549,7 +549,7 @@ func generateNewConfigFromFlags(cmd *cobra.Command, k8sVersion string, drvName s
 		exit.Message(reason.Usage, "Ensure your {{.driver_name}} is running and is healthy.", out.V{"driver_name": driver.FullName(drvName)})
 	}
 	if si.Rootless {
-		if cc.KubernetesConfig.ContainerRuntime == "docker" {
+		if cc.KubernetesConfig.ContainerRuntime == constants.Docker {
 			exit.Message(reason.Usage, "--container-runtime must be set to \"containerd\" or \"cri-o\" for rootless")
 		}
 		// KubeletInUserNamespace feature gate is essential for rootless driver.
@@ -729,6 +729,9 @@ func updateExistingConfigFromFlags(cmd *cobra.Command, existing *config.ClusterC
 	if cmd.Flags().Changed(kubernetesVersion) {
 		cc.KubernetesConfig.KubernetesVersion = getKubernetesVersion(existing)
 	}
+	if cmd.Flags().Changed(containerRuntime) {
+		cc.KubernetesConfig.ContainerRuntime = getContainerRuntime(existing)
+	}
 
 	if cmd.Flags().Changed("extra-config") {
 		cc.KubernetesConfig.ExtraOptions = getExtraOptions()
diff --git a/cmd/minikube/cmd/start_test.go b/cmd/minikube/cmd/start_test.go
index 54133d85e6de..c67bc2c12283 100644
--- a/cmd/minikube/cmd/start_test.go
+++ b/cmd/minikube/cmd/start_test.go
@@ -112,6 +112,7 @@ func TestMirrorCountry(t *testing.T) {
 	viper.SetDefault(humanReadableDiskSize, defaultDiskSize)
 	checkRepository = checkRepoMock
 	k8sVersion := constants.DefaultKubernetesVersion
+	rtime := constants.DefaultContainerRuntime
 	var tests = []struct {
 		description     string
 		k8sVersion      string
@@ -157,7 +158,7 @@ func TestMirrorCountry(t *testing.T) {
 			viper.SetDefault(imageRepository, test.imageRepository)
 			viper.SetDefault(imageMirrorCountry, test.mirrorCountry)
 			viper.SetDefault(kvmNUMACount, 1)
-			config, _, err := generateClusterConfig(cmd, nil, k8sVersion, driver.Mock)
+			config, _, err := generateClusterConfig(cmd, nil, k8sVersion, rtime, driver.Mock)
 			if err != nil {
 				t.Fatalf("Got unexpected error %v during config generation", err)
 			}
@@ -179,6 +180,7 @@ func TestGenerateCfgFromFlagsHTTPProxyHandling(t *testing.T) {
 		}
 	}()
 	k8sVersion := constants.NewestKubernetesVersion
+	rtime := constants.DefaultContainerRuntime
 	var tests = []struct {
 		description string
 		proxy       string
@@ -226,7 +228,7 @@ func TestGenerateCfgFromFlagsHTTPProxyHandling(t *testing.T) {
 			cfg.DockerEnv = []string{} // clear docker env to avoid pollution
 			proxy.SetDockerEnv()
 
-			config, _, err := generateClusterConfig(cmd, nil, k8sVersion, "none")
+			config, _, err := generateClusterConfig(cmd, nil, k8sVersion, rtime, "none")
 			if err != nil {
 				t.Fatalf("Got unexpected error %v during config generation", err)
 			}
diff --git a/pkg/minikube/cni/cni.go b/pkg/minikube/cni/cni.go
index 127afc8ef9ce..e648a20e0606 100644
--- a/pkg/minikube/cni/cni.go
+++ b/pkg/minikube/cni/cni.go
@@ -155,7 +155,7 @@ func chooseDefault(cc config.ClusterConfig) Manager {
 		return KindNet{cc: cc}
 	}
 
-	if cc.KubernetesConfig.ContainerRuntime != "docker" {
+	if cc.KubernetesConfig.ContainerRuntime != constants.Docker {
 		if driver.IsKIC(cc.Driver) {
 			klog.Infof("%q driver + %s runtime found, recommending kindnet", cc.Driver, cc.KubernetesConfig.ContainerRuntime)
 			return KindNet{cc: cc}
diff --git a/pkg/minikube/cni/disabled.go b/pkg/minikube/cni/disabled.go
index 0832798fce37..5c273ef12162 100644
--- a/pkg/minikube/cni/disabled.go
+++ b/pkg/minikube/cni/disabled.go
@@ -19,6 +19,7 @@ package cni
 import (
 	"k8s.io/klog/v2"
 	"k8s.io/minikube/pkg/minikube/config"
+	"k8s.io/minikube/pkg/minikube/constants"
 	"k8s.io/minikube/pkg/minikube/driver"
 )
 
@@ -34,7 +35,7 @@ func (c Disabled) String() string {
 
 // Apply enables the CNI
 func (c Disabled) Apply(r Runner) error {
-	if driver.IsKIC(c.cc.Driver) && c.cc.KubernetesConfig.ContainerRuntime != "docker" {
+	if driver.IsKIC(c.cc.Driver) && c.cc.KubernetesConfig.ContainerRuntime != constants.Docker {
 		klog.Warningf("CNI is recommended for %q driver and %q runtime - expect networking issues", c.cc.Driver, c.cc.KubernetesConfig.ContainerRuntime)
 	}
 
diff --git a/pkg/minikube/config/profile.go b/pkg/minikube/config/profile.go
index 155dd891b8b4..55b163d80929 100644
--- a/pkg/minikube/config/profile.go
+++ b/pkg/minikube/config/profile.go
@@ -63,6 +63,7 @@ func PrimaryControlPlane(cc *ClusterConfig) (Node, error) {
 		IP:                cc.KubernetesConfig.NodeIP,
 		Port:              cc.KubernetesConfig.NodePort,
 		KubernetesVersion: cc.KubernetesConfig.KubernetesVersion,
+		ContainerRuntime:  cc.KubernetesConfig.ContainerRuntime,
 		ControlPlane:      true,
 		Worker:            true,
 	}
diff --git a/pkg/minikube/config/types.go b/pkg/minikube/config/types.go
index 63822793c974..513a61ded6f7 100644
--- a/pkg/minikube/config/types.go
+++ b/pkg/minikube/config/types.go
@@ -136,6 +136,7 @@ type Node struct {
 	IP                string
 	Port              int
 	KubernetesVersion string
+	ContainerRuntime  string
 	ControlPlane      bool
 	Worker            bool
 }
diff --git a/pkg/minikube/constants/constants.go b/pkg/minikube/constants/constants.go
index 1300e2aa9bcd..324845f65117 100644
--- a/pkg/minikube/constants/constants.go
+++ b/pkg/minikube/constants/constants.go
@@ -60,8 +60,10 @@ const (
 	Containerd = "containerd"
 	// CRIO is the default name and spelling for the cri-o container runtime
 	CRIO = "crio"
+	// Docker is the default name and spelling for the docker container runtime
+	Docker = "docker"
 	// DefaultContainerRuntime is our default container runtime
-	DefaultContainerRuntime = "docker"
+	DefaultContainerRuntime = ""
 
 	// APIServerName is the default API server name
 	APIServerName = "minikubeCA"
diff --git a/pkg/minikube/download/download_test.go b/pkg/minikube/download/download_test.go
index e9b03a74f76f..e126b4532139 100644
--- a/pkg/minikube/download/download_test.go
+++ b/pkg/minikube/download/download_test.go
@@ -94,7 +94,7 @@ func testPreloadDownloadPreventsMultipleDownload(t *testing.T) {
 	var group sync.WaitGroup
 	group.Add(2)
 	dlCall := func() {
-		if err := Preload(constants.DefaultKubernetesVersion, constants.DefaultContainerRuntime, "docker"); err != nil {
+		if err := Preload(constants.DefaultKubernetesVersion, constants.Docker, "docker"); err != nil {
 			t.Logf("Failed to download preload: %+v (may be ok)", err)
 		}
 		group.Done()
@@ -119,7 +119,7 @@ func testPreloadNotExists(t *testing.T) {
 	getChecksum = func(k8sVersion, containerRuntime string) ([]byte, error) { return []byte("check"), nil }
 	ensureChecksumValid = func(k8sVersion, containerRuntime, path string, checksum []byte) error { return nil }
 
-	err := Preload(constants.DefaultKubernetesVersion, constants.DefaultContainerRuntime, "docker")
+	err := Preload(constants.DefaultKubernetesVersion, constants.Docker, "docker")
 	if err != nil {
 		t.Errorf("Expected no error when preload exists")
 	}
@@ -140,7 +140,7 @@ func testPreloadChecksumMismatch(t *testing.T) {
 		return fmt.Errorf("checksum mismatch")
 	}
 
-	err := Preload(constants.DefaultKubernetesVersion, constants.DefaultContainerRuntime, "docker")
+	err := Preload(constants.DefaultKubernetesVersion, constants.Docker, "docker")
 	expectedErrMsg := "checksum mismatch"
 	if err == nil {
 		t.Errorf("Expected error when checksum mismatches")
diff --git a/site/content/en/docs/commands/start.md b/site/content/en/docs/commands/start.md
index 75476d07e022..ffaf8f2ab233 100644
--- a/site/content/en/docs/commands/start.md
+++ b/site/content/en/docs/commands/start.md
@@ -31,7 +31,7 @@ minikube start [flags]
       --cache-images                      If true, cache docker images for the current bootstrapper and load them into the machine. Always false with --driver=none. (default true)
       --cert-expiration duration          Duration until minikube certificate expiration, defaults to three years (26280h). (default 26280h0m0s)
       --cni string                        CNI plug-in to use. Valid options: auto, bridge, calico, cilium, flannel, kindnet, or path to a CNI manifest (default: auto)
-      --container-runtime string          The container runtime to be used (docker, cri-o, containerd). (default "docker")
+      --container-runtime string          The container runtime to be used. Valid options: docker, cri-o, containerd (default: auto)
       --cpus string                       Number of CPUs allocated to Kubernetes. Use "max" to use the maximum number of CPUs. (default "2")
       --cri-socket string                 The cri socket path to be used.
       --delete-on-failure                 If set, delete the current cluster if start fails and try again. Defaults to false.
diff --git a/site/content/en/docs/handbook/config.md b/site/content/en/docs/handbook/config.md
index b10533cc213a..476641b6aded 100644
--- a/site/content/en/docs/handbook/config.md
+++ b/site/content/en/docs/handbook/config.md
@@ -89,7 +89,7 @@ minikube start --extra-config=kubeadm.ignore-preflight-errors=SystemVerification
 
 ## Runtime configuration
 
-The default container runtime in minikube is Docker. You can select it explicitly by using:
+The default container runtime in minikube varies. You can select one explicitly by using:
 
 ```shell
 minikube start --container-runtime=docker
@@ -100,6 +100,8 @@ Other options available are:
 * [containerd](https://github.com/containerd/containerd)
 * [cri-o](https://github.com/cri-o/cri-o)
 
+See
+
 ## Environment variables
 
 minikube supports passing environment variables instead of flags for every value listed in `minikube config`. This is done by passing an environment variable with the prefix `MINIKUBE_`.
diff --git a/test/integration/main_test.go b/test/integration/main_test.go
index 67e85b6b8bb5..8863c7f1056a 100644
--- a/test/integration/main_test.go
+++ b/test/integration/main_test.go
@@ -162,7 +162,7 @@ func ContainerRuntime() string {
 			return strings.TrimPrefix(s, flag)
 		}
 	}
-	return constants.DefaultContainerRuntime
+	return constants.Docker
 }
 
 // arm64Platform returns true if running on arm64/* platform
diff --git a/translations/strings.txt b/translations/strings.txt
index a699e75bcb05..178ceab29ce8 100644
--- a/translations/strings.txt
+++ b/translations/strings.txt
@@ -650,6 +650,7 @@
     "The path on the file system where the testing docs in markdown need to be saved": "",
    "The podman service within '{{.cluster}}' is not active": "",
    "The podman-env command is incompatible with multi-node clusters. Use the 'registry' add-on: https://minikube.sigs.k8s.io/docs/handbook/registry/": "",
+    "The podman-env command is only compatible with the \"crio\" runtime, but this cluster was configured to use the \"{{.runtime}}\" runtime.": "",
    "The requested memory allocation of {{.requested}}MiB does not leave room for system overhead (total system memory: {{.system_limit}}MiB). You may face stability issues.": "",
    "The service namespace": "",
    "The service/ingress {{.resource}} requires privileged ports to be exposed: {{.ports}}": "",
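
Reviewer note, not part of the patch: the heart of this change is the resolution order in `getContainerRuntime` — an explicit `--container-runtime` flag wins, otherwise the runtime recorded in an existing cluster's config is reused, otherwise `defaultRuntime` supplies the version-dependent default (still always `docker` at this stage; the `k8sVersion` parameter is the hook for a later Kubernetes-1.24+ switch). Below is a minimal standalone sketch of that precedence, with plain strings standing in for the viper/config plumbing; `resolveRuntime` and its parameters are hypothetical names, not part of minikube.

```go
package main

import "fmt"

// Sentinel mirroring constants.DefaultContainerRuntime after this patch:
// the empty string means "auto", i.e. the user expressed no preference.
const autoRuntime = ""

// defaultRuntime mirrors the patch's defaultRuntime: it currently ignores
// k8sVersion and always returns "docker"; the parameter exists so a future
// version-based default can be slotted in.
func defaultRuntime(k8sVersion string) string {
	return "docker"
}

// resolveRuntime sketches getContainerRuntime's precedence:
// explicit flag > runtime recorded in the existing cluster > default.
func resolveRuntime(flagValue, existingRuntime, k8sVersion string) string {
	rtime := flagValue
	if rtime == autoRuntime && existingRuntime != "" {
		rtime = existingRuntime // keep whatever the cluster was created with
	}
	if rtime == autoRuntime {
		rtime = defaultRuntime(k8sVersion)
	}
	return rtime
}

func main() {
	fmt.Println(resolveRuntime("crio", "docker", "v1.23.0")) // crio: explicit flag wins
	fmt.Println(resolveRuntime("", "containerd", "v1.23.0")) // containerd: existing cluster wins
	fmt.Println(resolveRuntime("", "", "v1.23.0"))           // docker: fallback default
}
```

This ordering is also why `validateRuntime` now short-circuits on the empty string: "auto" has to pass validation before it has been resolved to a concrete runtime.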