diff --git a/cmd/minikube/cmd/node_add.go b/cmd/minikube/cmd/node_add.go
index 33440657943d..07da8bf080c6 100644
--- a/cmd/minikube/cmd/node_add.go
+++ b/cmd/minikube/cmd/node_add.go
@@ -18,6 +18,7 @@ package cmd
 
 import (
 	"github.com/spf13/cobra"
+	"github.com/spf13/viper"
 	"k8s.io/minikube/pkg/minikube/config"
 	"k8s.io/minikube/pkg/minikube/driver"
 	"k8s.io/minikube/pkg/minikube/exit"
@@ -54,6 +55,11 @@ var nodeAddCmd = &cobra.Command{
 			KubernetesVersion: cc.KubernetesConfig.KubernetesVersion,
 		}
 
+		// Decrease the default amount of memory we use per VM if this is the first worker node and --memory was not specified
+		if len(cc.Nodes) == 1 && viper.GetString(memory) == "" {
+			cc.Memory = 2200
+		}
+
 		if err := node.Add(cc, n); err != nil {
 			_, err := maybeDeleteAndRetry(*cc, n, nil, err)
 			if err != nil {
diff --git a/cmd/minikube/cmd/start.go b/cmd/minikube/cmd/start.go
index 4acabcc96988..7a0faac6f3ba 100644
--- a/cmd/minikube/cmd/start.go
+++ b/cmd/minikube/cmd/start.go
@@ -715,7 +715,7 @@ func memoryLimits(drvName string) (int, int, error) {
 }
 
 // suggestMemoryAllocation calculates the default memory footprint in MB
-func suggestMemoryAllocation(sysLimit int, containerLimit int) int {
+func suggestMemoryAllocation(sysLimit int, containerLimit int, nodes int) int {
 	if mem := viper.GetInt(memory); mem != 0 {
 		return mem
 	}
@@ -737,6 +737,10 @@ func suggestMemoryAllocation(sysLimit int, containerLimit int) int {
 	// Suggest 25% of RAM, rounded to nearest 100MB. Hyper-V requires an even number!
 	suggested := int(float32(sysLimit)/400.0) * 100
 
+	if nodes > 1 {
+		suggested /= nodes
+	}
+
 	if suggested > maximum {
 		return maximum
 	}
diff --git a/cmd/minikube/cmd/start_flags.go b/cmd/minikube/cmd/start_flags.go
index 329403622828..57417471c663 100644
--- a/cmd/minikube/cmd/start_flags.go
+++ b/cmd/minikube/cmd/start_flags.go
@@ -220,7 +220,7 @@ func generateClusterConfig(cmd *cobra.Command, existing *config.ClusterConfig, k
 		glog.Warningf("Unable to query memory limits: %v", err)
 	}
 
-	mem := suggestMemoryAllocation(sysLimit, containerLimit)
+	mem := suggestMemoryAllocation(sysLimit, containerLimit, viper.GetInt(nodes))
 	if cmd.Flags().Changed(memory) {
 		mem, err = pkgutil.CalculateSizeInMB(viper.GetString(memory))
 		if err != nil {
diff --git a/cmd/minikube/cmd/start_test.go b/cmd/minikube/cmd/start_test.go
index 9c6dfa93a1af..3e3e6597021a 100644
--- a/cmd/minikube/cmd/start_test.go
+++ b/cmd/minikube/cmd/start_test.go
@@ -185,25 +185,34 @@ func TestSuggestMemoryAllocation(t *testing.T) {
 		description    string
 		sysLimit       int
 		containerLimit int
+		nodes          int
 		want           int
 	}{
-		{"128GB sys", 128000, 0, 6000},
-		{"64GB sys", 64000, 0, 6000},
-		{"16GB sys", 16384, 0, 4000},
-		{"odd sys", 14567, 0, 3600},
-		{"4GB sys", 4096, 0, 2200},
-		{"2GB sys", 2048, 0, 2048},
-		{"Unable to poll sys", 0, 0, 2200},
-		{"128GB sys, 16GB container", 128000, 16384, 16336},
-		{"64GB sys, 16GB container", 64000, 16384, 16000},
-		{"16GB sys, 4GB container", 16384, 4096, 4000},
-		{"4GB sys, 3.5GB container", 16384, 3500, 3452},
-		{"2GB sys, 2GB container", 16384, 2048, 2048},
-		{"2GB sys, unable to poll container", 16384, 0, 4000},
+		{"128GB sys", 128000, 0, 1, 6000},
+		{"64GB sys", 64000, 0, 1, 6000},
+		{"32GB sys", 32768, 0, 1, 6000},
+		{"16GB sys", 16384, 0, 1, 4000},
+		{"odd sys", 14567, 0, 1, 3600},
+		{"4GB sys", 4096, 0, 1, 2200},
+		{"2GB sys", 2048, 0, 1, 2048},
+		{"Unable to poll sys", 0, 0, 1, 2200},
+		{"128GB sys, 16GB container", 128000, 16384, 1, 16336},
+		{"64GB sys, 16GB container", 64000, 16384, 1, 16000},
+		{"16GB sys, 4GB container", 16384, 4096, 1, 4000},
+		{"16GB sys, 3.5GB container", 16384, 3500, 1, 3452},
+		{"16GB sys, 2GB container", 16384, 2048, 1, 2048},
+		{"16GB sys, unable to poll container", 16384, 0, 1, 4000},
+		{"128GB sys 2 nodes", 128000, 0, 2, 6000},
+		{"8GB sys 3 nodes", 8192, 0, 3, 2200},
+		{"16GB sys 2 nodes", 16384, 0, 2, 2200},
+		{"32GB sys 2 nodes", 32768, 0, 2, 4050},
+		{"odd sys 2 nodes", 14567, 0, 2, 2200},
+		{"4GB sys 2 nodes", 4096, 0, 2, 2200},
+		{"2GB sys 3 nodes", 2048, 0, 3, 2048},
 	}
 	for _, test := range tests {
 		t.Run(test.description, func(t *testing.T) {
-			got := suggestMemoryAllocation(test.sysLimit, test.containerLimit)
+			got := suggestMemoryAllocation(test.sysLimit, test.containerLimit, test.nodes)
 			if got != test.want {
 				t.Errorf("defaultMemorySize(sys=%d, container=%d) = %d, want: %d", test.sysLimit, test.containerLimit, got, test.want)
 			}
diff --git a/pkg/minikube/node/start.go b/pkg/minikube/node/start.go
index aa6a908e0b0b..9571557ea14e 100644
--- a/pkg/minikube/node/start.go
+++ b/pkg/minikube/node/start.go
@@ -239,7 +239,8 @@ func configureRuntimes(runner cruntime.CommandRunner, cc config.ClusterConfig, k
 		disableOthers = false
 	}
 
-	// Preload is overly invasive for bare metal, and caching is not meaningful. KIC handled elsewhere.
+	// Preload is overly invasive for bare metal, and caching is not meaningful.
+	// KIC handles preload elsewhere.
 	if driver.IsVM(cc.Driver) {
 		if err := cr.Preload(cc.KubernetesConfig); err != nil {
 			switch err.(type) {
diff --git a/test/integration/multinode_test.go b/test/integration/multinode_test.go
index 5a9cc4f16aa3..c4814cd41558 100644
--- a/test/integration/multinode_test.go
+++ b/test/integration/multinode_test.go
@@ -29,7 +29,6 @@ func TestMultiNode(t *testing.T) {
 	if NoneDriver() {
 		t.Skip("none driver does not support multinode")
 	}
-	MaybeParallel(t)
 
 	type validatorFunc func(context.Context, *testing.T, string)
 	profile := UniqueProfileName("multinode")
@@ -65,7 +64,7 @@ func validateMultiNodeStart(ctx context.Context, t *testing.T, profile string) {
 	}
 
 	// Make sure minikube status shows 2 nodes
-	rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "status"))
+	rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "status", "--alsologtostderr"))
 	if err != nil {
 		t.Fatalf("failed to run minikube status. args %q : %v", rr.Command(), err)
 	}
@@ -89,7 +88,7 @@ func validateAddNodeToMultiNode(ctx context.Context, t *testing.T, profile strin
 	}
 
 	// Make sure minikube status shows 3 nodes
-	rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "status"))
+	rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "status", "--alsologtostderr"))
 	if err != nil {
 		t.Fatalf("failed to run minikube status. args %q : %v", rr.Command(), err)
 	}
@@ -121,7 +120,7 @@ func validateStopRunningNode(ctx context.Context, t *testing.T, profile string)
 	}
 
 	// Make sure minikube status shows 2 running nodes and 1 stopped one
-	rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "status"))
+	rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "status", "--alsologtostderr"))
 	if err != nil && rr.ExitCode != 7 {
 		t.Fatalf("failed to run minikube status. args %q : %v", rr.Command(), err)
 	}
@@ -177,7 +176,7 @@ func validateDeleteNodeFromMultiNode(ctx context.Context, t *testing.T, profile
 	}
 
 	// Make sure status is back down to 2 hosts
-	rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "status"))
+	rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "status", "--alsologtostderr"))
 	if err != nil {
 		t.Fatalf("failed to run minikube status. args %q : %v", rr.Command(), err)
 	}
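
Reviewer note: below is a self-contained sketch of suggestMemoryAllocation as it reads after this patch, reconstructed from the hunks above and the updated test table. The fallback (2200MB) and maximum (6000MB) clamps and the containerLimit slack come from context lines the hunks only partially show, and the early viper.GetInt(memory) override is omitted to keep the sketch runnable, so treat it as illustrative rather than authoritative:

    package main

    import "fmt"

    // suggestMemoryAllocation, sketched as it reads after this patch.
    // fallback/maximum and the containerLimit slack are assumed from
    // surrounding context and the test expectations, not shown in full
    // by the hunks above.
    func suggestMemoryAllocation(sysLimit, containerLimit, nodes int) int {
        fallback := 2200 // MB; matches the "Unable to poll sys" test row
        maximum := 6000  // MB; matches the 128GB/64GB/32GB test rows

        if sysLimit > 0 && fallback > sysLimit {
            return sysLimit
        }

        // With a container limit, cap below the limit to leave slack
        // for non-minikube components.
        if containerLimit > 0 {
            if fallback > containerLimit {
                return containerLimit
            }
            maximum = containerLimit - 48
        }

        // ~25% of RAM in 100MB steps (Hyper-V requires an even number),
        // then split across nodes -- the new behavior in this patch.
        suggested := int(float32(sysLimit)/400.0) * 100
        if nodes > 1 {
            suggested /= nodes
        }

        if suggested > maximum {
            return maximum
        }
        if suggested < fallback {
            return fallback
        }
        return suggested
    }

    func main() {
        // Mirrors two rows of the updated test table: a 32GB host keeps
        // the 6000MB cap with 1 node, and gets 4050MB per VM with 2.
        fmt.Println(suggestMemoryAllocation(32768, 0, 1)) // 6000
        fmt.Println(suggestMemoryAllocation(32768, 0, 2)) // 4050
    }

Note that the per-node division happens before the clamps, which is why smaller multi-node hosts land on the 2200MB floor (e.g. the "16GB sys 2 nodes" row) rather than scaling below the single-node minimum.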
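
Reviewer note: the node_add.go hunk applies the same idea from the other direction: when "minikube node add" grows a single-node cluster and the user never passed --memory, the per-VM memory drops to the 2200MB floor. A minimal sketch of just that guard, with ClusterConfig pared down to the two fields involved and the hypothetical memoryFlagSet standing in for the real viper.GetString(memory) == "" check:

    package main

    import "fmt"

    // ClusterConfig is reduced to the fields the guard reads and writes.
    type ClusterConfig struct {
        Nodes  []string
        Memory int // MB per VM
    }

    // adjustMemoryForFirstWorker (hypothetical name) mirrors the new
    // block in node_add.go: downgrade only when exactly one node exists,
    // so the incoming node is the first worker, and the user never set
    // an explicit --memory.
    func adjustMemoryForFirstWorker(cc *ClusterConfig, memoryFlagSet bool) {
        if len(cc.Nodes) == 1 && !memoryFlagSet {
            cc.Memory = 2200
        }
    }

    func main() {
        cc := ClusterConfig{Nodes: []string{"m01"}, Memory: 6000}
        adjustMemoryForFirstWorker(&cc, false)
        fmt.Println(cc.Memory) // 2200
    }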