Merge pull request #3767 from tstromberg/cache_images_int_test2
caching: Fix containerd, improve console messages, add integration tests
tstromberg authored Mar 14, 2019
2 parents 844a3ee + 4f44208 commit 016e3f3
Showing 6 changed files with 40 additions and 58 deletions.
4 changes: 2 additions & 2 deletions cmd/minikube/cmd/start.go
@@ -237,7 +237,7 @@ func beginCacheImages(g *errgroup.Group, kVersion string) {
if !viper.GetBool(cacheImages) {
return
}
console.OutStyle("caching", "Caching images in the background ...")
console.OutStyle("caching", "Downloading Kubernetes %s images in the background ...", kVersion)
g.Go(func() error {
return machine.CacheImagesForBootstrapper(kVersion, viper.GetString(cmdcfg.Bootstrapper))
})
@@ -487,7 +487,7 @@ func waitCacheImages(g *errgroup.Group) {
if !viper.GetBool(cacheImages) {
return
}
console.OutStyle("waiting", "Waiting for image caching to complete ...")
console.OutStyle("waiting", "Waiting for image downloads to complete ...")
if err := g.Wait(); err != nil {
glog.Errorln("Error caching images: ", err)
}
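The two helpers touched above are the begin/wait halves of an errgroup-based background download, which is why only the console wording changes here. A minimal, self-contained sketch of that pattern, with downloadImages and the hard-coded version string standing in for minikube's real machine.CacheImagesForBootstrapper call:

    package main

    import (
    	"fmt"

    	"golang.org/x/sync/errgroup"
    )

    // downloadImages stands in for machine.CacheImagesForBootstrapper.
    func downloadImages(version string) error {
    	fmt.Printf("Downloading Kubernetes %s images in the background ...\n", version)
    	return nil
    }

    func main() {
    	var g errgroup.Group

    	// beginCacheImages: kick off the download without blocking start-up.
    	g.Go(func() error {
    		return downloadImages("v1.13.4")
    	})

    	// ... the rest of `minikube start` runs here ...

    	// waitCacheImages: block until the background work has finished.
    	fmt.Println("Waiting for image downloads to complete ...")
    	if err := g.Wait(); err != nil {
    		fmt.Println("Error caching images:", err)
    	}
    }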
5 changes: 2 additions & 3 deletions pkg/minikube/bootstrapper/kubeadm/kubeadm.go
@@ -373,9 +373,8 @@ func NewKubeletConfig(k8s config.KubernetesConfig, r cruntime.Manager) (string,

func (k *KubeadmBootstrapper) UpdateCluster(cfg config.KubernetesConfig) error {
if cfg.ShouldLoadCachedImages {
err := machine.LoadImages(k.c, constants.GetKubeadmCachedImages(cfg.KubernetesVersion), constants.ImageCacheDir)
if err != nil {
return errors.Wrap(err, "loading cached images")
if err := machine.LoadImages(k.c, constants.GetKubeadmCachedImages(cfg.KubernetesVersion), constants.ImageCacheDir); err != nil {
console.Failure("Unable to load cached images: %v", err)
}
}
r, err := cruntime.New(cruntime.Config{Type: cfg.ContainerRuntime, Socket: cfg.CRISocket})
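The change above is behavioural, not just stylistic: a failed load of cached images during UpdateCluster is now reported with console.Failure and the bootstrapper carries on configuring the container runtime, rather than wrapping the error and aborting the update. The image cache is an optimization, so the likely intent is that a broken cache degrades to pulling images over the network instead of failing the whole start.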
2 changes: 1 addition & 1 deletion pkg/minikube/cruntime/containerd.go
@@ -80,7 +80,7 @@ func (r *Containerd) Disable() error {
// LoadImage loads an image into this runtime
func (r *Containerd) LoadImage(path string) error {
glog.Infof("Loading image: %s", path)
return r.Runner.Run(fmt.Sprintf("sudo ctr cri load %s", path))
return r.Runner.Run(fmt.Sprintf("sudo ctr images import %s", path))
}

// KubeletOptions returns kubelet options for a containerd
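For context, ctr images import is the containerd CLI's subcommand for loading a saved image tarball into its image store, for example sudo ctr images import /tmp/kube-proxy_v1.13.4.tar (an illustrative path, not minikube's real cache layout); the previous ctr cri load invocation evidently did not work against the installed containerd, which is the "Fix containerd" part of this commit message.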
25 changes: 11 additions & 14 deletions test/integration/functional_test.go
@@ -27,8 +27,8 @@ import (
)

func TestFunctional(t *testing.T) {
minikubeRunner := NewMinikubeRunner(t)
minikubeRunner.EnsureRunning()
r := NewMinikubeRunner(t)
r.EnsureRunning()
// This one is not parallel, and ensures the cluster comes up
// before we run any other tests.
t.Run("Status", testClusterStatus)
@@ -41,7 +41,7 @@ func TestFunctional(t *testing.T) {
t.Run("Provisioning", testProvisioning)
t.Run("Tunnel", testTunnel)

if !usingNoneDriver(minikubeRunner) {
if !usingNoneDriver(r) {
t.Run("EnvVars", testClusterEnv)
t.Run("SSH", testClusterSSH)
t.Run("IngressController", testIngressController)
@@ -50,25 +50,22 @@
}

func TestFunctionalContainerd(t *testing.T) {
minikubeRunner := NewMinikubeRunner(t)
r := NewMinikubeRunner(t)

if usingNoneDriver(minikubeRunner) {
if usingNoneDriver(r) {
t.Skip("Can't run containerd backend with none driver")
}

if minikubeRunner.GetStatus() != state.None.String() {
minikubeRunner.RunCommand("delete", true)
if r.GetStatus() != state.None.String() {
r.RunCommand("delete", true)
}

minikubeRunner.SetRuntime("containerd")
minikubeRunner.EnsureRunning()

r.Start("--container-runtime=containerd", "--docker-opt containerd=/var/run/containerd/containerd.sock")
t.Run("Gvisor", testGvisor)
t.Run("GvisorRestart", testGvisorRestart)
minikubeRunner.RunCommand("delete", true)
r.RunCommand("delete", true)
}

// usingNoneDriver returns true if using the none driver
func usingNoneDriver(runner util.MinikubeRunner) bool {
return strings.Contains(runner.StartArgs, "--vm-driver=none")
func usingNoneDriver(r util.MinikubeRunner) bool {
return strings.Contains(r.StartArgs, "--vm-driver=none")
}
43 changes: 21 additions & 22 deletions test/integration/start_stop_delete_test.go
@@ -30,48 +30,47 @@ import (

func TestStartStop(t *testing.T) {
tests := []struct {
runtime string
name string
args []string
}{
{runtime: "docker"},
{runtime: "containerd"},
{runtime: "crio"},
{"docker+cache", []string{"--container-runtime=docker", "--cache-images"}},
{"containerd+cache", []string{"--container-runtime=containerd", "--docker-opt containerd=/var/run/containerd/containerd.sock", "--cache-images"}},
{"crio+cache", []string{"--container-runtime=crio", "--cache-images"}},
}

for _, test := range tests {
t.Run(test.runtime, func(t *testing.T) {
runner := NewMinikubeRunner(t)
if test.runtime != "docker" && usingNoneDriver(runner) {
t.Skipf("skipping, can't use %s with none driver", test.runtime)
t.Run(test.name, func(t *testing.T) {
r := NewMinikubeRunner(t)
if !strings.Contains(test.name, "docker") && usingNoneDriver(r) {
t.Skipf("skipping %s - incompatible with none driver", test.name)
}

runner.RunCommand("config set WantReportErrorPrompt false", true)
runner.RunCommand("delete", false)
runner.CheckStatus(state.None.String())
r.RunCommand("config set WantReportErrorPrompt false", true)
r.RunCommand("delete", false)
r.CheckStatus(state.None.String())
r.Start(test.args...)
r.CheckStatus(state.Running.String())

runner.SetRuntime(test.runtime)
runner.Start()
runner.CheckStatus(state.Running.String())

ip := runner.RunCommand("ip", true)
ip := r.RunCommand("ip", true)
ip = strings.TrimRight(ip, "\n")
if net.ParseIP(ip) == nil {
t.Fatalf("IP command returned an invalid address: %s", ip)
}

checkStop := func() error {
runner.RunCommand("stop", true)
return runner.CheckStatusNoFail(state.Stopped.String())
r.RunCommand("stop", true)
return r.CheckStatusNoFail(state.Stopped.String())
}

if err := util.Retry(t, checkStop, 5*time.Second, 6); err != nil {
t.Fatalf("timed out while checking stopped status: %v", err)
}

runner.Start()
runner.CheckStatus(state.Running.String())
r.Start(test.args...)
r.CheckStatus(state.Running.String())

runner.RunCommand("delete", true)
runner.CheckStatus(state.None.String())
r.RunCommand("delete", true)
r.CheckStatus(state.None.String())
})
}
}
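Because each case is now just a display name plus the literal start flags, extending coverage is a one-line addition to the table. A hypothetical extra entry, not part of this commit, that exercises docker with caching disabled might look like:

    {"docker+nocache", []string{"--container-runtime=docker", "--cache-images=false"}},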
19 changes: 3 additions & 16 deletions test/integration/util/util.go
@@ -184,11 +184,6 @@ func (m *MinikubeRunner) RunDaemon2(command string) (*exec.Cmd, *bufio.Reader, *
return cmd, bufio.NewReader(stdoutPipe), bufio.NewReader(stderrPipe)
}

// SetRuntime saves the runtime backend
func (m *MinikubeRunner) SetRuntime(runtime string) {
m.Runtime = runtime
}

func (m *MinikubeRunner) SSH(command string) (string, error) {
path, _ := filepath.Abs(m.BinaryPath)
cmd := exec.Command(path, "ssh", command)
@@ -202,17 +197,9 @@ func (m *MinikubeRunner) SSH(command string) (string, error) {
return string(stdout), nil
}

func (m *MinikubeRunner) Start() {
opts := ""
// TODO(tstromberg): Deprecate this in favor of making it possible for tests to define explicit flags.
switch r := m.Runtime; r {
case "containerd":
opts = "--container-runtime=containerd --docker-opt containerd=/var/run/containerd/containerd.sock"
case "crio":
opts = "--container-runtime=cri-o"
}
m.RunCommand(fmt.Sprintf("start %s %s %s --alsologtostderr --v=5", m.StartArgs, m.Args, opts), true)

func (m *MinikubeRunner) Start(opts ...string) {
cmd := fmt.Sprintf("start %s %s %s --alsologtostderr --v=2", m.StartArgs, m.Args, strings.Join(opts, " "))
m.RunCommand(cmd, true)
}

func (m *MinikubeRunner) EnsureRunning() {
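With the Runtime field and its switch statement gone, each caller now spells out its own flags and Start simply appends them after StartArgs, Args, and --alsologtostderr --v=2. A hypothetical usage sketch from the integration-test side (TestStartFlagsExample does not exist in this commit; the flag values are taken verbatim from the updated tests, and the snippet assumes the test package's existing imports and helpers):

    func TestStartFlagsExample(t *testing.T) {
    	r := NewMinikubeRunner(t)

    	// Each test states its runtime configuration explicitly, e.g. the flags
    	// TestFunctionalContainerd now passes:
    	r.Start("--container-runtime=containerd",
    		"--docker-opt containerd=/var/run/containerd/containerd.sock")
    	r.CheckStatus(state.Running.String())

    	r.RunCommand("delete", true)
    }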
