From bc91f220fe235c9e421577b714e6fd76c7cbf780 Mon Sep 17 00:00:00 2001 From: Nick Santos Date: Mon, 7 Jun 2021 11:19:53 -0600 Subject: [PATCH] docker: change how we model "image builds show up in the cluster immediately" (#4598) * docker: change how we model "image builds show up in the cluster immediately" We used to treat this as a property of the cluster type + the container runtime. But this made it impossible to support clusters that sometimes use your docker runtime, and sometimes do not. For examples, see: - https://github.com/tilt-dev/tilt/issues/4587 - https://github.com/tilt-dev/tilt/issues/3654 - https://github.com/tilt-dev/tilt/issues/1729 - https://github.com/tilt-dev/tilt/issues/4544 This changes the data model so that "image builds show up in the cluster" is a property of the Docker client, not of the cluster. This should be much more flexible and correct, and help us support multiple clusters. Fixes https://github.com/tilt-dev/tilt/issues/4544 * Update internal/docker/env.go Co-authored-by: Maia McCormick Co-authored-by: Maia McCormick --- internal/build/docker_builder.go | 8 ++ internal/build/test_utils.go | 2 +- internal/cli/wire_gen.go | 76 +++++++-------- .../docker_container_updater.go | 5 + internal/docker/client_test.go | 54 ++++++----- internal/docker/clients.go | 4 +- internal/docker/env.go | 97 ++++++++++++++----- internal/docker/fake_client.go | 4 +- internal/engine/build_and_deployer_test.go | 8 +- .../buildcontrol/image_build_and_deployer.go | 55 ++++++----- .../image_build_and_deployer_test.go | 10 +- .../live_update_build_and_deployer.go | 28 +++--- .../live_update_build_and_deployer_test.go | 6 +- internal/engine/buildcontrol/update_mode.go | 8 +- internal/engine/buildcontrol/wire.go | 11 +-- internal/engine/buildcontrol/wire_gen.go | 20 ++-- internal/engine/wire.go | 27 +++++- internal/engine/wire_gen.go | 33 ++++++- internal/k8s/env.go | 13 ++- internal/k8s/minikube.go | 36 +++++-- internal/k8s/minikube_fake.go | 4 +- internal/k8s/minikube_test.go | 5 +- 22 files changed, 321 insertions(+), 193 deletions(-) diff --git a/internal/build/docker_builder.go b/internal/build/docker_builder.go index bd7277d256..ce7f4829a3 100644 --- a/internal/build/docker_builder.go +++ b/internal/build/docker_builder.go @@ -17,6 +17,7 @@ import ( "github.com/pkg/errors" "github.com/tilt-dev/tilt/internal/container" + "github.com/tilt-dev/tilt/internal/k8s" "github.com/tilt-dev/tilt/internal/docker" "github.com/tilt-dev/tilt/internal/dockerfile" @@ -35,6 +36,9 @@ type dockerImageBuilder struct { } type DockerBuilder interface { + // Returns whether this docker builder is going to build to the given kubernetes context. 
+ WillBuildToKubeContext(kctx k8s.KubeContext) bool + BuildImage(ctx context.Context, ps *PipelineState, refs container.RefSet, db model.DockerBuild, filter model.PathMatcher) (container.TaggedRefs, error) DumpImageDeployRef(ctx context.Context, ref string) (reference.NamedTagged, error) PushImage(ctx context.Context, name reference.NamedTagged) error @@ -55,6 +59,10 @@ func NewDockerImageBuilder(dCli docker.Client, extraLabels dockerfile.Labels) *d } } +func (d *dockerImageBuilder) WillBuildToKubeContext(kctx k8s.KubeContext) bool { + return d.dCli.Env().WillBuildToKubeContext(kctx) +} + func (d *dockerImageBuilder) BuildImage(ctx context.Context, ps *PipelineState, refs container.RefSet, db model.DockerBuild, filter model.PathMatcher) (container.TaggedRefs, error) { paths := []PathMapping{ { diff --git a/internal/build/test_utils.go b/internal/build/test_utils.go index ddd6e365f4..0ac0faa200 100644 --- a/internal/build/test_utils.go +++ b/internal/build/test_utils.go @@ -48,7 +48,7 @@ func newDockerBuildFixture(t testing.TB) *dockerBuildFixture { ctx, _, _ := testutils.CtxAndAnalyticsForTest() env := k8s.EnvGKE - dEnv := docker.ProvideClusterEnv(ctx, env, wmcontainer.RuntimeDocker, k8s.FakeMinikube{}) + dEnv := docker.ProvideClusterEnv(ctx, "gke", env, wmcontainer.RuntimeDocker, k8s.FakeMinikube{}) dCli := docker.NewDockerClient(ctx, docker.Env(dEnv)) _, ok := dCli.(*docker.Cli) // If it wasn't an actual Docker client, it's an exploding client diff --git a/internal/cli/wire_gen.go b/internal/cli/wire_gen.go index 20cd2c1b5a..25515ea4e6 100644 --- a/internal/cli/wire_gen.go +++ b/internal/cli/wire_gen.go @@ -98,8 +98,8 @@ func wireTiltfileResult(ctx context.Context, analytics2 *analytics.TiltAnalytics versionExtension := version.NewExtension(tiltBuild) configExtension := config.NewExtension(subcommand) runtime := k8s.ProvideContainerRuntime(ctx, client) - clusterEnv := docker.ProvideClusterEnv(ctx, env, runtime, minikubeClient) - localEnv := docker.ProvideLocalEnv(ctx, clusterEnv) + clusterEnv := docker.ProvideClusterEnv(ctx, kubeContext, env, runtime, minikubeClient) + localEnv := docker.ProvideLocalEnv(ctx, kubeContext, env, clusterEnv) dockerComposeClient := dockercompose.NewDockerComposeClient(localEnv) webHost := provideWebHost() defaults := _wireDefaultsValue @@ -119,20 +119,20 @@ func wireDockerPrune(ctx context.Context, analytics2 *analytics.TiltAnalytics, s if err != nil { return dpDeps{}, err } + kubeContext, err := k8s.ProvideKubeContext(apiConfig) + if err != nil { + return dpDeps{}, err + } env := k8s.ProvideEnv(ctx, apiConfig) restConfigOrError := k8s.ProvideRESTConfig(clientConfig) clientsetOrError := k8s.ProvideClientset(restConfigOrError) portForwardClient := k8s.ProvidePortForwardClient(restConfigOrError, clientsetOrError) namespace := k8s.ProvideConfigNamespace(clientConfig) - kubeContext, err := k8s.ProvideKubeContext(apiConfig) - if err != nil { - return dpDeps{}, err - } minikubeClient := k8s.ProvideMinikubeClient(kubeContext) client := k8s.ProvideK8sClient(ctx, env, restConfigOrError, clientsetOrError, portForwardClient, namespace, minikubeClient, clientConfig) runtime := k8s.ProvideContainerRuntime(ctx, client) - clusterEnv := docker.ProvideClusterEnv(ctx, env, runtime, minikubeClient) - localEnv := docker.ProvideLocalEnv(ctx, clusterEnv) + clusterEnv := docker.ProvideClusterEnv(ctx, kubeContext, env, runtime, minikubeClient) + localEnv := docker.ProvideLocalEnv(ctx, kubeContext, env, clusterEnv) localClient := docker.ProvideLocalCli(ctx, localEnv) clusterClient, 
err := docker.ProvideClusterCli(ctx, localEnv, clusterEnv, localClient) if err != nil { @@ -257,8 +257,8 @@ func wireCmdUp(ctx context.Context, analytics3 *analytics.TiltAnalytics, cmdTags subscriber := portforward.NewSubscriber(client, deferredClient) fswatchManifestSubscriber := fswatch.NewManifestSubscriber(deferredClient) runtime := k8s.ProvideContainerRuntime(ctx, client) - clusterEnv := docker.ProvideClusterEnv(ctx, env, runtime, minikubeClient) - localEnv := docker.ProvideLocalEnv(ctx, clusterEnv) + clusterEnv := docker.ProvideClusterEnv(ctx, kubeContext, env, runtime, minikubeClient) + localEnv := docker.ProvideLocalEnv(ctx, kubeContext, env, clusterEnv) localClient := docker.ProvideLocalCli(ctx, localEnv) clusterClient, err := docker.ProvideClusterCli(ctx, localEnv, clusterEnv, localClient) if err != nil { @@ -268,19 +268,19 @@ func wireCmdUp(ctx context.Context, analytics3 *analytics.TiltAnalytics, cmdTags dockerUpdater := containerupdate.NewDockerUpdater(switchCli) execUpdater := containerupdate.NewExecUpdater(client) buildcontrolUpdateModeFlag := provideUpdateModeFlag() - updateMode, err := buildcontrol.ProvideUpdateMode(buildcontrolUpdateModeFlag, env, runtime) + updateMode, err := buildcontrol.ProvideUpdateMode(buildcontrolUpdateModeFlag, kubeContext, clusterEnv) if err != nil { return CmdUpDeps{}, err } clock := build.ProvideClock() - liveUpdateBuildAndDeployer := buildcontrol.NewLiveUpdateBuildAndDeployer(dockerUpdater, execUpdater, updateMode, env, runtime, clock) + liveUpdateBuildAndDeployer := buildcontrol.NewLiveUpdateBuildAndDeployer(dockerUpdater, execUpdater, updateMode, kubeContext, clock) labels := _wireLabelsValue dockerImageBuilder := build.NewDockerImageBuilder(switchCli, labels) dockerBuilder := build.DefaultDockerBuilder(dockerImageBuilder) execCustomBuilder := build.NewExecCustomBuilder(switchCli, clock) clusterName := k8s.ProvideClusterName(ctx, apiConfig) kindLoader := buildcontrol.NewKINDLoader(env, clusterName) - imageBuildAndDeployer := buildcontrol.NewImageBuildAndDeployer(dockerBuilder, execCustomBuilder, client, env, analytics3, updateMode, clock, runtime, kindLoader) + imageBuildAndDeployer := buildcontrol.NewImageBuildAndDeployer(dockerBuilder, execCustomBuilder, client, env, kubeContext, analytics3, updateMode, clock, kindLoader) dockerComposeClient := dockercompose.NewDockerComposeClient(localEnv) imageBuilder := buildcontrol.NewImageBuilder(dockerBuilder, execCustomBuilder, updateMode) dockerComposeBuildAndDeployer := buildcontrol.NewDockerComposeBuildAndDeployer(dockerComposeClient, switchCli, imageBuilder, clock) @@ -451,8 +451,8 @@ func wireCmdCI(ctx context.Context, analytics3 *analytics.TiltAnalytics, subcomm subscriber := portforward.NewSubscriber(client, deferredClient) fswatchManifestSubscriber := fswatch.NewManifestSubscriber(deferredClient) runtime := k8s.ProvideContainerRuntime(ctx, client) - clusterEnv := docker.ProvideClusterEnv(ctx, env, runtime, minikubeClient) - localEnv := docker.ProvideLocalEnv(ctx, clusterEnv) + clusterEnv := docker.ProvideClusterEnv(ctx, kubeContext, env, runtime, minikubeClient) + localEnv := docker.ProvideLocalEnv(ctx, kubeContext, env, clusterEnv) localClient := docker.ProvideLocalCli(ctx, localEnv) clusterClient, err := docker.ProvideClusterCli(ctx, localEnv, clusterEnv, localClient) if err != nil { @@ -462,19 +462,19 @@ func wireCmdCI(ctx context.Context, analytics3 *analytics.TiltAnalytics, subcomm dockerUpdater := containerupdate.NewDockerUpdater(switchCli) execUpdater := 
containerupdate.NewExecUpdater(client) buildcontrolUpdateModeFlag := provideUpdateModeFlag() - updateMode, err := buildcontrol.ProvideUpdateMode(buildcontrolUpdateModeFlag, env, runtime) + updateMode, err := buildcontrol.ProvideUpdateMode(buildcontrolUpdateModeFlag, kubeContext, clusterEnv) if err != nil { return CmdCIDeps{}, err } clock := build.ProvideClock() - liveUpdateBuildAndDeployer := buildcontrol.NewLiveUpdateBuildAndDeployer(dockerUpdater, execUpdater, updateMode, env, runtime, clock) + liveUpdateBuildAndDeployer := buildcontrol.NewLiveUpdateBuildAndDeployer(dockerUpdater, execUpdater, updateMode, kubeContext, clock) labels := _wireLabelsValue dockerImageBuilder := build.NewDockerImageBuilder(switchCli, labels) dockerBuilder := build.DefaultDockerBuilder(dockerImageBuilder) execCustomBuilder := build.NewExecCustomBuilder(switchCli, clock) clusterName := k8s.ProvideClusterName(ctx, apiConfig) kindLoader := buildcontrol.NewKINDLoader(env, clusterName) - imageBuildAndDeployer := buildcontrol.NewImageBuildAndDeployer(dockerBuilder, execCustomBuilder, client, env, analytics3, updateMode, clock, runtime, kindLoader) + imageBuildAndDeployer := buildcontrol.NewImageBuildAndDeployer(dockerBuilder, execCustomBuilder, client, env, kubeContext, analytics3, updateMode, clock, kindLoader) dockerComposeClient := dockercompose.NewDockerComposeClient(localEnv) imageBuilder := buildcontrol.NewImageBuilder(dockerBuilder, execCustomBuilder, updateMode) dockerComposeBuildAndDeployer := buildcontrol.NewDockerComposeBuildAndDeployer(dockerComposeClient, switchCli, imageBuilder, clock) @@ -765,20 +765,20 @@ func wireDockerClusterClient(ctx context.Context) (docker.ClusterClient, error) if err != nil { return nil, err } + kubeContext, err := k8s.ProvideKubeContext(apiConfig) + if err != nil { + return nil, err + } env := k8s.ProvideEnv(ctx, apiConfig) restConfigOrError := k8s.ProvideRESTConfig(clientConfig) clientsetOrError := k8s.ProvideClientset(restConfigOrError) portForwardClient := k8s.ProvidePortForwardClient(restConfigOrError, clientsetOrError) namespace := k8s.ProvideConfigNamespace(clientConfig) - kubeContext, err := k8s.ProvideKubeContext(apiConfig) - if err != nil { - return nil, err - } minikubeClient := k8s.ProvideMinikubeClient(kubeContext) k8sClient := k8s.ProvideK8sClient(ctx, env, restConfigOrError, clientsetOrError, portForwardClient, namespace, minikubeClient, clientConfig) runtime := k8s.ProvideContainerRuntime(ctx, k8sClient) - clusterEnv := docker.ProvideClusterEnv(ctx, env, runtime, minikubeClient) - localEnv := docker.ProvideLocalEnv(ctx, clusterEnv) + clusterEnv := docker.ProvideClusterEnv(ctx, kubeContext, env, runtime, minikubeClient) + localEnv := docker.ProvideLocalEnv(ctx, kubeContext, env, clusterEnv) localClient := docker.ProvideLocalCli(ctx, localEnv) clusterClient, err := docker.ProvideClusterCli(ctx, localEnv, clusterEnv, localClient) if err != nil { @@ -794,20 +794,20 @@ func wireDockerLocalClient(ctx context.Context) (docker.LocalClient, error) { if err != nil { return nil, err } + kubeContext, err := k8s.ProvideKubeContext(apiConfig) + if err != nil { + return nil, err + } env := k8s.ProvideEnv(ctx, apiConfig) restConfigOrError := k8s.ProvideRESTConfig(clientConfig) clientsetOrError := k8s.ProvideClientset(restConfigOrError) portForwardClient := k8s.ProvidePortForwardClient(restConfigOrError, clientsetOrError) namespace := k8s.ProvideConfigNamespace(clientConfig) - kubeContext, err := k8s.ProvideKubeContext(apiConfig) - if err != nil { - return nil, err - } 
minikubeClient := k8s.ProvideMinikubeClient(kubeContext) k8sClient := k8s.ProvideK8sClient(ctx, env, restConfigOrError, clientsetOrError, portForwardClient, namespace, minikubeClient, clientConfig) runtime := k8s.ProvideContainerRuntime(ctx, k8sClient) - clusterEnv := docker.ProvideClusterEnv(ctx, env, runtime, minikubeClient) - localEnv := docker.ProvideLocalEnv(ctx, clusterEnv) + clusterEnv := docker.ProvideClusterEnv(ctx, kubeContext, env, runtime, minikubeClient) + localEnv := docker.ProvideLocalEnv(ctx, kubeContext, env, clusterEnv) localClient := docker.ProvideLocalCli(ctx, localEnv) return localClient, nil } @@ -835,8 +835,8 @@ func wireDownDeps(ctx context.Context, tiltAnalytics *analytics.TiltAnalytics, s versionExtension := version.NewExtension(tiltBuild) configExtension := config.NewExtension(subcommand) runtime := k8s.ProvideContainerRuntime(ctx, k8sClient) - clusterEnv := docker.ProvideClusterEnv(ctx, env, runtime, minikubeClient) - localEnv := docker.ProvideLocalEnv(ctx, clusterEnv) + clusterEnv := docker.ProvideClusterEnv(ctx, kubeContext, env, runtime, minikubeClient) + localEnv := docker.ProvideLocalEnv(ctx, kubeContext, env, clusterEnv) dockerComposeClient := dockercompose.NewDockerComposeClient(localEnv) webHost := provideWebHost() defaults := _wireDefaultsValue @@ -865,20 +865,20 @@ func wireDumpImageDeployRefDeps(ctx context.Context) (DumpImageDeployRefDeps, er if err != nil { return DumpImageDeployRefDeps{}, err } + kubeContext, err := k8s.ProvideKubeContext(apiConfig) + if err != nil { + return DumpImageDeployRefDeps{}, err + } env := k8s.ProvideEnv(ctx, apiConfig) restConfigOrError := k8s.ProvideRESTConfig(clientConfig) clientsetOrError := k8s.ProvideClientset(restConfigOrError) portForwardClient := k8s.ProvidePortForwardClient(restConfigOrError, clientsetOrError) namespace := k8s.ProvideConfigNamespace(clientConfig) - kubeContext, err := k8s.ProvideKubeContext(apiConfig) - if err != nil { - return DumpImageDeployRefDeps{}, err - } minikubeClient := k8s.ProvideMinikubeClient(kubeContext) k8sClient := k8s.ProvideK8sClient(ctx, env, restConfigOrError, clientsetOrError, portForwardClient, namespace, minikubeClient, clientConfig) runtime := k8s.ProvideContainerRuntime(ctx, k8sClient) - clusterEnv := docker.ProvideClusterEnv(ctx, env, runtime, minikubeClient) - localEnv := docker.ProvideLocalEnv(ctx, clusterEnv) + clusterEnv := docker.ProvideClusterEnv(ctx, kubeContext, env, runtime, minikubeClient) + localEnv := docker.ProvideLocalEnv(ctx, kubeContext, env, clusterEnv) localClient := docker.ProvideLocalCli(ctx, localEnv) clusterClient, err := docker.ProvideClusterCli(ctx, localEnv, clusterEnv, localClient) if err != nil { diff --git a/internal/containerupdate/docker_container_updater.go b/internal/containerupdate/docker_container_updater.go index 2233548c9d..4e0a1b7000 100644 --- a/internal/containerupdate/docker_container_updater.go +++ b/internal/containerupdate/docker_container_updater.go @@ -11,6 +11,7 @@ import ( "github.com/tilt-dev/tilt/internal/build" "github.com/tilt-dev/tilt/internal/container" "github.com/tilt-dev/tilt/internal/docker" + "github.com/tilt-dev/tilt/internal/k8s" "github.com/tilt-dev/tilt/internal/store" "github.com/tilt-dev/tilt/pkg/logger" "github.com/tilt-dev/tilt/pkg/model" @@ -26,6 +27,10 @@ func NewDockerUpdater(dCli docker.Client) *DockerUpdater { return &DockerUpdater{dCli: dCli} } +func (cu *DockerUpdater) WillBuildToKubeContext(kctx k8s.KubeContext) bool { + return cu.dCli.Env().WillBuildToKubeContext(kctx) +} + func (cu 
*DockerUpdater) UpdateContainer(ctx context.Context, cInfo store.ContainerInfo, archiveToCopy io.Reader, filesToDelete []string, cmds []model.Cmd, hotReload bool) error { l := logger.Get(ctx) diff --git a/internal/docker/client_test.go b/internal/docker/client_test.go index 625699e85e..989f4f4e85 100644 --- a/internal/docker/client_test.go +++ b/internal/docker/client_test.go @@ -132,10 +132,13 @@ func TestProvideEnv(t *testing.T) { }, }, { - env: k8s.EnvMicroK8s, - runtime: container.RuntimeDocker, - expectedCluster: Env{Host: microK8sDockerHost}, - expectedLocal: Env{}, + env: k8s.EnvMicroK8s, + runtime: container.RuntimeDocker, + expectedCluster: Env{ + Host: microK8sDockerHost, + BuildToKubeContexts: []string{"microk8s-me"}, + }, + expectedLocal: Env{}, }, { env: k8s.EnvMicroK8s, @@ -151,11 +154,12 @@ func TestProvideEnv(t *testing.T) { "DOCKER_API_VERSION": "1.35", }, expectedCluster: Env{ - TLSVerify: "1", - Host: "tcp://192.168.99.100:2376", - CertPath: "/home/nick/.minikube/certs", - APIVersion: "1.35", - IsOldMinikube: true, + TLSVerify: "1", + Host: "tcp://192.168.99.100:2376", + CertPath: "/home/nick/.minikube/certs", + APIVersion: "1.35", + IsOldMinikube: true, + BuildToKubeContexts: []string{"minikube-me"}, }, }, { @@ -169,10 +173,11 @@ func TestProvideEnv(t *testing.T) { "DOCKER_API_VERSION": "1.35", }, expectedCluster: Env{ - TLSVerify: "1", - Host: "tcp://192.168.99.100:2376", - CertPath: "/home/nick/.minikube/certs", - APIVersion: "1.35", + TLSVerify: "1", + Host: "tcp://192.168.99.100:2376", + CertPath: "/home/nick/.minikube/certs", + APIVersion: "1.35", + BuildToKubeContexts: []string{"minikube-me"}, }, }, { @@ -210,16 +215,18 @@ func TestProvideEnv(t *testing.T) { "DOCKER_CERT_PATH": "/home/nick/.minikube/certs", }, expectedCluster: Env{ - TLSVerify: "1", - Host: "tcp://192.168.99.100:2376", - CertPath: "/home/nick/.minikube/certs", - IsOldMinikube: true, + TLSVerify: "1", + Host: "tcp://192.168.99.100:2376", + CertPath: "/home/nick/.minikube/certs", + IsOldMinikube: true, + BuildToKubeContexts: []string{"minikube-me"}, }, expectedLocal: Env{ - TLSVerify: "1", - Host: "tcp://192.168.99.100:2376", - CertPath: "/home/nick/.minikube/certs", - IsOldMinikube: true, + TLSVerify: "1", + Host: "tcp://192.168.99.100:2376", + CertPath: "/home/nick/.minikube/certs", + IsOldMinikube: true, + BuildToKubeContexts: []string{"minikube-me"}, }, }, { @@ -275,10 +282,11 @@ func TestProvideEnv(t *testing.T) { } mkClient := k8s.FakeMinikube{DockerEnvMap: c.mkEnv, FakeVersion: minikubeV} - cluster := ProvideClusterEnv(context.Background(), c.env, c.runtime, mkClient) + kubeContext := k8s.KubeContext(fmt.Sprintf("%s-me", c.env)) + cluster := ProvideClusterEnv(context.Background(), kubeContext, c.env, c.runtime, mkClient) assert.Equal(t, c.expectedCluster, Env(cluster)) - local := ProvideLocalEnv(context.Background(), cluster) + local := ProvideLocalEnv(context.Background(), kubeContext, c.env, cluster) assert.Equal(t, c.expectedLocal, Env(local)) }) } diff --git a/internal/docker/clients.go b/internal/docker/clients.go index 14313565e4..76c8fe4b12 100644 --- a/internal/docker/clients.go +++ b/internal/docker/clients.go @@ -2,6 +2,8 @@ package docker import ( "context" + + "github.com/google/go-cmp/cmp" ) type LocalClient Client @@ -16,7 +18,7 @@ func ProvideClusterCli(ctx context.Context, lEnv LocalEnv, cEnv ClusterEnv, lCli // If the Cluster Env and the LocalEnv are the same, we can re-use the cluster // client as a local client. 
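For context on the hunk just below: once Env grows the BuildToKubeContexts []string field, Env values can no longer be compared with ==, because Go structs containing slice fields are not comparable, so the old Env(lEnv) == Env(cEnv) check would stop compiling. cmp.Equal from go-cmp compares the structs field by field, slices included. A minimal sketch of the difference, using a stand-in struct rather than the real docker.Env:

package main

import (
	"fmt"

	"github.com/google/go-cmp/cmp"
)

// Stand-in for docker.Env after this patch; the slice field is what matters.
type env struct {
	Host                string
	BuildToKubeContexts []string
}

func main() {
	a := env{Host: "unix:///var/run/docker.sock", BuildToKubeContexts: []string{"minikube"}}
	b := env{Host: "unix:///var/run/docker.sock", BuildToKubeContexts: []string{"minikube"}}
	// fmt.Println(a == b) // compile error: struct containing []string cannot be compared
	fmt.Println(cmp.Equal(a, b)) // true: field-by-field comparison, including the slice
}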
var cClient ClusterClient - if Env(lEnv) == Env(cEnv) { + if cmp.Equal(Env(lEnv), Env(cEnv)) { cClient = ClusterClient(lClient) } else { cClient = NewDockerClient(ctx, Env(cEnv)) diff --git a/internal/docker/env.go b/internal/docker/env.go index 15b0892aac..a299c08c67 100644 --- a/internal/docker/env.go +++ b/internal/docker/env.go @@ -33,11 +33,33 @@ type Env struct { // https://github.com/kubernetes/minikube/issues/4143 IsOldMinikube bool + // Some Kubernetes contexts have a Docker daemon that they use directly + // as their container runtime. Any images built on that daemon will + // show up automatically in the runtime. + // + // We used to store this as a property of the k8s env but now store it as a field of the Docker Env, because this + // really affects how we interact with the Docker Env (rather than + // how we interact with the K8s Env). + // + // In theory, you can have more than one, but in practice, + // this is very difficult to set up. + BuildToKubeContexts []string + // If the env failed to load for some reason, propagate that error // so that we can report it when the user tries to do a docker_build. Error error } +// Determines if this docker client can build images directly to the given cluster. +func (e Env) WillBuildToKubeContext(kctx k8s.KubeContext) bool { + for _, current := range e.BuildToKubeContexts { + if string(kctx) == current { + return true + } + } + return false +} + // Serializes this back to environment variables for os.Environ func (e Env) AsEnviron() []string { vars := []string{} @@ -60,7 +82,7 @@ func (e Env) AsEnviron() []string { type ClusterEnv Env type LocalEnv Env -func ProvideLocalEnv(ctx context.Context, cEnv ClusterEnv) LocalEnv { +func ProvideLocalEnv(ctx context.Context, kubeContext k8s.KubeContext, env k8s.Env, cEnv ClusterEnv) LocalEnv { result := overlayOSEnvVars(Env{}) // The user may have already configured their local docker client @@ -68,50 +90,64 @@ func ProvideLocalEnv(ctx context.Context, cEnv ClusterEnv) LocalEnv { // the hosts of the LocalEnv and ClusterEnv. if cEnv.Host == result.Host { result.IsOldMinikube = cEnv.IsOldMinikube + result.BuildToKubeContexts = cEnv.BuildToKubeContexts + } + + if env == k8s.EnvDockerDesktop && isDefaultHost(result) { + result.BuildToKubeContexts = append(result.BuildToKubeContexts, string(kubeContext)) } return LocalEnv(result) } -func ProvideClusterEnv(ctx context.Context, env k8s.Env, runtime container.Runtime, minikubeClient k8s.MinikubeClient) ClusterEnv { +func ProvideClusterEnv(ctx context.Context, kubeContext k8s.KubeContext, env k8s.Env, runtime container.Runtime, minikubeClient k8s.MinikubeClient) ClusterEnv { result := Env{} if runtime == container.RuntimeDocker { if env == k8s.EnvMinikube { // If we're running Minikube with a docker runtime, talk to Minikube's docker socket. 
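To make the new field concrete before the rest of the ProvideClusterEnv hunk: a minimal usage sketch, not part of this patch, of how a caller consults the WillBuildToKubeContext helper added above. It assumes the docker.Env and k8s.KubeContext types shown in this diff; the minikube-me name mirrors the client_test.go fixtures, and gke-prod is a made-up remote context.

// Illustrative sketch only: images built against a daemon that the cluster
// uses as its container runtime are already visible in the cluster, so
// nothing needs pushing.
func needsPush(e docker.Env, kctx k8s.KubeContext) bool {
	return !e.WillBuildToKubeContext(kctx)
}

// needsPush(docker.Env{BuildToKubeContexts: []string{"minikube-me"}}, k8s.KubeContext("minikube-me")) == false
// needsPush(docker.Env{BuildToKubeContexts: []string{"minikube-me"}}, k8s.KubeContext("gke-prod"))    == true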
- envMap, err := minikubeClient.DockerEnv(ctx) + envMap, ok, err := minikubeClient.DockerEnv(ctx) if err != nil { return ClusterEnv{Error: err} } - host := envMap["DOCKER_HOST"] - if host != "" { - result.Host = host - } - - apiVersion := envMap["DOCKER_API_VERSION"] - if apiVersion != "" { - result.APIVersion = apiVersion - } - - certPath := envMap["DOCKER_CERT_PATH"] - if certPath != "" { - result.CertPath = certPath - } - - tlsVerify := envMap["DOCKER_TLS_VERIFY"] - if tlsVerify != "" { - result.TLSVerify = tlsVerify + if ok { + host := envMap["DOCKER_HOST"] + if host != "" { + result.Host = host + } + + apiVersion := envMap["DOCKER_API_VERSION"] + if apiVersion != "" { + result.APIVersion = apiVersion + } + + certPath := envMap["DOCKER_CERT_PATH"] + if certPath != "" { + result.CertPath = certPath + } + + tlsVerify := envMap["DOCKER_TLS_VERIFY"] + if tlsVerify != "" { + result.TLSVerify = tlsVerify + } + + result.IsOldMinikube = isOldMinikube(ctx, minikubeClient) + result.BuildToKubeContexts = append(result.BuildToKubeContexts, string(kubeContext)) } - - result.IsOldMinikube = isOldMinikube(ctx, minikubeClient) } else if env == k8s.EnvMicroK8s { // If we're running Microk8s with a docker runtime, talk to Microk8s's docker socket. result.Host = microK8sDockerHost + result.BuildToKubeContexts = append(result.BuildToKubeContexts, string(kubeContext)) } } - return ClusterEnv(overlayOSEnvVars(Env(result))) + result = overlayOSEnvVars(result) + if env == k8s.EnvDockerDesktop && isDefaultHost(result) { + result.BuildToKubeContexts = append(result.BuildToKubeContexts, string(kubeContext)) + } + + return ClusterEnv(result) } func isOldMinikube(ctx context.Context, minikubeClient k8s.MinikubeClient) bool { @@ -130,6 +166,19 @@ func isOldMinikube(ctx context.Context, minikubeClient k8s.MinikubeClient) bool return minMinikubeVersionBuildkit.GTE(vParsed) } +func isDefaultHost(e Env) bool { + if e.Host == "" { + return true + } + + defaultHost, err := opts.ParseHost(true, "") + if err != nil { + return false + } + + return e.Host == defaultHost +} + func overlayOSEnvVars(result Env) Env { host := os.Getenv("DOCKER_HOST") if host != "" { diff --git a/internal/docker/fake_client.go b/internal/docker/fake_client.go index 9847a2c0e7..c6061fb2e0 100644 --- a/internal/docker/fake_client.go +++ b/internal/docker/fake_client.go @@ -82,6 +82,8 @@ type ExecCall struct { } type FakeClient struct { + FakeEnv Env + PushCount int PushImage string PushOptions types.ImagePushOptions @@ -148,7 +150,7 @@ func (c *FakeClient) CheckConnected() error { return c.CheckConnectedErr } func (c *FakeClient) Env() Env { - return Env{} + return c.FakeEnv } func (c *FakeClient) BuilderVersion() types.BuilderVersion { return types.BuilderV1 diff --git a/internal/engine/build_and_deployer_test.go b/internal/engine/build_and_deployer_test.go index 51d0d838ce..0eed0e518f 100644 --- a/internal/engine/build_and_deployer_test.go +++ b/internal/engine/build_and_deployer_test.go @@ -775,8 +775,8 @@ func newBDFixtureWithUpdateMode(t *testing.T, env k8s.Env, runtime container.Run ctx, cancel := context.WithCancel(ctx) f := tempdir.NewTempDirFixture(t) dir := dirs.NewTiltDevDirAt(f.Path()) - docker := docker.NewFakeClient() - docker.ContainerListOutput = map[string][]types.Container{ + dockerClient := docker.NewFakeClient() + dockerClient.ContainerListOutput = map[string][]types.Container{ "pod": []types.Container{ types.Container{ ID: k8s.MagicTestContainerID, @@ -788,7 +788,7 @@ func newBDFixtureWithUpdateMode(t *testing.T, env k8s.Env, 
runtime container.Run mode := buildcontrol.UpdateModeFlag(um) dcc := dockercompose.NewFakeDockerComposeClient(t, ctx) kl := &fakeKINDLoader{} - bd, err := provideBuildAndDeployer(ctx, docker, k8s, dir, env, mode, dcc, fakeClock{now: time.Unix(1551202573, 0)}, kl, ta) + bd, err := provideFakeBuildAndDeployer(ctx, dockerClient, k8s, dir, env, mode, dcc, fakeClock{now: time.Unix(1551202573, 0)}, kl, ta) if err != nil { t.Fatal(err) } @@ -799,7 +799,7 @@ func newBDFixtureWithUpdateMode(t *testing.T, env k8s.Env, runtime container.Run TempDirFixture: f, ctx: ctx, cancel: cancel, - docker: docker, + docker: dockerClient, k8s: k8s, bd: bd, st: st, diff --git a/internal/engine/buildcontrol/image_build_and_deployer.go b/internal/engine/buildcontrol/image_build_and_deployer.go index 83a481e7a1..bcf40d308d 100644 --- a/internal/engine/buildcontrol/image_build_and_deployer.go +++ b/internal/engine/buildcontrol/image_build_and_deployer.go @@ -57,14 +57,14 @@ func NewKINDLoader(env k8s.Env, clusterName k8s.ClusterName) KINDLoader { } type ImageBuildAndDeployer struct { - db build.DockerBuilder - ib *ImageBuilder - k8sClient k8s.Client - env k8s.Env - runtime container.Runtime - analytics *analytics.TiltAnalytics - clock build.Clock - kl KINDLoader + db build.DockerBuilder + ib *ImageBuilder + k8sClient k8s.Client + env k8s.Env + kubeContext k8s.KubeContext + analytics *analytics.TiltAnalytics + clock build.Clock + kl KINDLoader } func NewImageBuildAndDeployer( @@ -72,21 +72,21 @@ func NewImageBuildAndDeployer( customBuilder build.CustomBuilder, k8sClient k8s.Client, env k8s.Env, + kubeContext k8s.KubeContext, analytics *analytics.TiltAnalytics, updMode UpdateMode, c build.Clock, - runtime container.Runtime, kl KINDLoader, ) *ImageBuildAndDeployer { return &ImageBuildAndDeployer{ - db: db, - ib: NewImageBuilder(db, customBuilder, updMode), - k8sClient: k8sClient, - env: env, - analytics: analytics, - clock: c, - runtime: runtime, - kl: kl, + db: db, + ib: NewImageBuilder(db, customBuilder, updMode), + k8sClient: k8sClient, + env: env, + kubeContext: kubeContext, + analytics: analytics, + clock: c, + kl: kl, } } @@ -199,8 +199,15 @@ func (ibd *ImageBuildAndDeployer) push(ctx context.Context, ref reference.NamedT // We can also skip the push of the image if it isn't used // in any k8s resources! (e.g., it's consumed by another image). - if ibd.canAlwaysSkipPush() || !IsImageDeployedToK8s(iTarget, kTarget) || cbSkip { - ps.Printf(ctx, "Skipping push") + + if cbSkip { + ps.Printf(ctx, "Skipping push: custom_build() configured to handle push itself") + return nil + } else if !IsImageDeployedToK8s(iTarget, kTarget) { + ps.Printf(ctx, "Skipping push: base image does not need deploy") + return nil + } else if ibd.db.WillBuildToKubeContext(ibd.kubeContext) { + ps.Printf(ctx, "Skipping push: building on cluster's container runtime") return nil } @@ -363,7 +370,7 @@ func (ibd *ImageBuildAndDeployer) createEntitiesToDeploy(ctx context.Context, // When working with a local k8s cluster, we set the pull policy to Never, // to ensure that k8s fails hard if the image is missing from docker. policy := v1.PullIfNotPresent - if ibd.canAlwaysSkipPush() { + if ibd.db.WillBuildToKubeContext(ibd.kubeContext) { policy = v1.PullNever } @@ -413,14 +420,6 @@ func (ibd *ImageBuildAndDeployer) createEntitiesToDeploy(ctx context.Context, return newK8sEntities, nil } -// If we're using docker-for-desktop as our k8s backend, -// we don't need to push to the central registry. 
-// The k8s will use the image already available -// in the local docker daemon. -func (ibd *ImageBuildAndDeployer) canAlwaysSkipPush() bool { - return ibd.env.UsesLocalDockerRegistry() && ibd.runtime == container.RuntimeDocker -} - // Create a new ImageTarget with the Dockerfiles rewritten with the injected images. func InjectImageDependencies(iTarget model.ImageTarget, iTargetMap map[model.TargetID]model.ImageTarget, deps []store.BuildResult) (model.ImageTarget, error) { if len(deps) == 0 { diff --git a/internal/engine/buildcontrol/image_build_and_deployer_test.go b/internal/engine/buildcontrol/image_build_and_deployer_test.go index a388dff6c0..3f67a974c6 100644 --- a/internal/engine/buildcontrol/image_build_and_deployer_test.go +++ b/internal/engine/buildcontrol/image_build_and_deployer_test.go @@ -996,11 +996,11 @@ func newIBDFixture(t *testing.T, env k8s.Env) *ibdFixture { f := tempdir.NewTempDirFixture(t) dir := dirs.NewTiltDevDirAt(f.Path()) - docker := docker.NewFakeClient() + dockerClient := docker.NewFakeClient() // Make the fake ImageExists always return true, which is the behavior we want // when testing the ImageBuildAndDeployer. - docker.ImageAlwaysExists = true + dockerClient.ImageAlwaysExists = true out := bufsync.NewThreadSafeBuffer() ctx, _, ta := testutils.CtxAndAnalyticsForTest() @@ -1008,7 +1008,9 @@ func newIBDFixture(t *testing.T, env k8s.Env) *ibdFixture { kClient := k8s.NewFakeK8sClient(t) kl := &fakeKINDLoader{} clock := fakeClock{time.Date(2019, 1, 1, 1, 1, 1, 1, time.UTC)} - ibd, err := ProvideImageBuildAndDeployer(ctx, docker, kClient, env, dir, clock, kl, ta) + kubeContext := k8s.KubeContext(fmt.Sprintf("%s-me", env)) + clusterEnv := docker.ClusterEnv(docker.Env{}) + ibd, err := ProvideImageBuildAndDeployer(ctx, dockerClient, kClient, env, kubeContext, clusterEnv, dir, clock, kl, ta) if err != nil { t.Fatal(err) } @@ -1016,7 +1018,7 @@ func newIBDFixture(t *testing.T, env k8s.Env) *ibdFixture { TempDirFixture: f, out: out, ctx: ctx, - docker: docker, + docker: dockerClient, k8s: kClient, ibd: ibd, st: store.NewTestingStore(), diff --git a/internal/engine/buildcontrol/live_update_build_and_deployer.go b/internal/engine/buildcontrol/live_update_build_and_deployer.go index d66aadd0aa..ea49072bf1 100644 --- a/internal/engine/buildcontrol/live_update_build_and_deployer.go +++ b/internal/engine/buildcontrol/live_update_build_and_deployer.go @@ -26,24 +26,24 @@ import ( var _ BuildAndDeployer = &LiveUpdateBuildAndDeployer{} type LiveUpdateBuildAndDeployer struct { - dcu *containerupdate.DockerUpdater - ecu *containerupdate.ExecUpdater - updMode UpdateMode - env k8s.Env - runtime container.Runtime - clock build.Clock + dcu *containerupdate.DockerUpdater + ecu *containerupdate.ExecUpdater + updMode UpdateMode + kubeContext k8s.KubeContext + clock build.Clock } func NewLiveUpdateBuildAndDeployer(dcu *containerupdate.DockerUpdater, ecu *containerupdate.ExecUpdater, - updMode UpdateMode, env k8s.Env, runtime container.Runtime, c build.Clock) *LiveUpdateBuildAndDeployer { + updMode UpdateMode, + kubeContext k8s.KubeContext, + c build.Clock) *LiveUpdateBuildAndDeployer { return &LiveUpdateBuildAndDeployer{ - dcu: dcu, - ecu: ecu, - updMode: updMode, - env: env, - runtime: runtime, - clock: c, + dcu: dcu, + ecu: ecu, + updMode: updMode, + kubeContext: kubeContext, + clock: c, } } @@ -254,7 +254,7 @@ func (lubad *LiveUpdateBuildAndDeployer) containerUpdaterForSpecs(specs []model. 
return lubad.ecu } - if lubad.runtime == container.RuntimeDocker && lubad.env.UsesLocalDockerRegistry() { + if lubad.dcu.WillBuildToKubeContext(lubad.kubeContext) { return lubad.dcu } diff --git a/internal/engine/buildcontrol/live_update_build_and_deployer_test.go b/internal/engine/buildcontrol/live_update_build_and_deployer_test.go index 5893cc7a82..f5809b2ec5 100644 --- a/internal/engine/buildcontrol/live_update_build_and_deployer_test.go +++ b/internal/engine/buildcontrol/live_update_build_and_deployer_test.go @@ -9,12 +9,10 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "github.com/tilt-dev/tilt/internal/container" - "github.com/tilt-dev/tilt/internal/k8s" - "github.com/tilt-dev/tilt/internal/build" "github.com/tilt-dev/tilt/internal/containerupdate" "github.com/tilt-dev/tilt/internal/docker" + "github.com/tilt-dev/tilt/internal/k8s" "github.com/tilt-dev/tilt/internal/store" "github.com/tilt-dev/tilt/internal/testutils" "github.com/tilt-dev/tilt/internal/testutils/tempdir" @@ -356,7 +354,7 @@ type lcbadFixture struct { func newFixture(t testing.TB) *lcbadFixture { // HACK(maia): we don't need any real container updaters on this LiveUpdBaD since we're testing // a func further down the flow that takes a ContainerUpdater as an arg, so just pass nils - lubad := NewLiveUpdateBuildAndDeployer(nil, nil, UpdateModeAuto, k8s.EnvDockerDesktop, container.RuntimeDocker, fakeClock{}) + lubad := NewLiveUpdateBuildAndDeployer(nil, nil, UpdateModeAuto, k8s.KubeContext("fake-context"), fakeClock{}) fakeContainerUpdater := &containerupdate.FakeContainerUpdater{} ctx, _, _ := testutils.CtxAndAnalyticsForTest() st := store.NewTestingStore() diff --git a/internal/engine/buildcontrol/update_mode.go b/internal/engine/buildcontrol/update_mode.go index 171a9c2e17..f571cc41d0 100644 --- a/internal/engine/buildcontrol/update_mode.go +++ b/internal/engine/buildcontrol/update_mode.go @@ -3,7 +3,7 @@ package buildcontrol import ( "fmt" - "github.com/tilt-dev/tilt/internal/container" + "github.com/tilt-dev/tilt/internal/docker" "github.com/tilt-dev/tilt/internal/k8s" ) @@ -34,7 +34,7 @@ var AllUpdateModes = []UpdateMode{ UpdateModeKubectlExec, } -func ProvideUpdateMode(flag UpdateModeFlag, env k8s.Env, runtime container.Runtime) (UpdateMode, error) { +func ProvideUpdateMode(flag UpdateModeFlag, kubeContext k8s.KubeContext, env docker.ClusterEnv) (UpdateMode, error) { valid := false for _, mode := range AllUpdateModes { if mode == UpdateMode(flag) { @@ -48,8 +48,8 @@ func ProvideUpdateMode(flag UpdateModeFlag, env k8s.Env, runtime container.Runti mode := UpdateMode(flag) if mode == UpdateModeContainer { - if !env.UsesLocalDockerRegistry() || runtime != container.RuntimeDocker { - return "", fmt.Errorf("update mode %q is only valid with local Docker clusters like Docker For Mac, Minikube, and MicroK8s", flag) + if !docker.Env(env).WillBuildToKubeContext(kubeContext) { + return "", fmt.Errorf("update mode %q is only valid with local Docker clusters like Docker For Mac or Minikube", flag) } } diff --git a/internal/engine/buildcontrol/wire.go b/internal/engine/buildcontrol/wire.go index 6adc2f2370..ca06cfcbc8 100644 --- a/internal/engine/buildcontrol/wire.go +++ b/internal/engine/buildcontrol/wire.go @@ -48,6 +48,8 @@ func ProvideImageBuildAndDeployer( docker docker.Client, kClient k8s.Client, env k8s.Env, + kubeContext k8s.KubeContext, + clusterEnv docker.ClusterEnv, dir *dirs.TiltDevDir, clock build.Clock, kp KINDLoader, @@ -55,7 +57,6 @@ func ProvideImageBuildAndDeployer( 
wire.Build( BaseWireSet, wire.Value(UpdateModeFlag(UpdateModeAuto)), - k8s.ProvideContainerRuntime, ) return nil, nil @@ -70,18 +71,12 @@ func ProvideDockerComposeBuildAndDeployer( BaseWireSet, wire.Value(UpdateModeFlag(UpdateModeAuto)), build.ProvideClock, + wire.Value(docker.ClusterEnv(docker.Env{})), // EnvNone ensures that we get an exploding k8s client. - wire.Value(k8s.Env(k8s.EnvNone)), wire.Value(k8s.KubeContextOverride("")), k8s.ProvideClientConfig, - k8s.ProvideConfigNamespace, k8s.ProvideKubeContext, - k8s.ProvideK8sClient, - k8s.ProvideRESTConfig, - k8s.ProvideClientset, - k8s.ProvidePortForwardClient, - k8s.ProvideContainerRuntime, k8s.ProvideKubeConfig, ) diff --git a/internal/engine/buildcontrol/wire_gen.go b/internal/engine/buildcontrol/wire_gen.go index d87f84ee2a..3eb28444f4 100644 --- a/internal/engine/buildcontrol/wire_gen.go +++ b/internal/engine/buildcontrol/wire_gen.go @@ -23,18 +23,17 @@ import ( // Injectors from wire.go: -func ProvideImageBuildAndDeployer(ctx context.Context, docker2 docker.Client, kClient k8s.Client, env k8s.Env, dir *dirs.TiltDevDir, clock build.Clock, kp KINDLoader, analytics2 *analytics.TiltAnalytics) (*ImageBuildAndDeployer, error) { +func ProvideImageBuildAndDeployer(ctx context.Context, docker2 docker.Client, kClient k8s.Client, env k8s.Env, kubeContext k8s.KubeContext, clusterEnv docker.ClusterEnv, dir *dirs.TiltDevDir, clock build.Clock, kp KINDLoader, analytics2 *analytics.TiltAnalytics) (*ImageBuildAndDeployer, error) { labels := _wireLabelsValue dockerImageBuilder := build.NewDockerImageBuilder(docker2, labels) dockerBuilder := build.DefaultDockerBuilder(dockerImageBuilder) execCustomBuilder := build.NewExecCustomBuilder(docker2, clock) updateModeFlag := _wireUpdateModeFlagValue - runtime := k8s.ProvideContainerRuntime(ctx, kClient) - updateMode, err := ProvideUpdateMode(updateModeFlag, env, runtime) + updateMode, err := ProvideUpdateMode(updateModeFlag, kubeContext, clusterEnv) if err != nil { return nil, err } - imageBuildAndDeployer := NewImageBuildAndDeployer(dockerBuilder, execCustomBuilder, kClient, env, analytics2, updateMode, clock, runtime, kp) + imageBuildAndDeployer := NewImageBuildAndDeployer(dockerBuilder, execCustomBuilder, kClient, env, kubeContext, analytics2, updateMode, clock, kp) return imageBuildAndDeployer, nil } @@ -50,13 +49,8 @@ func ProvideDockerComposeBuildAndDeployer(ctx context.Context, dcCli dockercompo clock := build.ProvideClock() execCustomBuilder := build.NewExecCustomBuilder(dCli, clock) updateModeFlag := _wireBuildcontrolUpdateModeFlagValue - env := _wireEnvValue kubeContextOverride := _wireKubeContextOverrideValue clientConfig := k8s.ProvideClientConfig(kubeContextOverride) - restConfigOrError := k8s.ProvideRESTConfig(clientConfig) - clientsetOrError := k8s.ProvideClientset(restConfigOrError) - portForwardClient := k8s.ProvidePortForwardClient(restConfigOrError, clientsetOrError) - namespace := k8s.ProvideConfigNamespace(clientConfig) config, err := k8s.ProvideKubeConfig(clientConfig, kubeContextOverride) if err != nil { return nil, err @@ -65,10 +59,8 @@ func ProvideDockerComposeBuildAndDeployer(ctx context.Context, dcCli dockercompo if err != nil { return nil, err } - minikubeClient := k8s.ProvideMinikubeClient(kubeContext) - client := k8s.ProvideK8sClient(ctx, env, restConfigOrError, clientsetOrError, portForwardClient, namespace, minikubeClient, clientConfig) - runtime := k8s.ProvideContainerRuntime(ctx, client) - updateMode, err := ProvideUpdateMode(updateModeFlag, env, runtime) + clusterEnv := 
_wireClusterEnvValue + updateMode, err := ProvideUpdateMode(updateModeFlag, kubeContext, clusterEnv) if err != nil { return nil, err } @@ -79,8 +71,8 @@ func ProvideDockerComposeBuildAndDeployer(ctx context.Context, dcCli dockercompo var ( _wireBuildcontrolUpdateModeFlagValue = UpdateModeFlag(UpdateModeAuto) - _wireEnvValue = k8s.Env(k8s.EnvNone) _wireKubeContextOverrideValue = k8s.KubeContextOverride("") + _wireClusterEnvValue = docker.ClusterEnv(docker.Env{}) ) // wire.go: diff --git a/internal/engine/wire.go b/internal/engine/wire.go index f9e8a97ca4..a6e7c6703a 100644 --- a/internal/engine/wire.go +++ b/internal/engine/wire.go @@ -10,6 +10,7 @@ import ( "github.com/tilt-dev/wmclient/pkg/dirs" sdktrace "go.opentelemetry.io/otel/sdk/trace" + "github.com/tilt-dev/tilt/internal/container" "github.com/tilt-dev/tilt/internal/engine/buildcontrol" "github.com/tilt-dev/tilt/internal/analytics" @@ -39,7 +40,7 @@ var DeployerWireSet = wire.NewSet( DeployerBaseWireSet, ) -func provideBuildAndDeployer( +func provideFakeBuildAndDeployer( ctx context.Context, docker docker.Client, kClient k8s.Client, @@ -53,7 +54,31 @@ func provideBuildAndDeployer( wire.Build( DeployerWireSetTest, k8s.ProvideContainerRuntime, + provideFakeKubeContext, + provideFakeDockerClusterEnv, ) return nil, nil } + +func provideFakeKubeContext(env k8s.Env) k8s.KubeContext { + return k8s.KubeContext(string(env)) +} + +// A simplified version of the normal calculation we do +// about whether we can build direct to a cluser +func provideFakeDockerClusterEnv(c docker.Client, k8sEnv k8s.Env, kubeContext k8s.KubeContext, runtime container.Runtime) docker.ClusterEnv { + env := c.Env() + isDockerRuntime := runtime == container.RuntimeDocker + isLocalDockerCluster := k8sEnv == k8s.EnvMinikube || k8sEnv == k8s.EnvMicroK8s || k8sEnv == k8s.EnvDockerDesktop + if isDockerRuntime && isLocalDockerCluster { + env.BuildToKubeContexts = append(env.BuildToKubeContexts, string(kubeContext)) + } + + fake, ok := c.(*docker.FakeClient) + if ok { + fake.FakeEnv = env + } + + return docker.ClusterEnv(env) +} diff --git a/internal/engine/wire_gen.go b/internal/engine/wire_gen.go index 034853fba4..43e80f23e6 100644 --- a/internal/engine/wire_gen.go +++ b/internal/engine/wire_gen.go @@ -14,6 +14,7 @@ import ( "github.com/tilt-dev/tilt/internal/analytics" "github.com/tilt-dev/tilt/internal/build" + "github.com/tilt-dev/tilt/internal/container" "github.com/tilt-dev/tilt/internal/containerupdate" "github.com/tilt-dev/tilt/internal/docker" "github.com/tilt-dev/tilt/internal/dockercompose" @@ -25,20 +26,22 @@ import ( // Injectors from wire.go: -func provideBuildAndDeployer(ctx context.Context, docker2 docker.Client, kClient k8s.Client, dir *dirs.TiltDevDir, env k8s.Env, updateMode buildcontrol.UpdateModeFlag, dcc dockercompose.DockerComposeClient, clock build.Clock, kp buildcontrol.KINDLoader, analytics2 *analytics.TiltAnalytics) (buildcontrol.BuildAndDeployer, error) { +func provideFakeBuildAndDeployer(ctx context.Context, docker2 docker.Client, kClient k8s.Client, dir *dirs.TiltDevDir, env k8s.Env, updateMode buildcontrol.UpdateModeFlag, dcc dockercompose.DockerComposeClient, clock build.Clock, kp buildcontrol.KINDLoader, analytics2 *analytics.TiltAnalytics) (buildcontrol.BuildAndDeployer, error) { dockerUpdater := containerupdate.NewDockerUpdater(docker2) execUpdater := containerupdate.NewExecUpdater(kClient) + kubeContext := provideFakeKubeContext(env) runtime := k8s.ProvideContainerRuntime(ctx, kClient) - buildcontrolUpdateMode, err := 
buildcontrol.ProvideUpdateMode(updateMode, env, runtime) + clusterEnv := provideFakeDockerClusterEnv(docker2, env, kubeContext, runtime) + buildcontrolUpdateMode, err := buildcontrol.ProvideUpdateMode(updateMode, kubeContext, clusterEnv) if err != nil { return nil, err } - liveUpdateBuildAndDeployer := buildcontrol.NewLiveUpdateBuildAndDeployer(dockerUpdater, execUpdater, buildcontrolUpdateMode, env, runtime, clock) + liveUpdateBuildAndDeployer := buildcontrol.NewLiveUpdateBuildAndDeployer(dockerUpdater, execUpdater, buildcontrolUpdateMode, kubeContext, clock) labels := _wireLabelsValue dockerImageBuilder := build.NewDockerImageBuilder(docker2, labels) dockerBuilder := build.DefaultDockerBuilder(dockerImageBuilder) execCustomBuilder := build.NewExecCustomBuilder(docker2, clock) - imageBuildAndDeployer := buildcontrol.NewImageBuildAndDeployer(dockerBuilder, execCustomBuilder, kClient, env, analytics2, buildcontrolUpdateMode, clock, runtime, kp) + imageBuildAndDeployer := buildcontrol.NewImageBuildAndDeployer(dockerBuilder, execCustomBuilder, kClient, env, kubeContext, analytics2, buildcontrolUpdateMode, clock, kp) imageBuilder := buildcontrol.NewImageBuilder(dockerBuilder, execCustomBuilder, buildcontrolUpdateMode) dockerComposeBuildAndDeployer := buildcontrol.NewDockerComposeBuildAndDeployer(dcc, docker2, imageBuilder, clock) localTargetBuildAndDeployer := buildcontrol.NewLocalTargetBuildAndDeployer(clock) @@ -68,3 +71,25 @@ var DeployerWireSetTest = wire.NewSet( var DeployerWireSet = wire.NewSet( DeployerBaseWireSet, ) + +func provideFakeKubeContext(env k8s.Env) k8s.KubeContext { + return k8s.KubeContext(string(env)) +} + +// A simplified version of the normal calculation we do +// about whether we can build direct to a cluser +func provideFakeDockerClusterEnv(c docker.Client, k8sEnv k8s.Env, kubeContext k8s.KubeContext, runtime container.Runtime) docker.ClusterEnv { + env := c.Env() + isDockerRuntime := runtime == container.RuntimeDocker + isLocalDockerCluster := k8sEnv == k8s.EnvMinikube || k8sEnv == k8s.EnvMicroK8s || k8sEnv == k8s.EnvDockerDesktop + if isDockerRuntime && isLocalDockerCluster { + env.BuildToKubeContexts = append(env.BuildToKubeContexts, string(kubeContext)) + } + + fake, ok := c.(*docker.FakeClient) + if ok { + fake.FakeEnv = env + } + + return docker.ClusterEnv(env) +} diff --git a/internal/k8s/env.go b/internal/k8s/env.go index 15d778ee72..26118d5567 100644 --- a/internal/k8s/env.go +++ b/internal/k8s/env.go @@ -35,12 +35,15 @@ const ( EnvNone Env = "none" // k8s not running (not neces. a problem, e.g. 
if using Tilt x Docker Compose) ) -func (e Env) UsesLocalDockerRegistry() bool { - return e == EnvMinikube || e == EnvDockerDesktop || e == EnvMicroK8s -} - func (e Env) IsDevCluster() bool { - return e == EnvMinikube || e == EnvDockerDesktop || e == EnvMicroK8s || e == EnvCRC || e == EnvKIND5 || e == EnvKIND6 || e == EnvK3D || e == EnvKrucible + return e == EnvMinikube || + e == EnvDockerDesktop || + e == EnvMicroK8s || + e == EnvCRC || + e == EnvKIND5 || + e == EnvKIND6 || + e == EnvK3D || + e == EnvKrucible } func ProvideKubeContext(config *api.Config) (KubeContext, error) { diff --git a/internal/k8s/minikube.go b/internal/k8s/minikube.go index c7ba2b84f2..2fab553c34 100644 --- a/internal/k8s/minikube.go +++ b/internal/k8s/minikube.go @@ -17,9 +17,17 @@ import ( var envMatcher = regexp.MustCompile(`export (\w+)="([^"]+)"`) var versionMatcher = regexp.MustCompile(`^minikube version: v([0-9.]+)$`) +// Error messages if Minikube is running OK but docker-env is unsupported. +var dockerEnvUnsupportedMsgs = []string{ + "ENV_DRIVER_CONFLICT", + "ENV_MULTINODE_CONFLICT", + "ENV_DOCKER_UNAVAILABLE", + "The docker-env command is only compatible", +} + type MinikubeClient interface { Version(ctx context.Context) (string, error) - DockerEnv(ctx context.Context) (map[string]string, error) + DockerEnv(ctx context.Context) (map[string]string, bool, error) NodeIP(ctx context.Context) (NodeIP, error) } @@ -64,22 +72,32 @@ func minikubeVersionFromOutput(output []byte) (string, error) { return "", fmt.Errorf("version not found in output:\n%s", string(output)) } -func (mc minikubeClient) DockerEnv(ctx context.Context) (map[string]string, error) { +// Returns: +// - A map of env variables for the minikube docker-env. +// - True if this minikube supports a docker-env, false otherwise +// - An error if minikube doesn't appear to be running. +func (mc minikubeClient) DockerEnv(ctx context.Context) (map[string]string, bool, error) { cmd := mc.cmd(ctx, "docker-env", "--shell", "sh") output, err := cmd.Output() if err != nil { exitErr, isExitErr := err.(*exec.ExitError) if isExitErr { - // TODO(nick): Maybe we should automatically run minikube start? 
- return nil, fmt.Errorf("Could not read docker env from minikube.\n"+ - "Did you forget to run `minikube start`?\n%s", string(exitErr.Stderr)) + stderr := string(exitErr.Stderr) + for _, msg := range dockerEnvUnsupportedMsgs { + if strings.Contains(stderr, msg) { + return nil, false, nil + } + } + + return nil, false, fmt.Errorf("Could not read docker env from minikube.\n"+ + "Did you forget to run `minikube start`?\n%s", stderr) } - return nil, errors.Wrap(err, "Could not read docker env from minikube") + return nil, false, errors.Wrap(err, "Could not read docker env from minikube") } - return dockerEnvFromOutput(output) + return dockerEnvFromOutput(output), true, nil } -func dockerEnvFromOutput(output []byte) (map[string]string, error) { +func dockerEnvFromOutput(output []byte) map[string]string { result := make(map[string]string) scanner := bufio.NewScanner(bytes.NewBuffer(output)) for scanner.Scan() { @@ -91,7 +109,7 @@ func dockerEnvFromOutput(output []byte) (map[string]string, error) { } } - return result, nil + return result } func (mc minikubeClient) NodeIP(ctx context.Context) (NodeIP, error) { diff --git a/internal/k8s/minikube_fake.go b/internal/k8s/minikube_fake.go index 9895cdeae9..68d1139ffc 100644 --- a/internal/k8s/minikube_fake.go +++ b/internal/k8s/minikube_fake.go @@ -11,8 +11,8 @@ func (c FakeMinikube) Version(ctx context.Context) (string, error) { return c.FakeVersion, nil } -func (c FakeMinikube) DockerEnv(ctx context.Context) (map[string]string, error) { - return c.DockerEnvMap, nil +func (c FakeMinikube) DockerEnv(ctx context.Context) (map[string]string, bool, error) { + return c.DockerEnvMap, len(c.DockerEnvMap) > 0, nil } func (c FakeMinikube) NodeIP(ctx context.Context) (NodeIP, error) { diff --git a/internal/k8s/minikube_test.go b/internal/k8s/minikube_test.go index 0df8d17566..b5e8761b94 100644 --- a/internal/k8s/minikube_test.go +++ b/internal/k8s/minikube_test.go @@ -25,10 +25,7 @@ export DOCKER_API_VERSION="1.35" # eval $(minikube docker-env) `) - env, err := dockerEnvFromOutput(output) - if err != nil { - t.Fatal(err) - } + env := dockerEnvFromOutput(output) if len(env) != 4 || env["DOCKER_TLS_VERIFY"] != "1" ||