kubernetesapply: inject a frozen kubeconfig into k8s_custom_deploy
Fixes #5703
nicks committed May 9, 2022
1 parent e0425b9 commit 2e180a7
Showing 3 changed files with 109 additions and 10 deletions.
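
For context, a minimal sketch of the consumer side of this change: a hypothetical custom apply command, not part of the commit (the deploy.yaml file name and the kubectl invocation are illustrative). With this commit, Tilt runs a k8s_custom_deploy apply command with KUBECONFIG pointing at the cluster's frozen config path, so child processes such as kubectl target the selected cluster without extra flags.

package main

import (
    "fmt"
    "os"
    "os/exec"
)

func main() {
    // Injected by the kubernetesapply reconciler when the Cluster status
    // carries a Kubernetes connection with a non-empty ConfigPath; unset
    // otherwise.
    fmt.Fprintf(os.Stderr, "KUBECONFIG=%s\n", os.Getenv("KUBECONFIG"))

    // Leaving cmd.Env nil makes the child inherit this process's
    // environment, including the injected KUBECONFIG, so kubectl needs
    // no --kubeconfig flag.
    cmd := exec.Command("kubectl", "apply", "-f", "deploy.yaml", "-o", "yaml")
    cmd.Stdout = os.Stdout // Tilt reads the applied objects' YAML from stdout.
    cmd.Stderr = os.Stderr
    if err := cmd.Run(); err != nil {
        os.Exit(1)
    }
}

The new maybeInjectKubeconfig helper in reconciler.go below is the producer side of this contract.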
58 changes: 52 additions & 6 deletions internal/controllers/core/kubernetesapply/reconciler.go
@@ -70,6 +70,8 @@ func (r *Reconciler) CreateBuilder(mgr ctrl.Manager) (*builder.Builder, error) {
         Watches(&source.Kind{Type: &v1alpha1.ImageMap{}},
             handler.EnqueueRequestsFromMapFunc(r.indexer.Enqueue)).
         Watches(&source.Kind{Type: &v1alpha1.ConfigMap{}},
-            handler.EnqueueRequestsFromMapFunc(r.indexer.Enqueue))
+            handler.EnqueueRequestsFromMapFunc(r.indexer.Enqueue)).
+        Watches(&source.Kind{Type: &v1alpha1.Cluster{}},
+            handler.EnqueueRequestsFromMapFunc(r.indexer.Enqueue))
 
     trigger.SetupControllerRestartOn(b, r.indexer, func(obj ctrlclient.Object) *v1alpha1.RestartOnSpec {
@@ -137,7 +139,15 @@ func (r *Reconciler) Reconcile(ctx context.Context, request reconcile.Request) (
         gcReason = "deleting disabled Kubernetes objects"
         isDisabling = true
     } else {
-        // Fetch all the images needed to apply this YAML.
+        // Fetch all the objects needed to apply this YAML.
+        var cluster v1alpha1.Cluster
+        if ka.Spec.Cluster != "" {
+            err := r.ctrlClient.Get(ctx, types.NamespacedName{Name: ka.Spec.Cluster}, &cluster)
+            if client.IgnoreNotFound(err) != nil {
+                return ctrl.Result{}, err
+            }
+        }
+
         imageMaps, err := imagemap.NamesToObjects(ctx, r.ctrlClient, ka.Spec.ImageMaps)
         if err != nil {
             return ctrl.Result{}, err
@@ -153,8 +163,8 @@ func (r *Reconciler) Reconcile(ctx context.Context, request reconcile.Request) (
         // TODO(nick): Like with other reconcilers, there should always
         // be a reason why we're not deploying, and we should update the
         // Status field of KubernetesApply with that reason.
-        if r.shouldDeployOnReconcile(request.NamespacedName, &ka, imageMaps, lastRestartEvent) {
-            _ = r.forceApplyHelper(ctx, nn, ka.Spec, imageMaps)
+        if r.shouldDeployOnReconcile(request.NamespacedName, &ka, &cluster, imageMaps, lastRestartEvent) {
+            _ = r.forceApplyHelper(ctx, nn, ka.Spec, &cluster, imageMaps)
             gcReason = "garbage collecting removed Kubernetes objects"
         }
     }
@@ -179,6 +189,7 @@ func (r *Reconciler) Reconcile(ctx context.Context, request reconcile.Request) (
 func (r *Reconciler) shouldDeployOnReconcile(
     nn types.NamespacedName,
     ka *v1alpha1.KubernetesApply,
+    cluster *v1alpha1.Cluster,
     imageMaps map[types.NamespacedName]*v1alpha1.ImageMap,
     lastRestartEvent metav1.MicroTime,
 ) bool {
@@ -189,6 +200,15 @@ func (r *Reconciler) shouldDeployOnReconcile(
         return false
     }
 
+    if ka.Spec.Cluster != "" {
+        isClusterOK := cluster != nil && cluster.Name != "" &&
+            cluster.Status.Error == "" && cluster.Status.Connection != nil
+        if !isClusterOK {
+            // Wait for the cluster to start.
+            return false
+        }
+    }
+
     for _, imageMapName := range ka.Spec.ImageMaps {
         _, ok := imageMaps[types.NamespacedName{Name: imageMapName}]
         if !ok {
@@ -247,8 +267,9 @@ func (r *Reconciler) ForceApply(
     ctx context.Context,
     nn types.NamespacedName,
     spec v1alpha1.KubernetesApplySpec,
+    cluster *v1alpha1.Cluster,
     imageMaps map[types.NamespacedName]*v1alpha1.ImageMap) v1alpha1.KubernetesApplyStatus {
-    status := r.forceApplyHelper(ctx, nn, spec, imageMaps)
+    status := r.forceApplyHelper(ctx, nn, spec, cluster, imageMaps)
     r.requeuer.Add(nn)
     return status
 }
@@ -259,6 +280,7 @@ func (r *Reconciler) forceApplyHelper(
     ctx context.Context,
     nn types.NamespacedName,
     spec v1alpha1.KubernetesApplySpec,
+    cluster *v1alpha1.Cluster,
     imageMaps map[types.NamespacedName]*v1alpha1.ImageMap,
 ) v1alpha1.KubernetesApplyStatus {
 
@@ -286,7 +308,7 @@ func (r *Reconciler) forceApplyHelper(
             return recordErrorStatus(err)
         }
     } else {
-        deployed, err = r.runCmdDeploy(deployCtx, spec, imageMaps)
+        deployed, err = r.runCmdDeploy(deployCtx, spec, cluster, imageMaps)
         if err != nil {
             return recordErrorStatus(err)
         }
@@ -344,7 +366,22 @@ func (r *Reconciler) runYAMLDeploy(ctx context.Context, spec v1alpha1.Kubernetes
     return deployed, nil
 }
 
-func (r *Reconciler) runCmdDeploy(ctx context.Context, spec v1alpha1.KubernetesApplySpec, imageMaps map[types.NamespacedName]*v1alpha1.ImageMap) ([]k8s.K8sEntity, error) {
+func (r *Reconciler) maybeInjectKubeconfig(cmd *model.Cmd, cluster *v1alpha1.Cluster) {
+    if cluster == nil ||
+        cluster.Status.Connection == nil ||
+        cluster.Status.Connection.Kubernetes == nil {
+        return
+    }
+    kubeconfig := cluster.Status.Connection.Kubernetes.ConfigPath
+    if kubeconfig == "" {
+        return
+    }
+    cmd.Env = append(cmd.Env, fmt.Sprintf("KUBECONFIG=%s", kubeconfig))
+}
+
+func (r *Reconciler) runCmdDeploy(ctx context.Context, spec v1alpha1.KubernetesApplySpec,
+    cluster *v1alpha1.Cluster,
+    imageMaps map[types.NamespacedName]*v1alpha1.ImageMap) ([]k8s.K8sEntity, error) {
     timeout := spec.Timeout.Duration
     if timeout == 0 {
         timeout = v1alpha1.KubernetesApplyTimeoutDefault
@@ -363,6 +400,7 @@ func (r *Reconciler) runCmdDeploy(ctx context.Context, spec v1alpha1.KubernetesA
     if err != nil {
         return nil, err
     }
+    r.maybeInjectKubeconfig(&cmd, cluster)
 
     logger.Get(ctx).Infof("Running cmd: %s", cmd.String())
     exitCode, err := r.execer.Run(ctx, cmd, runIO)
@@ -842,6 +880,7 @@ func (r *Reconciler) bestEffortDelete(ctx context.Context, nn types.NamespacedNa
 }
 
 var imGVK = v1alpha1.SchemeGroupVersion.WithKind("ImageMap")
+var clusterGVK = v1alpha1.SchemeGroupVersion.WithKind("Cluster")
 
 // indexKubernetesApply returns keys for all the objects we need to watch based on the spec.
 func indexKubernetesApply(obj client.Object) []indexer.Key {
@@ -853,6 +892,12 @@ func indexKubernetesApply(obj client.Object) []indexer.Key {
             GVK:  imGVK,
         })
     }
+    if ka.Spec.Cluster != "" {
+        result = append(result, indexer.Key{
+            Name: types.NamespacedName{Name: ka.Spec.Cluster},
+            GVK:  clusterGVK,
+        })
+    }
 
     if ka.Spec.DisableSource != nil {
         cm := ka.Spec.DisableSource.ConfigMap
@@ -870,6 +915,7 @@ func indexKubernetesApply(obj client.Object) []indexer.Key {
 // Keeps track of the state we currently know about.
 type Result struct {
     Spec             v1alpha1.KubernetesApplySpec
+    ClusterStatus    v1alpha1.ClusterStatus
     ImageMapSpecs    []v1alpha1.ImageMapSpec
     ImageMapStatuses []v1alpha1.ImageMapStatus
 
55 changes: 53 additions & 2 deletions internal/controllers/core/kubernetesapply/reconciler_test.go
@@ -181,6 +181,43 @@ func TestApplyCmdWithImages(t *testing.T) {
     }
 }
 
+func TestApplyCmdWithKubeconfig(t *testing.T) {
+    f := newFixture(t)
+
+    f.Create(&v1alpha1.Cluster{
+        ObjectMeta: metav1.ObjectMeta{
+            Name: "default-cluster",
+        },
+        Status: v1alpha1.ClusterStatus{
+            Connection: &v1alpha1.ClusterConnectionStatus{
+                Kubernetes: &v1alpha1.KubernetesClusterConnectionStatus{
+                    ConfigPath: "/path/to/my/kubeconfig",
+                },
+            },
+        },
+    })
+
+    applyCmd, _ := f.createApplyCmd("custom-apply-cmd", testyaml.SanchoYAML)
+    ka := v1alpha1.KubernetesApply{
+        ObjectMeta: metav1.ObjectMeta{
+            Name: "a",
+        },
+        Spec: v1alpha1.KubernetesApplySpec{
+            Cluster:   "default-cluster",
+            ApplyCmd:  &applyCmd,
+            DeleteCmd: &v1alpha1.KubernetesApplyCmd{Args: []string{"custom-delete-cmd"}},
+        },
+    }
+    f.Create(&ka)
+
+    if assert.Len(t, f.execer.Calls(), 1) {
+        call := f.execer.Calls()[0]
+        assert.Equal(t, []string{
+            "KUBECONFIG=/path/to/my/kubeconfig",
+        }, call.Cmd.Env)
+    }
+}
+
 func TestBasicApplyCmd_ExecError(t *testing.T) {
     f := newFixture(t)
 
@@ -522,7 +559,7 @@ func TestIgnoreManagedObjects(t *testing.T) {
     assert.Empty(f.T(), ka.Status.ResultYAML)
     assert.Zero(f.T(), ka.Status.LastApplyTime)
 
-    result := f.r.ForceApply(f.Context(), nn, ka.Spec, nil)
+    result := f.r.ForceApply(f.Context(), nn, ka.Spec, nil, nil)
     assert.Contains(f.T(), result.ResultYAML, "sancho")
     assert.True(f.T(), !result.LastApplyTime.IsZero())
     assert.True(f.T(), !result.LastApplyStartTime.IsZero())
@@ -687,12 +724,26 @@ func newFixture(t *testing.T) *fixture {
     db := build.NewDockerBuilder(dockerClient, dockerfile.Labels{})
     r := NewReconciler(cfb.Client, kClient, v1alpha1.NewScheme(), db, cfb.Store, execer)
 
-    return &fixture{
+    f := &fixture{
         ControllerFixture: cfb.Build(r),
         r:                 r,
         kClient:           kClient,
         execer:            execer,
     }
+    f.Create(&v1alpha1.Cluster{
+        ObjectMeta: metav1.ObjectMeta{
+            Name: "default",
+        },
+        Status: v1alpha1.ClusterStatus{
+            Connection: &v1alpha1.ClusterConnectionStatus{
+                Kubernetes: &v1alpha1.KubernetesClusterConnectionStatus{
+                    Context: "default",
+                },
+            },
+        },
+    })
+
+    return f
 }
 
 // createApplyCmd creates a KubernetesApplyCmd that uses the passed YAML to generate simulated stdout via the FakeExecer.
6 changes: 4 additions & 2 deletions internal/engine/buildcontrol/image_build_and_deployer.go
@@ -137,7 +138,8 @@ func (ibd *ImageBuildAndDeployer) BuildAndDeploy(ctx context.Context, st store.R
 
     // (If we pass an empty list of refs here (as we will do if only deploying
     // yaml), we just don't inject any image refs into the yaml, nbd.)
-    k8sResult, err := ibd.deploy(ctx, st, ps, kTarget.ID(), kTarget.KubernetesApplySpec, imageMapSet)
+    cluster := stateSet[kTarget.ID()].ClusterOrEmpty()
+    k8sResult, err := ibd.deploy(ctx, st, ps, kTarget.ID(), kTarget.KubernetesApplySpec, cluster, imageMapSet)
     if err != nil {
         return newResults, WrapDontFallBackError(err)
     }
@@ -167,12 +168,13 @@ func (ibd *ImageBuildAndDeployer) deploy(
     ps *build.PipelineState,
     kTargetID model.TargetID,
     spec v1alpha1.KubernetesApplySpec,
+    cluster *v1alpha1.Cluster,
     imageMaps map[types.NamespacedName]*v1alpha1.ImageMap) (store.K8sBuildResult, error) {
     ps.StartPipelineStep(ctx, "Deploying")
     defer ps.EndPipelineStep(ctx)
 
     kTargetNN := types.NamespacedName{Name: kTargetID.Name.String()}
-    status := ibd.r.ForceApply(ctx, kTargetNN, spec, imageMaps)
+    status := ibd.r.ForceApply(ctx, kTargetNN, spec, cluster, imageMaps)
     if status.Error != "" {
         return store.K8sBuildResult{}, fmt.Errorf("%s", status.Error)
     }