Skip to content

Commit

Permalink
Waiting for control plane to be fully upgraded before upgrading other…
Browse files Browse the repository at this point in the history
… components
  • Loading branch information
panktishah26 committed Oct 4, 2023
1 parent 0d8519c commit 2efb3ea
Show file tree
Hide file tree
Showing 3 changed files with 142 additions and 6 deletions.
1 change: 1 addition & 0 deletions pkg/clusterapi/constants.go
Original file line number Diff line number Diff line change
Expand Up @@ -4,4 +4,5 @@ import clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"

const (
	// ControlPlaneReadyCondition is the CAPI cluster condition reporting
	// whether the cluster's control plane is ready.
	ControlPlaneReadyCondition clusterv1.ConditionType = "ControlPlaneReady"
	// ReadyCondition is the top-level readiness condition set on CAPI
	// objects such as the KubeadmControlPlane.
	ReadyCondition clusterv1.ConditionType = "Ready"
)
33 changes: 29 additions & 4 deletions pkg/controller/clusters/clusterapi.go
Original file line number Diff line number Diff line change
Expand Up @@ -11,13 +11,40 @@ import (
anywherev1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/clusterapi"
"github.com/aws/eks-anywhere/pkg/controller"
"github.com/aws/eks-anywhere/pkg/features"
)

// CheckControlPlaneReady is a controller helper to check whether a CAPI cluster CP for
// an eks-a cluster is ready or not. This is intended to be used from cluster reconcilers
// CheckControlPlaneReady is a controller helper to check whether KCP object for
// the cluster is ready or not. This is intended to be used from cluster reconcilers
// due its signature and that it returns controller results with appropriate wait times whenever
// the cluster is not ready.
func CheckControlPlaneReady(ctx context.Context, client client.Client, log logr.Logger, cluster *anywherev1.Cluster) (controller.Result, error) {
if features.IsActive(features.ExperimentalSelfManagedClusterUpgrade()) {
kcp, err := controller.GetKubeadmControlPlane(ctx, client, cluster)
if err != nil {
return controller.Result{}, err
}

Check warning on line 26 in pkg/controller/clusters/clusterapi.go

View check run for this annotation

Codecov / codecov/patch

pkg/controller/clusters/clusterapi.go#L25-L26

Added lines #L25 - L26 were not covered by tests

if kcp == nil {
log.Info("KCP does not exist yet, requeuing")
return controller.ResultWithRequeue(5 * time.Second), nil
}

// We make sure to check that the status is up to date before using it
if kcp.Status.ObservedGeneration != kcp.ObjectMeta.Generation {
log.Info("KCP information is outdated, requeing")
return controller.ResultWithRequeue(5 * time.Second), nil
}

if !conditions.IsTrue(kcp, clusterapi.ReadyCondition) {
log.Info("KCP is not ready yet, requeing")
return controller.ResultWithRequeue(30 * time.Second), nil
}

log.Info("KCP is ready")
return controller.Result{}, nil
}

capiCluster, err := controller.GetCAPICluster(ctx, client, cluster)
if err != nil {
return controller.Result{}, err
Expand All @@ -27,13 +54,11 @@ func CheckControlPlaneReady(ctx context.Context, client client.Client, log logr.
log.Info("CAPI cluster does not exist yet, requeuing")
return controller.ResultWithRequeue(5 * time.Second), nil
}

if !conditions.IsTrue(capiCluster, clusterapi.ControlPlaneReadyCondition) {
log.Info("CAPI control plane is not ready yet, requeuing")
// TODO: eventually this can be implemented with controller watches
return controller.ResultWithRequeue(30 * time.Second), nil
}

log.Info("CAPI control plane is ready")
return controller.Result{}, nil
}
114 changes: 112 additions & 2 deletions pkg/controller/clusters/clusterapi_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,7 @@ package clusters_test

import (
"context"
"os"
"testing"
"time"

Expand All @@ -10,15 +11,18 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
"sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1"
controllerruntime "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client/fake"

"github.com/aws/eks-anywhere/internal/test"
_ "github.com/aws/eks-anywhere/internal/test/envtest"
anywherev1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/clusterapi"
"github.com/aws/eks-anywhere/pkg/constants"
"github.com/aws/eks-anywhere/pkg/controller"
"github.com/aws/eks-anywhere/pkg/controller/clusters"
"github.com/aws/eks-anywhere/pkg/features"
)

func TestCheckControlPlaneReadyItIsReady(t *testing.T) {
Expand Down Expand Up @@ -82,6 +86,92 @@ func TestCheckControlPlaneReadyErrorReading(t *testing.T) {
g.Expect(err).To(MatchError(ContainSubstring("no kind is registered for the type")))
}

// TestCheckControlPlaneReadyItIsReadyWithKindlessUpgrade verifies that when
// the experimental self-managed upgrade feature is enabled and the KCP's
// Ready condition is true, CheckControlPlaneReady returns an empty
// (no-requeue) result.
func TestCheckControlPlaneReadyItIsReadyWithKindlessUpgrade(t *testing.T) {
	features.ClearCache()
	// t.Setenv restores the previous value when the test ends, so the feature
	// flag cannot leak into other tests.
	t.Setenv(features.ExperimentalSelfManagedClusterUpgradeEnvVar, "true")

	g := NewWithT(t)
	ctx := context.Background()
	eksaCluster := eksaCluster()
	kcp := kcpObject(func(k *v1beta1.KubeadmControlPlane) {
		k.Status.Conditions = clusterv1.Conditions{
			{
				Type:   clusterapi.ReadyCondition,
				Status: corev1.ConditionTrue,
			},
		}
	})

	client := fake.NewClientBuilder().WithObjects(eksaCluster, kcp).Build()

	result, err := clusters.CheckControlPlaneReady(ctx, client, test.NewNullLogger(), eksaCluster)
	g.Expect(err).NotTo(HaveOccurred())
	g.Expect(result).To(Equal(controller.Result{}))
}

// TestCheckControlPlaneReadyNoKcpWithKindlessUpgrade verifies that when the
// experimental self-managed upgrade feature is enabled and no KCP object
// exists yet, CheckControlPlaneReady requeues after 5 seconds.
func TestCheckControlPlaneReadyNoKcpWithKindlessUpgrade(t *testing.T) {
	features.ClearCache()
	// t.Setenv restores the previous value when the test ends, so the feature
	// flag cannot leak into other tests.
	t.Setenv(features.ExperimentalSelfManagedClusterUpgradeEnvVar, "true")

	g := NewWithT(t)
	ctx := context.Background()
	eksaCluster := eksaCluster()
	client := fake.NewClientBuilder().WithObjects(eksaCluster).Build()

	result, err := clusters.CheckControlPlaneReady(ctx, client, test.NewNullLogger(), eksaCluster)
	g.Expect(err).NotTo(HaveOccurred())
	g.Expect(result).To(Equal(
		controller.Result{Result: &controllerruntime.Result{RequeueAfter: 5 * time.Second}}),
	)
}

// TestCheckControlPlaneNotReadyWithKindlessUpgrade verifies that when the
// experimental self-managed upgrade feature is enabled and the KCP status is
// stale (ObservedGeneration does not match the object's Generation),
// CheckControlPlaneReady requeues after 5 seconds without trusting the
// outdated status.
func TestCheckControlPlaneNotReadyWithKindlessUpgrade(t *testing.T) {
	features.ClearCache()
	// t.Setenv restores the previous value when the test ends, so the feature
	// flag cannot leak into other tests.
	t.Setenv(features.ExperimentalSelfManagedClusterUpgradeEnvVar, "true")

	g := NewWithT(t)
	ctx := context.Background()
	eksaCluster := eksaCluster()
	kcp := kcpObject(func(k *v1beta1.KubeadmControlPlane) {
		// Generation is left at its zero value, so this ObservedGeneration
		// mismatch exercises the outdated-status branch.
		k.Status = v1beta1.KubeadmControlPlaneStatus{
			ObservedGeneration: 2,
		}
	})

	client := fake.NewClientBuilder().WithObjects(eksaCluster, kcp).Build()

	result, err := clusters.CheckControlPlaneReady(ctx, client, test.NewNullLogger(), eksaCluster)
	g.Expect(err).NotTo(HaveOccurred())
	g.Expect(result).To(Equal(
		controller.Result{Result: &controllerruntime.Result{RequeueAfter: 5 * time.Second}}),
	)
}

// TestCheckControlPlaneStatusNotReadyWithKindlessUpgrade verifies that when
// the experimental self-managed upgrade feature is enabled and the KCP's
// Ready condition is false, CheckControlPlaneReady requeues after 30 seconds.
func TestCheckControlPlaneStatusNotReadyWithKindlessUpgrade(t *testing.T) {
	features.ClearCache()
	// t.Setenv restores the previous value when the test ends, so the feature
	// flag cannot leak into other tests.
	t.Setenv(features.ExperimentalSelfManagedClusterUpgradeEnvVar, "true")

	g := NewWithT(t)
	ctx := context.Background()
	eksaCluster := eksaCluster()
	kcp := kcpObject(func(k *v1beta1.KubeadmControlPlane) {
		k.Status.Conditions = clusterv1.Conditions{
			{
				Type:   clusterapi.ReadyCondition,
				Status: corev1.ConditionFalse,
			},
		}
	})

	client := fake.NewClientBuilder().WithObjects(eksaCluster, kcp).Build()

	result, err := clusters.CheckControlPlaneReady(ctx, client, test.NewNullLogger(), eksaCluster)
	g.Expect(err).NotTo(HaveOccurred())
	g.Expect(result).To(Equal(
		controller.Result{Result: &controllerruntime.Result{RequeueAfter: 30 * time.Second}}),
	)
}

func eksaCluster() *anywherev1.Cluster {
return &anywherev1.Cluster{
TypeMeta: metav1.TypeMeta{
Expand All @@ -104,13 +194,33 @@ func capiCluster(opts ...capiClusterOpt) *clusterv1.Cluster {
},
ObjectMeta: metav1.ObjectMeta{
Name: "my-cluster",
Namespace: "eksa-system",
Namespace: constants.EksaSystemNamespace,
},
}

for _, opt := range opts {
opt(c)
}

return c
}

// kcpObjectOpt mutates a KubeadmControlPlane during test fixture construction.
type kcpObjectOpt func(*v1beta1.KubeadmControlPlane)

// kcpObject builds a KubeadmControlPlane test fixture named "my-cluster" in
// the eksa-system namespace and applies each provided option to it in order.
func kcpObject(opts ...kcpObjectOpt) *v1beta1.KubeadmControlPlane {
	kcp := &v1beta1.KubeadmControlPlane{
		TypeMeta: metav1.TypeMeta{
			Kind:       "KubeadmControlPlane",
			APIVersion: v1beta1.GroupVersion.String(),
		},
		ObjectMeta: metav1.ObjectMeta{
			Name:      "my-cluster",
			Namespace: constants.EksaSystemNamespace,
		},
	}

	for _, apply := range opts {
		apply(kcp)
	}

	return kcp
}

0 comments on commit 2efb3ea

Please sign in to comment.