diff --git a/.circleci/config.yml b/.circleci/config.yml index c9093f34d..05cdf0adb 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -82,7 +82,7 @@ jobs: - run: test/e2e-istio.sh - run: test/e2e-tests.sh - e2e-kubernetes-testing: + e2e-kubernetes-daemonset-testing: machine: true steps: - checkout @@ -91,7 +91,18 @@ jobs: - run: test/container-build.sh - run: test/e2e-kind.sh v1.17.0 - run: test/e2e-kubernetes.sh - - run: test/e2e-kubernetes-tests.sh + - run: test/e2e-kubernetes-tests-daemonset.sh + + e2e-kubernetes-deployment-testing: + machine: true + steps: + - checkout + - attach_workspace: + at: /tmp/bin + - run: test/container-build.sh + - run: test/e2e-kind.sh v1.17.0 + - run: test/e2e-kubernetes.sh + - run: test/e2e-kubernetes-tests-deployment.sh e2e-kubernetes-svc-testing: machine: true @@ -215,7 +226,10 @@ workflows: - e2e-istio-testing: requires: - build-binary - - e2e-kubernetes-testing: + - e2e-kubernetes-daemonset-testing: + requires: + - build-binary + - e2e-kubernetes-deployment-testing: requires: - build-binary - e2e-gloo-testing: @@ -234,7 +248,8 @@ workflows: requires: - build-binary - e2e-istio-testing - - e2e-kubernetes-testing + - e2e-kubernetes-daemonset-testing + - e2e-kubernetes-deployment-testing - e2e-gloo-testing - e2e-nginx-testing - e2e-linkerd-testing diff --git a/artifacts/flagger/account.yaml b/artifacts/flagger/account.yaml index 68ffc3a03..c71e6108e 100644 --- a/artifacts/flagger/account.yaml +++ b/artifacts/flagger/account.yaml @@ -24,6 +24,7 @@ rules: - apiGroups: - apps resources: + - daemonsets - deployments verbs: ["*"] - apiGroups: diff --git a/artifacts/flagger/crd.yaml b/artifacts/flagger/crd.yaml index 2b6269a8c..786dfb7e2 100644 --- a/artifacts/flagger/crd.yaml +++ b/artifacts/flagger/crd.yaml @@ -87,6 +87,7 @@ spec: kind: type: string enum: + - DaemonSet - Deployment - Service name: diff --git a/charts/flagger/crds/crd.yaml b/charts/flagger/crds/crd.yaml index 2b6269a8c..786dfb7e2 100644 --- 
a/charts/flagger/crds/crd.yaml +++ b/charts/flagger/crds/crd.yaml @@ -87,6 +87,7 @@ spec: kind: type: string enum: + - DaemonSet - Deployment - Service name: diff --git a/charts/flagger/templates/rbac.yaml b/charts/flagger/templates/rbac.yaml index 2013fe919..4306a2e18 100644 --- a/charts/flagger/templates/rbac.yaml +++ b/charts/flagger/templates/rbac.yaml @@ -20,6 +20,7 @@ rules: - apiGroups: - apps resources: + - daemonsets - deployments verbs: ["*"] - apiGroups: diff --git a/kustomize/base/flagger/crd.yaml b/kustomize/base/flagger/crd.yaml index 2b6269a8c..786dfb7e2 100644 --- a/kustomize/base/flagger/crd.yaml +++ b/kustomize/base/flagger/crd.yaml @@ -87,6 +87,7 @@ spec: kind: type: string enum: + - DaemonSet - Deployment - Service name: diff --git a/kustomize/base/flagger/rbac.yaml b/kustomize/base/flagger/rbac.yaml index b4bf38f0d..5383684cc 100644 --- a/kustomize/base/flagger/rbac.yaml +++ b/kustomize/base/flagger/rbac.yaml @@ -14,6 +14,7 @@ rules: - apiGroups: - apps resources: + - daemonsets - deployments verbs: ["*"] - apiGroups: diff --git a/pkg/canary/config_tracker.go b/pkg/canary/config_tracker.go index bf64cd499..e3922fcb7 100644 --- a/pkg/canary/config_tracker.go +++ b/pkg/canary/config_tracker.go @@ -93,16 +93,36 @@ func (ct *ConfigTracker) getRefFromSecret(name string, namespace string) (*Confi func (ct *ConfigTracker) GetTargetConfigs(cd *flaggerv1.Canary) (map[string]ConfigRef, error) { res := make(map[string]ConfigRef) targetName := cd.Spec.TargetRef.Name - targetDep, err := ct.KubeClient.AppsV1().Deployments(cd.Namespace).Get(targetName, metav1.GetOptions{}) - if err != nil { - if errors.IsNotFound(err) { - return res, fmt.Errorf("deployment %s.%s not found", targetName, cd.Namespace) + + var vs []corev1.Volume + var cs []corev1.Container + switch cd.Spec.TargetRef.Kind { + case "Deployment": + targetDep, err := ct.KubeClient.AppsV1().Deployments(cd.Namespace).Get(targetName, metav1.GetOptions{}) + if err != nil { + if errors.IsNotFound(err) { 
+ return res, fmt.Errorf("deployment %s.%s not found", targetName, cd.Namespace) + } + return res, fmt.Errorf("deployment %s.%s query error %v", targetName, cd.Namespace, err) + } + vs = targetDep.Spec.Template.Spec.Volumes + cs = targetDep.Spec.Template.Spec.Containers + case "DaemonSet": + targetDae, err := ct.KubeClient.AppsV1().DaemonSets(cd.Namespace).Get(targetName, metav1.GetOptions{}) + if err != nil { + if errors.IsNotFound(err) { + return res, fmt.Errorf("daemonset %s.%s not found", targetName, cd.Namespace) + } + return res, fmt.Errorf("daemonset %s.%s query error %v", targetName, cd.Namespace, err) } - return res, fmt.Errorf("deployment %s.%s query error %v", targetName, cd.Namespace, err) + vs = targetDae.Spec.Template.Spec.Volumes + cs = targetDae.Spec.Template.Spec.Containers + default: + return nil, fmt.Errorf("TargetRef.Kind invalid: %s", cd.Spec.TargetRef.Kind) } // scan volumes - for _, volume := range targetDep.Spec.Template.Spec.Volumes { + for _, volume := range vs { if cmv := volume.ConfigMap; cmv != nil { config, err := ct.getRefFromConfigMap(cmv.Name, cd.Namespace) if err != nil { @@ -152,7 +172,7 @@ func (ct *ConfigTracker) GetTargetConfigs(cd *flaggerv1.Canary) (map[string]Conf } } // scan containers - for _, container := range targetDep.Spec.Template.Spec.Containers { + for _, container := range cs { // scan env for _, env := range container.Env { if env.ValueFrom != nil { diff --git a/pkg/canary/config_tracker_test.go b/pkg/canary/config_tracker_test.go index b85503559..c45a1bd69 100644 --- a/pkg/canary/config_tracker_test.go +++ b/pkg/canary/config_tracker_test.go @@ -7,125 +7,253 @@ import ( ) func TestConfigTracker_ConfigMaps(t *testing.T) { - mocks := newFixture() - configMap := newTestConfigMap() - configMapProjected := newTestConfigProjected() - - err := mocks.deployer.Initialize(mocks.canary, true) - if err != nil { - t.Fatal(err.Error()) - } - - depPrimary, err := 
mocks.kubeClient.AppsV1().Deployments("default").Get("podinfo-primary", metav1.GetOptions{}) - if err != nil { - t.Fatal(err.Error()) - } - - configPrimaryVolName := depPrimary.Spec.Template.Spec.Volumes[0].VolumeSource.ConfigMap.LocalObjectReference.Name - if configPrimaryVolName != "podinfo-config-vol-primary" { - t.Errorf("Got config name %v wanted %v", configPrimaryVolName, "podinfo-config-vol-primary") - } - - configPrimary, err := mocks.kubeClient.CoreV1().ConfigMaps("default").Get("podinfo-config-env-primary", metav1.GetOptions{}) - if err != nil { - t.Fatal(err.Error()) - } - - if configPrimary.Data["color"] != configMap.Data["color"] { - t.Errorf("Got ConfigMap color %s wanted %s", configPrimary.Data["color"], configMap.Data["color"]) - } - - configPrimaryEnv, err := mocks.kubeClient.CoreV1().ConfigMaps("default").Get("podinfo-config-all-env-primary", metav1.GetOptions{}) - if err != nil { - t.Fatal(err.Error()) - } - - if configPrimaryEnv.Data["color"] != configMap.Data["color"] { - t.Errorf("Got ConfigMap %s wanted %s", configPrimaryEnv.Data["a"], configMap.Data["color"]) - } - - configPrimaryVol, err := mocks.kubeClient.CoreV1().ConfigMaps("default").Get("podinfo-config-vol-primary", metav1.GetOptions{}) - if err != nil { - t.Fatal(err.Error()) - } - - if configPrimaryVol.Data["color"] != configMap.Data["color"] { - t.Errorf("Got ConfigMap color %s wanted %s", configPrimary.Data["color"], configMap.Data["color"]) - } - - configProjectedName := depPrimary.Spec.Template.Spec.Volumes[2].VolumeSource.Projected.Sources[0].ConfigMap.Name - if configProjectedName != "podinfo-config-projected-primary" { - t.Errorf("Got config name %v wanted %v", configProjectedName, "podinfo-config-projected-primary") - } - - configPrimaryProjected, err := mocks.kubeClient.CoreV1().ConfigMaps("default").Get("podinfo-config-vol-primary", metav1.GetOptions{}) - if err != nil { - t.Fatal(err.Error()) - } - - if configPrimaryProjected.Data["color"] != 
configMapProjected.Data["color"] { - t.Errorf("Got ConfigMap color %s wanted %s", configPrimaryProjected.Data["color"], configMapProjected.Data["color"]) - } + t.Run("deployment", func(t *testing.T) { + mocks := newDeploymentFixture() + configMap := newDeploymentControllerTestConfigMap() + configMapProjected := newDeploymentControllerTestConfigProjected() + + err := mocks.controller.Initialize(mocks.canary, true) + if err != nil { + t.Fatal(err.Error()) + } + + depPrimary, err := mocks.kubeClient.AppsV1().Deployments("default").Get("podinfo-primary", metav1.GetOptions{}) + if err != nil { + t.Fatal(err.Error()) + } + + configPrimaryVolName := depPrimary.Spec.Template.Spec.Volumes[0].VolumeSource.ConfigMap.LocalObjectReference.Name + if configPrimaryVolName != "podinfo-config-vol-primary" { + t.Errorf("Got config name %v wanted %v", configPrimaryVolName, "podinfo-config-vol-primary") + } + + configPrimary, err := mocks.kubeClient.CoreV1().ConfigMaps("default").Get("podinfo-config-env-primary", metav1.GetOptions{}) + if err != nil { + t.Fatal(err.Error()) + } + + if configPrimary.Data["color"] != configMap.Data["color"] { + t.Errorf("Got ConfigMap color %s wanted %s", configPrimary.Data["color"], configMap.Data["color"]) + } + + configPrimaryEnv, err := mocks.kubeClient.CoreV1().ConfigMaps("default").Get("podinfo-config-all-env-primary", metav1.GetOptions{}) + if err != nil { + t.Fatal(err.Error()) + } + + if configPrimaryEnv.Data["color"] != configMap.Data["color"] { + t.Errorf("Got ConfigMap %s wanted %s", configPrimaryEnv.Data["a"], configMap.Data["color"]) + } + + configPrimaryVol, err := mocks.kubeClient.CoreV1().ConfigMaps("default").Get("podinfo-config-vol-primary", metav1.GetOptions{}) + if err != nil { + t.Fatal(err.Error()) + } + + if configPrimaryVol.Data["color"] != configMap.Data["color"] { + t.Errorf("Got ConfigMap color %s wanted %s", configPrimary.Data["color"], configMap.Data["color"]) + } + + configProjectedName := 
depPrimary.Spec.Template.Spec.Volumes[2].VolumeSource.Projected.Sources[0].ConfigMap.Name + if configProjectedName != "podinfo-config-projected-primary" { + t.Errorf("Got config name %v wanted %v", configProjectedName, "podinfo-config-projected-primary") + } + + configPrimaryProjected, err := mocks.kubeClient.CoreV1().ConfigMaps("default").Get("podinfo-config-vol-primary", metav1.GetOptions{}) + if err != nil { + t.Fatal(err.Error()) + } + + if configPrimaryProjected.Data["color"] != configMapProjected.Data["color"] { + t.Errorf("Got ConfigMap color %s wanted %s", configPrimaryProjected.Data["color"], configMapProjected.Data["color"]) + } + }) + + t.Run("daemonset", func(t *testing.T) { + mocks := newDaemonSetFixture() + configMap := newDaemonSetControllerTestConfigMap() + configMapProjected := newDaemonSetControllerTestConfigProjected() + + err := mocks.controller.Initialize(mocks.canary, true) + if err != nil { + t.Fatal(err.Error()) + } + + depPrimary, err := mocks.kubeClient.AppsV1().DaemonSets("default").Get("podinfo-primary", metav1.GetOptions{}) + if err != nil { + t.Fatal(err.Error()) + } + + configPrimaryVolName := depPrimary.Spec.Template.Spec.Volumes[0].VolumeSource.ConfigMap.LocalObjectReference.Name + if configPrimaryVolName != "podinfo-config-vol-primary" { + t.Errorf("Got config name %v wanted %v", configPrimaryVolName, "podinfo-config-vol-primary") + } + + configPrimary, err := mocks.kubeClient.CoreV1().ConfigMaps("default").Get("podinfo-config-env-primary", metav1.GetOptions{}) + if err != nil { + t.Fatal(err.Error()) + } + + if configPrimary.Data["color"] != configMap.Data["color"] { + t.Errorf("Got ConfigMap color %s wanted %s", configPrimary.Data["color"], configMap.Data["color"]) + } + + configPrimaryEnv, err := mocks.kubeClient.CoreV1().ConfigMaps("default").Get("podinfo-config-all-env-primary", metav1.GetOptions{}) + if err != nil { + t.Fatal(err.Error()) + } + + if configPrimaryEnv.Data["color"] != configMap.Data["color"] { + t.Errorf("Got 
ConfigMap %s wanted %s", configPrimaryEnv.Data["a"], configMap.Data["color"]) + } + + configPrimaryVol, err := mocks.kubeClient.CoreV1().ConfigMaps("default").Get("podinfo-config-vol-primary", metav1.GetOptions{}) + if err != nil { + t.Fatal(err.Error()) + } + + if configPrimaryVol.Data["color"] != configMap.Data["color"] { + t.Errorf("Got ConfigMap color %s wanted %s", configPrimary.Data["color"], configMap.Data["color"]) + } + + configProjectedName := depPrimary.Spec.Template.Spec.Volumes[2].VolumeSource.Projected.Sources[0].ConfigMap.Name + if configProjectedName != "podinfo-config-projected-primary" { + t.Errorf("Got config name %v wanted %v", configProjectedName, "podinfo-config-projected-primary") + } + + configPrimaryProjected, err := mocks.kubeClient.CoreV1().ConfigMaps("default").Get("podinfo-config-vol-primary", metav1.GetOptions{}) + if err != nil { + t.Fatal(err.Error()) + } + + if configPrimaryProjected.Data["color"] != configMapProjected.Data["color"] { + t.Errorf("Got ConfigMap color %s wanted %s", configPrimaryProjected.Data["color"], configMapProjected.Data["color"]) + } + }) } func TestConfigTracker_Secrets(t *testing.T) { - mocks := newFixture() - secret := newTestSecret() - secretProjected := newTestSecretProjected() - - err := mocks.deployer.Initialize(mocks.canary, true) - if err != nil { - t.Fatal(err.Error()) - } - - depPrimary, err := mocks.kubeClient.AppsV1().Deployments("default").Get("podinfo-primary", metav1.GetOptions{}) - if err != nil { - t.Fatal(err.Error()) - } - - secretPrimaryVolName := depPrimary.Spec.Template.Spec.Volumes[1].VolumeSource.Secret.SecretName - if secretPrimaryVolName != "podinfo-secret-vol-primary" { - t.Errorf("Got config name %v wanted %v", secretPrimaryVolName, "podinfo-secret-vol-primary") - } - - secretPrimary, err := mocks.kubeClient.CoreV1().Secrets("default").Get("podinfo-secret-env-primary", metav1.GetOptions{}) - if err != nil { - t.Fatal(err.Error()) - } - - if string(secretPrimary.Data["apiKey"]) != 
string(secret.Data["apiKey"]) { - t.Errorf("Got primary secret %s wanted %s", secretPrimary.Data["apiKey"], secret.Data["apiKey"]) - } - - secretPrimaryEnv, err := mocks.kubeClient.CoreV1().Secrets("default").Get("podinfo-secret-all-env-primary", metav1.GetOptions{}) - if err != nil { - t.Fatal(err.Error()) - } - - if string(secretPrimaryEnv.Data["apiKey"]) != string(secret.Data["apiKey"]) { - t.Errorf("Got primary secret %s wanted %s", secretPrimary.Data["apiKey"], secret.Data["apiKey"]) - } - - secretPrimaryVol, err := mocks.kubeClient.CoreV1().Secrets("default").Get("podinfo-secret-vol-primary", metav1.GetOptions{}) - if err != nil { - t.Fatal(err.Error()) - } - - if string(secretPrimaryVol.Data["apiKey"]) != string(secret.Data["apiKey"]) { - t.Errorf("Got primary secret %s wanted %s", secretPrimary.Data["apiKey"], secret.Data["apiKey"]) - } - - secretProjectedName := depPrimary.Spec.Template.Spec.Volumes[2].VolumeSource.Projected.Sources[1].Secret.Name - if secretProjectedName != "podinfo-secret-projected-primary" { - t.Errorf("Got config name %v wanted %v", secretProjectedName, "podinfo-secret-projected-primary") - } - - secretPrimaryProjected, err := mocks.kubeClient.CoreV1().Secrets("default").Get("podinfo-secret-projected-primary", metav1.GetOptions{}) - if err != nil { - t.Fatal(err.Error()) - } - - if string(secretPrimaryProjected.Data["apiKey"]) != string(secretProjected.Data["apiKey"]) { - t.Errorf("Got primary secret %s wanted %s", secretPrimaryProjected.Data["apiKey"], secretProjected.Data["apiKey"]) - } + t.Run("deployment", func(t *testing.T) { + mocks := newDeploymentFixture() + secret := newDeploymentControllerTestSecret() + secretProjected := newDeploymentControllerTestSecretProjected() + + err := mocks.controller.Initialize(mocks.canary, true) + if err != nil { + t.Fatal(err.Error()) + } + + depPrimary, err := mocks.kubeClient.AppsV1().Deployments("default").Get("podinfo-primary", metav1.GetOptions{}) + if err != nil { + t.Fatal(err.Error()) + } 
+ + secretPrimaryVolName := depPrimary.Spec.Template.Spec.Volumes[1].VolumeSource.Secret.SecretName + if secretPrimaryVolName != "podinfo-secret-vol-primary" { + t.Errorf("Got config name %v wanted %v", secretPrimaryVolName, "podinfo-secret-vol-primary") + } + + secretPrimary, err := mocks.kubeClient.CoreV1().Secrets("default").Get("podinfo-secret-env-primary", metav1.GetOptions{}) + if err != nil { + t.Fatal(err.Error()) + } + + if string(secretPrimary.Data["apiKey"]) != string(secret.Data["apiKey"]) { + t.Errorf("Got primary secret %s wanted %s", secretPrimary.Data["apiKey"], secret.Data["apiKey"]) + } + + secretPrimaryEnv, err := mocks.kubeClient.CoreV1().Secrets("default").Get("podinfo-secret-all-env-primary", metav1.GetOptions{}) + if err != nil { + t.Fatal(err.Error()) + } + + if string(secretPrimaryEnv.Data["apiKey"]) != string(secret.Data["apiKey"]) { + t.Errorf("Got primary secret %s wanted %s", secretPrimary.Data["apiKey"], secret.Data["apiKey"]) + } + + secretPrimaryVol, err := mocks.kubeClient.CoreV1().Secrets("default").Get("podinfo-secret-vol-primary", metav1.GetOptions{}) + if err != nil { + t.Fatal(err.Error()) + } + + if string(secretPrimaryVol.Data["apiKey"]) != string(secret.Data["apiKey"]) { + t.Errorf("Got primary secret %s wanted %s", secretPrimary.Data["apiKey"], secret.Data["apiKey"]) + } + + secretProjectedName := depPrimary.Spec.Template.Spec.Volumes[2].VolumeSource.Projected.Sources[1].Secret.Name + if secretProjectedName != "podinfo-secret-projected-primary" { + t.Errorf("Got config name %v wanted %v", secretProjectedName, "podinfo-secret-projected-primary") + } + + secretPrimaryProjected, err := mocks.kubeClient.CoreV1().Secrets("default").Get("podinfo-secret-projected-primary", metav1.GetOptions{}) + if err != nil { + t.Fatal(err.Error()) + } + + if string(secretPrimaryProjected.Data["apiKey"]) != string(secretProjected.Data["apiKey"]) { + t.Errorf("Got primary secret %s wanted %s", secretPrimaryProjected.Data["apiKey"], 
secretProjected.Data["apiKey"]) + } + }) + + t.Run("daemonset", func(t *testing.T) { + mocks := newDaemonSetFixture() + secret := newDaemonSetControllerTestSecret() + secretProjected := newDaemonSetControllerTestSecretProjected() + + err := mocks.controller.Initialize(mocks.canary, true) + if err != nil { + t.Fatal(err.Error()) + } + + depPrimary, err := mocks.kubeClient.AppsV1().DaemonSets("default").Get("podinfo-primary", metav1.GetOptions{}) + if err != nil { + t.Fatal(err.Error()) + } + + secretPrimaryVolName := depPrimary.Spec.Template.Spec.Volumes[1].VolumeSource.Secret.SecretName + if secretPrimaryVolName != "podinfo-secret-vol-primary" { + t.Errorf("Got config name %v wanted %v", secretPrimaryVolName, "podinfo-secret-vol-primary") + } + + secretPrimary, err := mocks.kubeClient.CoreV1().Secrets("default").Get("podinfo-secret-env-primary", metav1.GetOptions{}) + if err != nil { + t.Fatal(err.Error()) + } + + if string(secretPrimary.Data["apiKey"]) != string(secret.Data["apiKey"]) { + t.Errorf("Got primary secret %s wanted %s", secretPrimary.Data["apiKey"], secret.Data["apiKey"]) + } + + secretPrimaryEnv, err := mocks.kubeClient.CoreV1().Secrets("default").Get("podinfo-secret-all-env-primary", metav1.GetOptions{}) + if err != nil { + t.Fatal(err.Error()) + } + + if string(secretPrimaryEnv.Data["apiKey"]) != string(secret.Data["apiKey"]) { + t.Errorf("Got primary secret %s wanted %s", secretPrimary.Data["apiKey"], secret.Data["apiKey"]) + } + + secretPrimaryVol, err := mocks.kubeClient.CoreV1().Secrets("default").Get("podinfo-secret-vol-primary", metav1.GetOptions{}) + if err != nil { + t.Fatal(err.Error()) + } + + if string(secretPrimaryVol.Data["apiKey"]) != string(secret.Data["apiKey"]) { + t.Errorf("Got primary secret %s wanted %s", secretPrimary.Data["apiKey"], secret.Data["apiKey"]) + } + + secretProjectedName := depPrimary.Spec.Template.Spec.Volumes[2].VolumeSource.Projected.Sources[1].Secret.Name + if secretProjectedName != 
"podinfo-secret-projected-primary" { + t.Errorf("Got config name %v wanted %v", secretProjectedName, "podinfo-secret-projected-primary") + } + + secretPrimaryProjected, err := mocks.kubeClient.CoreV1().Secrets("default").Get("podinfo-secret-projected-primary", metav1.GetOptions{}) + if err != nil { + t.Fatal(err.Error()) + } + + if string(secretPrimaryProjected.Data["apiKey"]) != string(secretProjected.Data["apiKey"]) { + t.Errorf("Got primary secret %s wanted %s", secretPrimaryProjected.Data["apiKey"], secretProjected.Data["apiKey"]) + } + }) } diff --git a/pkg/canary/daemonset_controller.go b/pkg/canary/daemonset_controller.go new file mode 100644 index 000000000..1c07c7fcf --- /dev/null +++ b/pkg/canary/daemonset_controller.go @@ -0,0 +1,328 @@ +package canary + +import ( + "fmt" + + "go.uber.org/zap" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/kubernetes" + + flaggerv1 "github.com/weaveworks/flagger/pkg/apis/flagger/v1beta1" + clientset "github.com/weaveworks/flagger/pkg/client/clientset/versioned" +) + +var ( + daemonSetScaleDownNodeSelector = map[string]string{"flagger.app/scale-to-zero": "true"} +) + +// DaemonSetController is managing the operations for Kubernetes DaemonSet kind +type DaemonSetController struct { + kubeClient kubernetes.Interface + flaggerClient clientset.Interface + logger *zap.SugaredLogger + configTracker Tracker + labels []string +} + +func (c *DaemonSetController) Scale(cd *flaggerv1.Canary, v int32) error { + // there's no concept `replicas` for DaemonSet + if v == 0 { + targetName := cd.Spec.TargetRef.Name + dae, err := c.kubeClient.AppsV1().DaemonSets(cd.Namespace).Get(targetName, metav1.GetOptions{}) + if err != nil { + if errors.IsNotFound(err) { + return fmt.Errorf("daemonset %s.%s not found", targetName, cd.Namespace) + } + return fmt.Errorf("daemonset %s.%s query 
error %v", targetName, cd.Namespace, err) + } + + daeCopy := dae.DeepCopy() + daeCopy.Spec.Template.Spec.NodeSelector = make(map[string]string, + len(dae.Spec.Template.Spec.NodeSelector)+len(daemonSetScaleDownNodeSelector)) + for k, v := range dae.Spec.Template.Spec.NodeSelector { + daeCopy.Spec.Template.Spec.NodeSelector[k] = v + } + + for k, v := range daemonSetScaleDownNodeSelector { + daeCopy.Spec.Template.Spec.NodeSelector[k] = v + } + + _, err = c.kubeClient.AppsV1().DaemonSets(dae.Namespace).Update(daeCopy) + if err != nil { + return fmt.Errorf("scaling down daemonset %s.%s failed: %v", daeCopy.GetName(), daeCopy.Namespace, err) + } + } + return nil +} + +func (c *DaemonSetController) ScaleFromZero(cd *flaggerv1.Canary) error { + targetName := cd.Spec.TargetRef.Name + dep, err := c.kubeClient.AppsV1().DaemonSets(cd.Namespace).Get(targetName, metav1.GetOptions{}) + if err != nil { + if errors.IsNotFound(err) { + return fmt.Errorf("daemonset %s.%s not found", targetName, cd.Namespace) + } + return fmt.Errorf("daemonset %s.%s query error %v", targetName, cd.Namespace, err) + } + + depCopy := dep.DeepCopy() + for k := range daemonSetScaleDownNodeSelector { + delete(depCopy.Spec.Template.Spec.NodeSelector, k) + } + + _, err = c.kubeClient.AppsV1().DaemonSets(dep.Namespace).Update(depCopy) + if err != nil { + return fmt.Errorf("scaling up daemonset %s.%s failed: %v", depCopy.GetName(), depCopy.Namespace, err) + } + return nil +} + +// Initialize creates the primary DaemonSet and +// delete the canary DaemonSet and returns the pod selector label and container ports +func (c *DaemonSetController) Initialize(cd *flaggerv1.Canary, skipLivenessChecks bool) (err error) { + primaryName := fmt.Sprintf("%s-primary", cd.Spec.TargetRef.Name) + err = c.createPrimaryDaemonSet(cd) + if err != nil { + return fmt.Errorf("creating daemonset %s.%s failed: %v", primaryName, cd.Namespace, err) + } + + if cd.Status.Phase == "" || cd.Status.Phase == flaggerv1.CanaryPhaseInitializing { 
+ if !skipLivenessChecks && !cd.Spec.SkipAnalysis { + _, readyErr := c.IsPrimaryReady(cd) + if readyErr != nil { + return readyErr + } + } + + c.logger.With("canary", fmt.Sprintf("%s.%s", cd.Name, cd.Namespace)).Infof("Scaling down %s.%s", cd.Spec.TargetRef.Name, cd.Namespace) + if err := c.Scale(cd, 0); err != nil { + return err + } + } + return nil +} + +// Promote copies the pod spec, secrets and config maps from canary to primary +func (c *DaemonSetController) Promote(cd *flaggerv1.Canary) error { + targetName := cd.Spec.TargetRef.Name + primaryName := fmt.Sprintf("%s-primary", targetName) + + canary, err := c.kubeClient.AppsV1().DaemonSets(cd.Namespace).Get(targetName, metav1.GetOptions{}) + if err != nil { + if errors.IsNotFound(err) { + return fmt.Errorf("daemonset %s.%s not found", targetName, cd.Namespace) + } + return fmt.Errorf("daemonset %s.%s query error %v", targetName, cd.Namespace, err) + } + + label, err := c.getSelectorLabel(canary) + if err != nil { + return fmt.Errorf("invalid label selector! 
DaemonSet %s.%s spec.selector.matchLabels must contain selector 'app: %s'", + targetName, cd.Namespace, targetName) + } + + primary, err := c.kubeClient.AppsV1().DaemonSets(cd.Namespace).Get(primaryName, metav1.GetOptions{}) + if err != nil { + if errors.IsNotFound(err) { + return fmt.Errorf("daemonset %s.%s not found", primaryName, cd.Namespace) + } + return fmt.Errorf("daemonset %s.%s query error %v", primaryName, cd.Namespace, err) + } + + // promote secrets and config maps + configRefs, err := c.configTracker.GetTargetConfigs(cd) + if err != nil { + return err + } + if err := c.configTracker.CreatePrimaryConfigs(cd, configRefs); err != nil { + return err + } + + primaryCopy := primary.DeepCopy() + primaryCopy.Spec.MinReadySeconds = canary.Spec.MinReadySeconds + primaryCopy.Spec.RevisionHistoryLimit = canary.Spec.RevisionHistoryLimit + primaryCopy.Spec.UpdateStrategy = canary.Spec.UpdateStrategy + + // update spec with primary secrets and config maps + primaryCopy.Spec.Template.Spec = c.configTracker.ApplyPrimaryConfigs(canary.Spec.Template.Spec, configRefs) + + // ignore `daemonSetScaleDownNodeSelector` node selector + for key := range daemonSetScaleDownNodeSelector { + delete(primaryCopy.Spec.Template.Spec.NodeSelector, key) + } + + // update pod annotations to ensure a rolling update + annotations, err := makeAnnotations(canary.Spec.Template.Annotations) + if err != nil { + return err + } + primaryCopy.Spec.Template.Annotations = annotations + + primaryCopy.Spec.Template.Labels = makePrimaryLabels(canary.Spec.Template.Labels, primaryName, label) + + // apply update + _, err = c.kubeClient.AppsV1().DaemonSets(cd.Namespace).Update(primaryCopy) + if err != nil { + return fmt.Errorf("updating daemonset %s.%s template spec failed: %v", + primaryCopy.GetName(), primaryCopy.Namespace, err) + } + return nil +} + +// HasTargetChanged returns true if the canary DaemonSet pod spec has changed +func (c *DaemonSetController) HasTargetChanged(cd *flaggerv1.Canary) (bool, 
error) { + targetName := cd.Spec.TargetRef.Name + canary, err := c.kubeClient.AppsV1().DaemonSets(cd.Namespace).Get(targetName, metav1.GetOptions{}) + if err != nil { + if errors.IsNotFound(err) { + return false, fmt.Errorf("daemonset %s.%s not found", targetName, cd.Namespace) + } + return false, fmt.Errorf("daemonset %s.%s query error %v", targetName, cd.Namespace, err) + } + + // ignore `daemonSetScaleDownNodeSelector` node selector + for key := range daemonSetScaleDownNodeSelector { + delete(canary.Spec.Template.Spec.NodeSelector, key) + } + + // since nil and capacity zero map would have different hash, we have to initialize here + if canary.Spec.Template.Spec.NodeSelector == nil { + canary.Spec.Template.Spec.NodeSelector = map[string]string{} + } + + return hasSpecChanged(cd, canary.Spec.Template) +} + +// GetMetadata returns the pod label selector and svc ports +func (c *DaemonSetController) GetMetadata(cd *flaggerv1.Canary) (string, map[string]int32, error) { + targetName := cd.Spec.TargetRef.Name + + canaryDae, err := c.kubeClient.AppsV1().DaemonSets(cd.Namespace).Get(targetName, metav1.GetOptions{}) + if err != nil { + if errors.IsNotFound(err) { + return "", nil, fmt.Errorf("daemonset %s.%s not found, retrying", targetName, cd.Namespace) + } + return "", nil, err + } + + label, err := c.getSelectorLabel(canaryDae) + if err != nil { + return "", nil, fmt.Errorf("invalid label selector! 
DaemonSet %s.%s spec.selector.matchLabels must contain selector 'app: %s'", + targetName, cd.Namespace, targetName) + } + + var ports map[string]int32 + if cd.Spec.Service.PortDiscovery { + p, err := getPorts(cd, canaryDae.Spec.Template.Spec.Containers) + if err != nil { + return "", nil, fmt.Errorf("port discovery failed with error: %v", err) + } + ports = p + } + + return label, ports, nil +} + +func (c *DaemonSetController) createPrimaryDaemonSet(cd *flaggerv1.Canary) error { + targetName := cd.Spec.TargetRef.Name + primaryName := fmt.Sprintf("%s-primary", cd.Spec.TargetRef.Name) + + canaryDae, err := c.kubeClient.AppsV1().DaemonSets(cd.Namespace).Get(targetName, metav1.GetOptions{}) + if err != nil { + if errors.IsNotFound(err) { + return fmt.Errorf("daemonset %s.%s not found, retrying", targetName, cd.Namespace) + } + return err + } + + if canaryDae.Spec.UpdateStrategy.Type != "" && + canaryDae.Spec.UpdateStrategy.Type != appsv1.RollingUpdateDaemonSetStrategyType { + return fmt.Errorf("daemonset %s.%s must have RollingUpdate strategy but has %s", + targetName, cd.Namespace, canaryDae.Spec.UpdateStrategy.Type) + } + + label, err := c.getSelectorLabel(canaryDae) + if err != nil { + return fmt.Errorf("invalid label selector! 
DaemonSet %s.%s spec.selector.matchLabels must contain selector 'app: %s'", + targetName, cd.Namespace, targetName) + } + + primaryDep, err := c.kubeClient.AppsV1().DaemonSets(cd.Namespace).Get(primaryName, metav1.GetOptions{}) + if errors.IsNotFound(err) { + // create primary secrets and config maps + configRefs, err := c.configTracker.GetTargetConfigs(cd) + if err != nil { + return err + } + if err := c.configTracker.CreatePrimaryConfigs(cd, configRefs); err != nil { + return err + } + annotations, err := makeAnnotations(canaryDae.Spec.Template.Annotations) + if err != nil { + return err + } + + // create primary daemonset + primaryDep = &appsv1.DaemonSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: primaryName, + Namespace: cd.Namespace, + Labels: map[string]string{ + label: primaryName, + }, + OwnerReferences: []metav1.OwnerReference{ + *metav1.NewControllerRef(cd, schema.GroupVersionKind{ + Group: flaggerv1.SchemeGroupVersion.Group, + Version: flaggerv1.SchemeGroupVersion.Version, + Kind: flaggerv1.CanaryKind, + }), + }, + }, + Spec: appsv1.DaemonSetSpec{ + MinReadySeconds: canaryDae.Spec.MinReadySeconds, + RevisionHistoryLimit: canaryDae.Spec.RevisionHistoryLimit, + UpdateStrategy: canaryDae.Spec.UpdateStrategy, + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + label: primaryName, + }, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: makePrimaryLabels(canaryDae.Spec.Template.Labels, primaryName, label), + Annotations: annotations, + }, + // update spec with the primary secrets and config maps + Spec: c.configTracker.ApplyPrimaryConfigs(canaryDae.Spec.Template.Spec, configRefs), + }, + }, + } + + _, err = c.kubeClient.AppsV1().DaemonSets(cd.Namespace).Create(primaryDep) + if err != nil { + return err + } + + c.logger.With("canary", fmt.Sprintf("%s.%s", cd.Name, cd.Namespace)).Infof("DaemonSet %s.%s created", primaryDep.GetName(), cd.Namespace) + } + return nil +} + +// getSelectorLabel returns the selector 
match label +func (c *DaemonSetController) getSelectorLabel(daemonSet *appsv1.DaemonSet) (string, error) { + for _, l := range c.labels { + if _, ok := daemonSet.Spec.Selector.MatchLabels[l]; ok { + return l, nil + } + } + + return "", fmt.Errorf("selector not found") +} + +func (c *DaemonSetController) HaveDependenciesChanged(cd *flaggerv1.Canary) (bool, error) { + return c.configTracker.HasConfigChanged(cd) +} diff --git a/pkg/canary/daemonset_controller_test.go b/pkg/canary/daemonset_controller_test.go new file mode 100644 index 000000000..997441eee --- /dev/null +++ b/pkg/canary/daemonset_controller_test.go @@ -0,0 +1,267 @@ +package canary + +import ( + "testing" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + flaggerv1 "github.com/weaveworks/flagger/pkg/apis/flagger/v1beta1" +) + +func TestDaemonSetController_Sync(t *testing.T) { + mocks := newDaemonSetFixture() + err := mocks.controller.Initialize(mocks.canary, true) + if err != nil { + t.Fatal(err.Error()) + } + + depPrimary, err := mocks.kubeClient.AppsV1().DaemonSets("default").Get("podinfo-primary", metav1.GetOptions{}) + if err != nil { + t.Fatal(err.Error()) + } + + dep := newDaemonSetControllerTestPodInfo() + + primaryImage := depPrimary.Spec.Template.Spec.Containers[0].Image + sourceImage := dep.Spec.Template.Spec.Containers[0].Image + if primaryImage != sourceImage { + t.Errorf("Got image %s wanted %s", primaryImage, sourceImage) + } +} + +func TestDaemonSetController_Promote(t *testing.T) { + mocks := newDaemonSetFixture() + err := mocks.controller.Initialize(mocks.canary, true) + if err != nil { + t.Fatal(err.Error()) + } + + dep2 := newDaemonSetControllerTestPodInfoV2() + _, err = mocks.kubeClient.AppsV1().DaemonSets("default").Update(dep2) + if err != nil { + t.Fatal(err.Error()) + } + + config2 := newDaemonSetControllerTestConfigMapV2() + _, err = 
mocks.kubeClient.CoreV1().ConfigMaps("default").Update(config2) + if err != nil { + t.Fatal(err.Error()) + } + + err = mocks.controller.Promote(mocks.canary) + if err != nil { + t.Fatal(err.Error()) + } + + depPrimary, err := mocks.kubeClient.AppsV1().DaemonSets("default").Get("podinfo-primary", metav1.GetOptions{}) + if err != nil { + t.Fatal(err.Error()) + } + + primaryImage := depPrimary.Spec.Template.Spec.Containers[0].Image + sourceImage := dep2.Spec.Template.Spec.Containers[0].Image + if primaryImage != sourceImage { + t.Errorf("Got image %s wanted %s", primaryImage, sourceImage) + } + + configPrimary, err := mocks.kubeClient.CoreV1().ConfigMaps("default").Get("podinfo-config-env-primary", metav1.GetOptions{}) + if err != nil { + t.Fatal(err.Error()) + } + + if configPrimary.Data["color"] != config2.Data["color"] { + t.Errorf("Got primary ConfigMap color %s wanted %s", configPrimary.Data["color"], config2.Data["color"]) + } +} + +func TestDaemonSetController_NoConfigTracking(t *testing.T) { + mocks := newDaemonSetFixture() + mocks.controller.configTracker = &NopTracker{} + + err := mocks.controller.Initialize(mocks.canary, true) + if err != nil { + t.Fatal(err.Error()) + } + + depPrimary, err := mocks.kubeClient.AppsV1().DaemonSets("default").Get("podinfo-primary", metav1.GetOptions{}) + if err != nil { + t.Fatal(err.Error()) + } + + _, err = mocks.kubeClient.CoreV1().ConfigMaps("default").Get("podinfo-config-env-primary", metav1.GetOptions{}) + if !errors.IsNotFound(err) { + t.Fatalf("Primary ConfigMap shouldn't have been created") + } + + configName := depPrimary.Spec.Template.Spec.Volumes[0].VolumeSource.ConfigMap.LocalObjectReference.Name + if configName != "podinfo-config-vol" { + t.Errorf("Got config name %v wanted %v", configName, "podinfo-config-vol") + } +} + +func TestDaemonSetController_HasTargetChanged(t *testing.T) { + mocks := newDaemonSetFixture() + err := mocks.controller.Initialize(mocks.canary, true) + if err != nil { + t.Fatal(err.Error()) 
+ } + + // save last applied hash + canary, err := mocks.flaggerClient.FlaggerV1beta1().Canaries("default").Get("podinfo", metav1.GetOptions{}) + if err != nil { + t.Fatal(err.Error()) + } + err = mocks.controller.SyncStatus(canary, flaggerv1.CanaryStatus{Phase: flaggerv1.CanaryPhaseInitializing}) + if err != nil { + t.Fatal(err.Error()) + } + + // save last promoted hash + canary, err = mocks.flaggerClient.FlaggerV1beta1().Canaries("default").Get("podinfo", metav1.GetOptions{}) + if err != nil { + t.Fatal(err.Error()) + } + err = mocks.controller.SetStatusPhase(canary, flaggerv1.CanaryPhaseInitialized) + if err != nil { + t.Fatal(err.Error()) + } + + dep, err := mocks.kubeClient.AppsV1().DaemonSets("default").Get("podinfo", metav1.GetOptions{}) + if err != nil { + t.Fatal(err.Error()) + } + + depClone := dep.DeepCopy() + depClone.Spec.Template.Spec.Containers[0].Resources = corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceCPU: *resource.NewQuantity(100, resource.DecimalExponent), + }, + } + + // update pod spec + _, err = mocks.kubeClient.AppsV1().DaemonSets("default").Update(depClone) + if err != nil { + t.Fatal(err.Error()) + } + + canary, err = mocks.flaggerClient.FlaggerV1beta1().Canaries("default").Get("podinfo", metav1.GetOptions{}) + if err != nil { + t.Fatal(err.Error()) + } + + // detect change in last applied spec + isNew, err := mocks.controller.HasTargetChanged(canary) + if err != nil { + t.Fatal(err.Error()) + } + if !isNew { + t.Errorf("Got %v wanted %v", isNew, true) + } + + // save hash + err = mocks.controller.SyncStatus(canary, flaggerv1.CanaryStatus{Phase: flaggerv1.CanaryPhaseProgressing}) + if err != nil { + t.Fatal(err.Error()) + } + + dep, err = mocks.kubeClient.AppsV1().DaemonSets("default").Get("podinfo", metav1.GetOptions{}) + if err != nil { + t.Fatal(err.Error()) + } + + depClone = dep.DeepCopy() + depClone.Spec.Template.Spec.Containers[0].Resources = corev1.ResourceRequirements{ + Requests: 
corev1.ResourceList{ + corev1.ResourceCPU: *resource.NewQuantity(1000, resource.DecimalExponent), + }, + } + + // update pod spec + _, err = mocks.kubeClient.AppsV1().DaemonSets("default").Update(depClone) + if err != nil { + t.Fatal(err.Error()) + } + + canary, err = mocks.flaggerClient.FlaggerV1beta1().Canaries("default").Get("podinfo", metav1.GetOptions{}) + if err != nil { + t.Fatal(err.Error()) + } + + // ignore change as hash should be the same with last promoted + isNew, err = mocks.controller.HasTargetChanged(canary) + if err != nil { + t.Fatal(err.Error()) + } + if isNew { + t.Errorf("Got %v wanted %v", isNew, false) + } + + depClone = dep.DeepCopy() + depClone.Spec.Template.Spec.Containers[0].Resources = corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceCPU: *resource.NewQuantity(600, resource.DecimalExponent), + }, + } + + // update pod spec + _, err = mocks.kubeClient.AppsV1().DaemonSets("default").Update(depClone) + if err != nil { + t.Fatal(err.Error()) + } + + canary, err = mocks.flaggerClient.FlaggerV1beta1().Canaries("default").Get("podinfo", metav1.GetOptions{}) + if err != nil { + t.Fatal(err.Error()) + } + + // detect change + isNew, err = mocks.controller.HasTargetChanged(canary) + if err != nil { + t.Fatal(err.Error()) + } + if !isNew { + t.Errorf("Got %v wanted %v", isNew, true) + } +} + +func TestDaemonSetController_Scale(t *testing.T) { + t.Run("Scale", func(t *testing.T) { + mocks := newDaemonSetFixture() + err := mocks.controller.Initialize(mocks.canary, true) + if err != nil { + t.Fatal(err.Error()) + } + err = mocks.controller.Scale(mocks.canary, 0) + c, err := mocks.kubeClient.AppsV1().DaemonSets("default").Get("podinfo", metav1.GetOptions{}) + if err != nil { + t.Fatal(err.Error()) + } + + for k := range daemonSetScaleDownNodeSelector { + if _, ok := c.Spec.Template.Spec.NodeSelector[k]; !ok { + t.Errorf("%s should exist in node selector", k) + } + } + }) + t.Run("ScaleFromZeo", func(t *testing.T) { + 
mocks := newDaemonSetFixture() + err := mocks.controller.Initialize(mocks.canary, true) + if err != nil { + t.Fatal(err.Error()) + } + err = mocks.controller.ScaleFromZero(mocks.canary) + c, err := mocks.kubeClient.AppsV1().DaemonSets("default").Get("podinfo", metav1.GetOptions{}) + if err != nil { + t.Fatal(err.Error()) + } + + for k := range daemonSetScaleDownNodeSelector { + if _, ok := c.Spec.Template.Spec.NodeSelector[k]; ok { + t.Errorf("%s should not exist in node selector", k) + } + } + }) +} diff --git a/pkg/canary/daemonset_fixture_test.go b/pkg/canary/daemonset_fixture_test.go new file mode 100644 index 000000000..d0919af0e --- /dev/null +++ b/pkg/canary/daemonset_fixture_test.go @@ -0,0 +1,507 @@ +package canary + +import ( + "go.uber.org/zap" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/kubernetes/fake" + + flaggerv1 "github.com/weaveworks/flagger/pkg/apis/flagger/v1beta1" + clientset "github.com/weaveworks/flagger/pkg/client/clientset/versioned" + fakeFlagger "github.com/weaveworks/flagger/pkg/client/clientset/versioned/fake" + "github.com/weaveworks/flagger/pkg/logger" +) + +type daemonSetControllerFixture struct { + canary *flaggerv1.Canary + kubeClient kubernetes.Interface + flaggerClient clientset.Interface + controller DaemonSetController + logger *zap.SugaredLogger +} + +func newDaemonSetFixture() daemonSetControllerFixture { + // init canary + canary := newDaemonSetControllerTestCanary() + flaggerClient := fakeFlagger.NewSimpleClientset(canary) + + // init kube clientset and register mock objects + kubeClient := fake.NewSimpleClientset( + newDaemonSetControllerTestPodInfo(), + newDaemonSetControllerTestConfigMap(), + newDaemonSetControllerTestConfigMapEnv(), + newDaemonSetControllerTestConfigMapVol(), + newDaemonSetControllerTestConfigProjected(), + newDaemonSetControllerTestSecret(), + 
newDaemonSetControllerTestSecretEnv(), + newDaemonSetControllerTestSecretVol(), + newDaemonSetControllerTestSecretProjected(), + ) + + logger, _ := logger.NewLogger("debug") + + ctrl := DaemonSetController{ + flaggerClient: flaggerClient, + kubeClient: kubeClient, + logger: logger, + labels: []string{"app", "name"}, + configTracker: &ConfigTracker{ + Logger: logger, + KubeClient: kubeClient, + FlaggerClient: flaggerClient, + }, + } + + return daemonSetControllerFixture{ + canary: canary, + controller: ctrl, + logger: logger, + flaggerClient: flaggerClient, + kubeClient: kubeClient, + } +} + +func newDaemonSetControllerTestConfigMap() *corev1.ConfigMap { + return &corev1.ConfigMap{ + TypeMeta: metav1.TypeMeta{APIVersion: corev1.SchemeGroupVersion.String()}, + ObjectMeta: metav1.ObjectMeta{ + Namespace: "default", + Name: "podinfo-config-env", + }, + Data: map[string]string{ + "color": "red", + }, + } +} + +func newDaemonSetControllerTestConfigMapV2() *corev1.ConfigMap { + return &corev1.ConfigMap{ + TypeMeta: metav1.TypeMeta{APIVersion: corev1.SchemeGroupVersion.String()}, + ObjectMeta: metav1.ObjectMeta{ + Namespace: "default", + Name: "podinfo-config-env", + }, + Data: map[string]string{ + "color": "blue", + "output": "console", + }, + } +} + +func newDaemonSetControllerTestConfigProjected() *corev1.ConfigMap { + return &corev1.ConfigMap{ + TypeMeta: metav1.TypeMeta{APIVersion: corev1.SchemeGroupVersion.String()}, + ObjectMeta: metav1.ObjectMeta{ + Namespace: "default", + Name: "podinfo-config-projected", + }, + Data: map[string]string{ + "color": "red", + }, + } +} + +func newDaemonSetControllerTestConfigMapEnv() *corev1.ConfigMap { + return &corev1.ConfigMap{ + TypeMeta: metav1.TypeMeta{APIVersion: corev1.SchemeGroupVersion.String()}, + ObjectMeta: metav1.ObjectMeta{ + Namespace: "default", + Name: "podinfo-config-all-env", + }, + Data: map[string]string{ + "color": "red", + }, + } +} + +func newDaemonSetControllerTestConfigMapVol() *corev1.ConfigMap { + return 
&corev1.ConfigMap{ + TypeMeta: metav1.TypeMeta{APIVersion: corev1.SchemeGroupVersion.String()}, + ObjectMeta: metav1.ObjectMeta{ + Namespace: "default", + Name: "podinfo-config-vol", + }, + Data: map[string]string{ + "color": "red", + }, + } +} + +func newDaemonSetControllerTestSecret() *corev1.Secret { + return &corev1.Secret{ + TypeMeta: metav1.TypeMeta{APIVersion: corev1.SchemeGroupVersion.String()}, + ObjectMeta: metav1.ObjectMeta{ + Namespace: "default", + Name: "podinfo-secret-env", + }, + Type: corev1.SecretTypeOpaque, + Data: map[string][]byte{ + "apiKey": []byte("test"), + }, + } +} + +func newDaemonSetControllerTestSecretProjected() *corev1.Secret { + return &corev1.Secret{ + TypeMeta: metav1.TypeMeta{APIVersion: corev1.SchemeGroupVersion.String()}, + ObjectMeta: metav1.ObjectMeta{ + Namespace: "default", + Name: "podinfo-secret-projected", + }, + Type: corev1.SecretTypeOpaque, + Data: map[string][]byte{ + "apiKey": []byte("test"), + }, + } +} + +func newDaemonSetControllerTestSecretEnv() *corev1.Secret { + return &corev1.Secret{ + TypeMeta: metav1.TypeMeta{APIVersion: corev1.SchemeGroupVersion.String()}, + ObjectMeta: metav1.ObjectMeta{ + Namespace: "default", + Name: "podinfo-secret-all-env", + }, + Type: corev1.SecretTypeOpaque, + Data: map[string][]byte{ + "apiKey": []byte("test"), + }, + } +} + +func newDaemonSetControllerTestSecretVol() *corev1.Secret { + return &corev1.Secret{ + TypeMeta: metav1.TypeMeta{APIVersion: corev1.SchemeGroupVersion.String()}, + ObjectMeta: metav1.ObjectMeta{ + Namespace: "default", + Name: "podinfo-secret-vol", + }, + Type: corev1.SecretTypeOpaque, + Data: map[string][]byte{ + "apiKey": []byte("test"), + }, + } +} + +func newDaemonSetControllerTestCanary() *flaggerv1.Canary { + cd := &flaggerv1.Canary{ + TypeMeta: metav1.TypeMeta{APIVersion: flaggerv1.SchemeGroupVersion.String()}, + ObjectMeta: metav1.ObjectMeta{ + Namespace: "default", + Name: "podinfo", + }, + Spec: flaggerv1.CanarySpec{ + TargetRef: 
flaggerv1.CrossNamespaceObjectReference{ + Name: "podinfo", + APIVersion: "apps/v1", + Kind: "DaemonSet", + }, + }, + } + return cd +} + +func newDaemonSetControllerTestPodInfo() *appsv1.DaemonSet { + d := &appsv1.DaemonSet{ + TypeMeta: metav1.TypeMeta{APIVersion: appsv1.SchemeGroupVersion.String()}, + ObjectMeta: metav1.ObjectMeta{ + Namespace: "default", + Name: "podinfo", + }, + Spec: appsv1.DaemonSetSpec{ + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "name": "podinfo", + }, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + "name": "podinfo", + }, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "podinfo", + Image: "quay.io/stefanprodan/podinfo:1.2.0", + Command: []string{ + "./podinfo", + "--port=9898", + }, + Resources: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceCPU: *resource.NewQuantity(1000, resource.DecimalExponent), + }, + }, + Args: nil, + WorkingDir: "", + Ports: []corev1.ContainerPort{ + { + Name: "http", + ContainerPort: 9898, + Protocol: corev1.ProtocolTCP, + }, + }, + Env: []corev1.EnvVar{ + { + Name: "PODINFO_UI_COLOR", + ValueFrom: &corev1.EnvVarSource{ + ConfigMapKeyRef: &corev1.ConfigMapKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: "podinfo-config-env", + }, + Key: "color", + }, + }, + }, + { + Name: "API_KEY", + ValueFrom: &corev1.EnvVarSource{ + SecretKeyRef: &corev1.SecretKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: "podinfo-secret-env", + }, + Key: "apiKey", + }, + }, + }, + }, + EnvFrom: []corev1.EnvFromSource{ + { + ConfigMapRef: &corev1.ConfigMapEnvSource{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: "podinfo-config-all-env", + }, + }, + }, + { + SecretRef: &corev1.SecretEnvSource{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: "podinfo-secret-all-env", + }, + }, + }, + }, + VolumeMounts: []corev1.VolumeMount{ + { 
+ Name: "config", + MountPath: "/etc/podinfo/config", + ReadOnly: true, + }, + { + Name: "secret", + MountPath: "/etc/podinfo/secret", + ReadOnly: true, + }, + }, + }, + }, + Volumes: []corev1.Volume{ + { + Name: "config", + VolumeSource: corev1.VolumeSource{ + ConfigMap: &corev1.ConfigMapVolumeSource{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: "podinfo-config-vol", + }, + }, + }, + }, + { + Name: "secret", + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: "podinfo-secret-vol", + }, + }, + }, + { + Name: "projected", + VolumeSource: corev1.VolumeSource{ + Projected: &corev1.ProjectedVolumeSource{ + Sources: []corev1.VolumeProjection{ + { + ConfigMap: &corev1.ConfigMapProjection{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: "podinfo-config-projected", + }, + Items: []corev1.KeyToPath{ + { + Key: "color", + Path: "my-group/my-color", + }, + }, + }, + }, + { + Secret: &corev1.SecretProjection{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: "podinfo-secret-projected", + }, + Items: []corev1.KeyToPath{ + { + Key: "apiKey", + Path: "my-group/my-api-key", + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + } + return d +} + +func newDaemonSetControllerTestPodInfoV2() *appsv1.DaemonSet { + d := &appsv1.DaemonSet{ + TypeMeta: metav1.TypeMeta{APIVersion: appsv1.SchemeGroupVersion.String()}, + ObjectMeta: metav1.ObjectMeta{ + Namespace: "default", + Name: "podinfo", + }, + Spec: appsv1.DaemonSetSpec{ + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "name": "podinfo", + }, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + "name": "podinfo", + }, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "podinfo", + Image: "quay.io/stefanprodan/podinfo:1.2.1", + Ports: []corev1.ContainerPort{ + { + Name: "http", + ContainerPort: 9898, + Protocol: corev1.ProtocolTCP, + }, + }, + Command: 
[]string{ + "./podinfo", + "--port=9898", + }, + Env: []corev1.EnvVar{ + { + Name: "PODINFO_UI_COLOR", + ValueFrom: &corev1.EnvVarSource{ + ConfigMapKeyRef: &corev1.ConfigMapKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: "podinfo-config-env", + }, + Key: "color", + }, + }, + }, + { + Name: "API_KEY", + ValueFrom: &corev1.EnvVarSource{ + SecretKeyRef: &corev1.SecretKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: "podinfo-secret-env", + }, + Key: "apiKey", + }, + }, + }, + }, + EnvFrom: []corev1.EnvFromSource{ + { + ConfigMapRef: &corev1.ConfigMapEnvSource{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: "podinfo-config-all-env", + }, + }, + }, + }, + VolumeMounts: []corev1.VolumeMount{ + { + Name: "config", + MountPath: "/etc/podinfo/config", + ReadOnly: true, + }, + { + Name: "secret", + MountPath: "/etc/podinfo/secret", + ReadOnly: true, + }, + }, + }, + }, + Volumes: []corev1.Volume{ + { + Name: "config", + VolumeSource: corev1.VolumeSource{ + ConfigMap: &corev1.ConfigMapVolumeSource{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: "podinfo-config-vol", + }, + }, + }, + }, + { + Name: "secret", + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: "podinfo-secret-vol", + }, + }, + }, + { + Name: "projected", + VolumeSource: corev1.VolumeSource{ + Projected: &corev1.ProjectedVolumeSource{ + Sources: []corev1.VolumeProjection{ + { + ConfigMap: &corev1.ConfigMapProjection{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: "podinfo-config-projected", + }, + Items: []corev1.KeyToPath{ + { + Key: "color", + Path: "my-group/my-color", + }, + }, + }, + }, + { + Secret: &corev1.SecretProjection{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: "podinfo-secret-projected", + }, + Items: []corev1.KeyToPath{ + { + Key: "apiKey", + Path: "my-group/my-api-key", + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + } + return d +} diff 
--git a/pkg/canary/daemonset_ready.go b/pkg/canary/daemonset_ready.go new file mode 100644 index 000000000..4120082a9 --- /dev/null +++ b/pkg/canary/daemonset_ready.go @@ -0,0 +1,70 @@ +package canary + +import ( + "fmt" + "time" + + appsv1 "k8s.io/api/apps/v1" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + flaggerv1 "github.com/weaveworks/flagger/pkg/apis/flagger/v1beta1" +) + +// IsPrimaryReady checks the primary daemonset status and returns an error if +// the daemonset is in the middle of a rolling update +func (c *DaemonSetController) IsPrimaryReady(cd *flaggerv1.Canary) (bool, error) { + primaryName := fmt.Sprintf("%s-primary", cd.Spec.TargetRef.Name) + primary, err := c.kubeClient.AppsV1().DaemonSets(cd.Namespace).Get(primaryName, metav1.GetOptions{}) + if err != nil { + if errors.IsNotFound(err) { + return true, fmt.Errorf("deployment %s.%s not found", primaryName, cd.Namespace) + } + return true, fmt.Errorf("deployment %s.%s query error %v", primaryName, cd.Namespace, err) + } + + retriable, err := c.isDaemonSetReady(cd, primary) + if err != nil { + return retriable, fmt.Errorf("halt advancement %s.%s %s", primaryName, cd.Namespace, err.Error()) + } + return true, nil +} + +// IsCanaryReady checks the primary daemonset and returns an error if +// the daemonset is in the middle of a rolling update +func (c *DaemonSetController) IsCanaryReady(cd *flaggerv1.Canary) (bool, error) { + targetName := cd.Spec.TargetRef.Name + canary, err := c.kubeClient.AppsV1().DaemonSets(cd.Namespace).Get(targetName, metav1.GetOptions{}) + if err != nil { + if errors.IsNotFound(err) { + return true, fmt.Errorf("daemonset %s.%s not found", targetName, cd.Namespace) + } + return true, fmt.Errorf("daemonset %s.%s query error %v", targetName, cd.Namespace, err) + } + + retriable, err := c.isDaemonSetReady(cd, canary) + if err != nil { + return retriable, fmt.Errorf("halt advancement %s.%s %s", targetName, cd.Namespace, err.Error()) + } + 
return true, nil +} + +// isDaemonSetReady determines if a daemonset is ready by checking the number of old version daemons +func (c *DaemonSetController) isDaemonSetReady(cd *flaggerv1.Canary, daemonSet *appsv1.DaemonSet) (bool, error) { + if diff := daemonSet.Status.DesiredNumberScheduled - daemonSet.Status.UpdatedNumberScheduled; diff > 0 || daemonSet.Status.NumberUnavailable > 0 { + from := cd.Status.LastTransitionTime + delta := time.Duration(cd.GetProgressDeadlineSeconds()) * time.Second + dl := from.Add(delta) + if dl.Before(time.Now()) { + return false, fmt.Errorf("daemonset %s exceeded its progress deadline", cd.GetName()) + } else { + return true, fmt.Errorf( + "waiting for rollout to finish: desiredNumberScheduled=%d, updatedNumberScheduled=%d, numberUnavailable=%d", + daemonSet.Status.DesiredNumberScheduled, + daemonSet.Status.UpdatedNumberScheduled, + daemonSet.Status.NumberUnavailable, + ) + } + } + return true, nil +} diff --git a/pkg/canary/daemonset_ready_test.go b/pkg/canary/daemonset_ready_test.go new file mode 100644 index 000000000..16b8f824f --- /dev/null +++ b/pkg/canary/daemonset_ready_test.go @@ -0,0 +1,79 @@ +package canary + +import ( + "testing" + + appsv1 "k8s.io/api/apps/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + flaggerv1 "github.com/weaveworks/flagger/pkg/apis/flagger/v1beta1" +) + +func TestDaemonSetController_IsReady(t *testing.T) { + mocks := newDaemonSetFixture() + err := mocks.controller.Initialize(mocks.canary, true) + if err != nil { + t.Error("Expected primary readiness check to fail") + } + + _, err = mocks.controller.IsPrimaryReady(mocks.canary) + if err != nil { + t.Fatal(err.Error()) + } + + _, err = mocks.controller.IsCanaryReady(mocks.canary) + if err != nil { + t.Fatal(err.Error()) + } +} + +func TestDaemonSetController_isDaemonSetReady(t *testing.T) { + ds := &appsv1.DaemonSet{ + Status: appsv1.DaemonSetStatus{ + DesiredNumberScheduled: 1, + UpdatedNumberScheduled: 1, + }, + } + + cd := 
&flaggerv1.Canary{} + cd.Spec.ProgressDeadlineSeconds = int32p(1e5) + cd.Status.LastTransitionTime = metav1.Now() + + // ready + mocks := newDaemonSetFixture() + _, err := mocks.controller.isDaemonSetReady(cd, ds) + if err != nil { + t.Fatal(err.Error()) + } + + // not ready but retriable + ds.Status.NumberUnavailable++ + retrieable, err := mocks.controller.isDaemonSetReady(cd, ds) + if err == nil { + t.Fatal("expected error") + } + if !retrieable { + t.Fatal("expected retriable") + } + ds.Status.NumberUnavailable-- + + ds.Status.DesiredNumberScheduled++ + retrieable, err = mocks.controller.isDaemonSetReady(cd, ds) + if err == nil { + t.Fatal("expected error") + } + if !retrieable { + t.Fatal("expected retriable") + } + + // not ready and not retriable + cd.Status.LastTransitionTime = metav1.Now() + cd.Spec.ProgressDeadlineSeconds = int32p(-1e5) + retrieable, err = mocks.controller.isDaemonSetReady(cd, ds) + if err == nil { + t.Fatal("expected error") + } + if retrieable { + t.Fatal("expected not retriable") + } +} diff --git a/pkg/canary/daemonset_status.go b/pkg/canary/daemonset_status.go new file mode 100644 index 000000000..49329866b --- /dev/null +++ b/pkg/canary/daemonset_status.go @@ -0,0 +1,61 @@ +package canary + +import ( + "fmt" + + ex "github.com/pkg/errors" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + flaggerv1 "github.com/weaveworks/flagger/pkg/apis/flagger/v1beta1" +) + +// SyncStatus encodes the canary pod spec and updates the canary status +func (c *DaemonSetController) SyncStatus(cd *flaggerv1.Canary, status flaggerv1.CanaryStatus) error { + dae, err := c.kubeClient.AppsV1().DaemonSets(cd.Namespace).Get(cd.Spec.TargetRef.Name, metav1.GetOptions{}) + if err != nil { + if errors.IsNotFound(err) { + return fmt.Errorf("daemonset %s.%s not found", cd.Spec.TargetRef.Name, cd.Namespace) + } + return ex.Wrap(err, "SyncStatus daemonset query error") + } + + // ignore `daemonSetScaleDownNodeSelector` node 
selector + for key := range daemonSetScaleDownNodeSelector { + delete(dae.Spec.Template.Spec.NodeSelector, key) + } + + // since nil and capacity zero map would have different hash, we have to initialize here + if dae.Spec.Template.Spec.NodeSelector == nil { + dae.Spec.Template.Spec.NodeSelector = map[string]string{} + } + + configs, err := c.configTracker.GetConfigRefs(cd) + if err != nil { + return ex.Wrap(err, "SyncStatus configs query error") + } + + return syncCanaryStatus(c.flaggerClient, cd, status, dae.Spec.Template, func(cdCopy *flaggerv1.Canary) { + cdCopy.Status.TrackedConfigs = configs + }) +} + +// SetStatusFailedChecks updates the canary failed checks counter +func (c *DaemonSetController) SetStatusFailedChecks(cd *flaggerv1.Canary, val int) error { + return setStatusFailedChecks(c.flaggerClient, cd, val) +} + +// SetStatusWeight updates the canary status weight value +func (c *DaemonSetController) SetStatusWeight(cd *flaggerv1.Canary, val int) error { + return setStatusWeight(c.flaggerClient, cd, val) +} + +// SetStatusIterations updates the canary status iterations value +func (c *DaemonSetController) SetStatusIterations(cd *flaggerv1.Canary, val int) error { + return setStatusIterations(c.flaggerClient, cd, val) +} + +// SetStatusPhase updates the canary status phase +func (c *DaemonSetController) SetStatusPhase(cd *flaggerv1.Canary, phase flaggerv1.CanaryPhase) error { + return setStatusPhase(c.flaggerClient, cd, phase) +} diff --git a/pkg/canary/daemonset_status_test.go b/pkg/canary/daemonset_status_test.go new file mode 100644 index 000000000..9c4425f35 --- /dev/null +++ b/pkg/canary/daemonset_status_test.go @@ -0,0 +1,92 @@ +package canary + +import ( + "testing" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + flaggerv1 "github.com/weaveworks/flagger/pkg/apis/flagger/v1beta1" +) + +func TestDaemonSetController_SyncStatus(t *testing.T) { + mocks := newDaemonSetFixture() + err := mocks.controller.Initialize(mocks.canary, true) + if err != 
nil { + t.Fatal(err.Error()) + } + + status := flaggerv1.CanaryStatus{ + Phase: flaggerv1.CanaryPhaseProgressing, + FailedChecks: 2, + } + err = mocks.controller.SyncStatus(mocks.canary, status) + if err != nil { + t.Fatal(err.Error()) + } + + res, err := mocks.flaggerClient.FlaggerV1beta1().Canaries("default").Get("podinfo", metav1.GetOptions{}) + if err != nil { + t.Fatal(err.Error()) + } + + if res.Status.Phase != status.Phase { + t.Errorf("Got state %v wanted %v", res.Status.Phase, status.Phase) + } + + if res.Status.FailedChecks != status.FailedChecks { + t.Errorf("Got failed checks %v wanted %v", res.Status.FailedChecks, status.FailedChecks) + } + + if res.Status.TrackedConfigs == nil { + t.Fatalf("Status tracking configs are empty") + } + configs := *res.Status.TrackedConfigs + secret := newDaemonSetControllerTestSecret() + if _, exists := configs["secret/"+secret.GetName()]; !exists { + t.Errorf("Secret %s not found in status", secret.GetName()) + } +} + +func TestDaemonSetController_SetFailedChecks(t *testing.T) { + mocks := newDaemonSetFixture() + err := mocks.controller.Initialize(mocks.canary, true) + if err != nil { + t.Fatal(err.Error()) + } + + err = mocks.controller.SetStatusFailedChecks(mocks.canary, 1) + if err != nil { + t.Fatal(err.Error()) + } + + res, err := mocks.flaggerClient.FlaggerV1beta1().Canaries("default").Get("podinfo", metav1.GetOptions{}) + if err != nil { + t.Fatal(err.Error()) + } + + if res.Status.FailedChecks != 1 { + t.Errorf("Got %v wanted %v", res.Status.FailedChecks, 1) + } +} + +func TestDaemonSetController_SetState(t *testing.T) { + mocks := newDaemonSetFixture() + err := mocks.controller.Initialize(mocks.canary, true) + if err != nil { + t.Fatal(err.Error()) + } + + err = mocks.controller.SetStatusPhase(mocks.canary, flaggerv1.CanaryPhaseProgressing) + if err != nil { + t.Fatal(err.Error()) + } + + res, err := mocks.flaggerClient.FlaggerV1beta1().Canaries("default").Get("podinfo", metav1.GetOptions{}) + if err != nil { + 
t.Fatal(err.Error()) + } + + if res.Status.Phase != flaggerv1.CanaryPhaseProgressing { + t.Errorf("Got %v wanted %v", res.Status.Phase, flaggerv1.CanaryPhaseProgressing) + } +} diff --git a/pkg/canary/deployment_controller.go b/pkg/canary/deployment_controller.go index 3fc488bdd..e27dbc636 100644 --- a/pkg/canary/deployment_controller.go +++ b/pkg/canary/deployment_controller.go @@ -1,9 +1,7 @@ package canary import ( - "crypto/rand" "fmt" - "io" "github.com/google/go-cmp/cmp" "go.uber.org/zap" @@ -13,7 +11,6 @@ import ( "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/client-go/kubernetes" flaggerv1 "github.com/weaveworks/flagger/pkg/apis/flagger/v1beta1" @@ -107,7 +104,7 @@ func (c *DeploymentController) Promote(cd *flaggerv1.Canary) error { primaryCopy.Spec.Template.Spec = c.configTracker.ApplyPrimaryConfigs(canary.Spec.Template.Spec, configRefs) // update pod annotations to ensure a rolling update - annotations, err := c.makeAnnotations(canary.Spec.Template.Annotations) + annotations, err := makeAnnotations(canary.Spec.Template.Annotations) if err != nil { return err } @@ -211,7 +208,7 @@ func (c *DeploymentController) GetMetadata(cd *flaggerv1.Canary) (string, map[st var ports map[string]int32 if cd.Spec.Service.PortDiscovery { - p, err := c.getPorts(cd, canaryDep) + p, err := getPorts(cd, canaryDep.Spec.Template.Spec.Containers) if err != nil { return "", nil, fmt.Errorf("port discovery failed with error: %v", err) } @@ -248,7 +245,7 @@ func (c *DeploymentController) createPrimaryDeployment(cd *flaggerv1.Canary) err if err := c.configTracker.CreatePrimaryConfigs(cd, configRefs); err != nil { return err } - annotations, err := c.makeAnnotations(canaryDep.Spec.Template.Annotations) + annotations, err := makeAnnotations(canaryDep.Spec.Template.Annotations) if err != nil { return err } @@ -383,29 +380,6 @@ func (c *DeploymentController) 
reconcilePrimaryHpa(cd *flaggerv1.Canary, init bo return nil } -// makeAnnotations appends an unique ID to annotations map -func (c *DeploymentController) makeAnnotations(annotations map[string]string) (map[string]string, error) { - idKey := "flagger-id" - res := make(map[string]string) - uuid := make([]byte, 16) - n, err := io.ReadFull(rand.Reader, uuid) - if n != len(uuid) || err != nil { - return res, err - } - uuid[8] = uuid[8]&^0xc0 | 0x80 - uuid[6] = uuid[6]&^0xf0 | 0x40 - id := fmt.Sprintf("%x-%x-%x-%x-%x", uuid[0:4], uuid[4:6], uuid[6:8], uuid[8:10], uuid[10:]) - - for k, v := range annotations { - if k != idKey { - res[k] = v - } - } - res[idKey] = id - - return res, nil -} - // getSelectorLabel returns the selector match label func (c *DeploymentController) getSelectorLabel(deployment *appsv1.Deployment) (string, error) { for _, l := range c.labels { @@ -417,74 +391,6 @@ func (c *DeploymentController) getSelectorLabel(deployment *appsv1.Deployment) ( return "", fmt.Errorf("selector not found") } -var sidecars = map[string]bool{ - "istio-proxy": true, - "envoy": true, -} - func (c *DeploymentController) HaveDependenciesChanged(cd *flaggerv1.Canary) (bool, error) { return c.configTracker.HasConfigChanged(cd) } - -// getPorts returns a list of all container ports -func (c *DeploymentController) getPorts(cd *flaggerv1.Canary, deployment *appsv1.Deployment) (map[string]int32, error) { - ports := make(map[string]int32) - - for _, container := range deployment.Spec.Template.Spec.Containers { - // exclude service mesh proxies based on container name - if _, ok := sidecars[container.Name]; ok { - continue - } - for i, p := range container.Ports { - // exclude canary.service.port or canary.service.targetPort - if cd.Spec.Service.TargetPort.String() == "0" { - if p.ContainerPort == cd.Spec.Service.Port { - continue - } - } else { - if cd.Spec.Service.TargetPort.Type == intstr.Int { - if p.ContainerPort == cd.Spec.Service.TargetPort.IntVal { - continue - } - } - if 
cd.Spec.Service.TargetPort.Type == intstr.String { - if p.Name == cd.Spec.Service.TargetPort.StrVal { - continue - } - } - } - name := fmt.Sprintf("tcp-%s-%v", container.Name, i) - if p.Name != "" { - name = p.Name - } - - ports[name] = p.ContainerPort - } - } - - return ports, nil -} - -func makePrimaryLabels(labels map[string]string, primaryName string, label string) map[string]string { - res := make(map[string]string) - for k, v := range labels { - if k != label { - res[k] = v - } - } - res[label] = primaryName - - return res -} - -func int32p(i int32) *int32 { - return &i -} - -func int32Default(i *int32) int32 { - if i == nil { - return 1 - } - - return *i -} diff --git a/pkg/canary/deployment_controller_test.go b/pkg/canary/deployment_controller_test.go index d77924365..7fc2975eb 100644 --- a/pkg/canary/deployment_controller_test.go +++ b/pkg/canary/deployment_controller_test.go @@ -11,9 +11,9 @@ import ( flaggerv1 "github.com/weaveworks/flagger/pkg/apis/flagger/v1beta1" ) -func TestCanaryDeployer_Sync(t *testing.T) { - mocks := newFixture() - err := mocks.deployer.Initialize(mocks.canary, true) +func TestDeploymentController_Sync(t *testing.T) { + mocks := newDeploymentFixture() + err := mocks.controller.Initialize(mocks.canary, true) if err != nil { t.Fatal(err.Error()) } @@ -23,7 +23,7 @@ func TestCanaryDeployer_Sync(t *testing.T) { t.Fatal(err.Error()) } - dep := newTestDeployment() + dep := newDeploymentControllerTest() primaryImage := depPrimary.Spec.Template.Spec.Containers[0].Image sourceImage := dep.Spec.Template.Spec.Containers[0].Image @@ -41,20 +41,20 @@ func TestCanaryDeployer_Sync(t *testing.T) { } } -func TestCanaryDeployer_Promote(t *testing.T) { - mocks := newFixture() - err := mocks.deployer.Initialize(mocks.canary, true) +func TestDeploymentController_Promote(t *testing.T) { + mocks := newDeploymentFixture() + err := mocks.controller.Initialize(mocks.canary, true) if err != nil { t.Fatal(err.Error()) } - dep2 := newTestDeploymentV2() + dep2 
:= newDeploymentControllerTestV2() _, err = mocks.kubeClient.AppsV1().Deployments("default").Update(dep2) if err != nil { t.Fatal(err.Error()) } - config2 := NewTestConfigMapV2() + config2 := newDeploymentControllerTestConfigMapV2() _, err = mocks.kubeClient.CoreV1().ConfigMaps("default").Update(config2) if err != nil { t.Fatal(err.Error()) @@ -72,7 +72,7 @@ func TestCanaryDeployer_Promote(t *testing.T) { t.Fatal(err.Error()) } - err = mocks.deployer.Promote(mocks.canary) + err = mocks.controller.Promote(mocks.canary) if err != nil { t.Fatal(err.Error()) } @@ -107,32 +107,32 @@ func TestCanaryDeployer_Promote(t *testing.T) { } } -func TestCanaryDeployer_IsReady(t *testing.T) { - mocks := newFixture() - err := mocks.deployer.Initialize(mocks.canary, true) +func TestDeploymentController_IsReady(t *testing.T) { + mocks := newDeploymentFixture() + err := mocks.controller.Initialize(mocks.canary, true) if err != nil { t.Error("Expected primary readiness check to fail") } - _, err = mocks.deployer.IsPrimaryReady(mocks.canary) + _, err = mocks.controller.IsPrimaryReady(mocks.canary) if err == nil { t.Fatal(err.Error()) } - _, err = mocks.deployer.IsCanaryReady(mocks.canary) + _, err = mocks.controller.IsCanaryReady(mocks.canary) if err != nil { t.Fatal(err.Error()) } } -func TestCanaryDeployer_SetFailedChecks(t *testing.T) { - mocks := newFixture() - err := mocks.deployer.Initialize(mocks.canary, true) +func TestDeploymentController_SetFailedChecks(t *testing.T) { + mocks := newDeploymentFixture() + err := mocks.controller.Initialize(mocks.canary, true) if err != nil { t.Fatal(err.Error()) } - err = mocks.deployer.SetStatusFailedChecks(mocks.canary, 1) + err = mocks.controller.SetStatusFailedChecks(mocks.canary, 1) if err != nil { t.Fatal(err.Error()) } @@ -147,14 +147,14 @@ func TestCanaryDeployer_SetFailedChecks(t *testing.T) { } } -func TestCanaryDeployer_SetState(t *testing.T) { - mocks := newFixture() - err := mocks.deployer.Initialize(mocks.canary, true) +func 
TestDeploymentController_SetState(t *testing.T) { + mocks := newDeploymentFixture() + err := mocks.controller.Initialize(mocks.canary, true) if err != nil { t.Fatal(err.Error()) } - err = mocks.deployer.SetStatusPhase(mocks.canary, flaggerv1.CanaryPhaseProgressing) + err = mocks.controller.SetStatusPhase(mocks.canary, flaggerv1.CanaryPhaseProgressing) if err != nil { t.Fatal(err.Error()) } @@ -169,9 +169,9 @@ func TestCanaryDeployer_SetState(t *testing.T) { } } -func TestCanaryDeployer_SyncStatus(t *testing.T) { - mocks := newFixture() - err := mocks.deployer.Initialize(mocks.canary, true) +func TestDeploymentController_SyncStatus(t *testing.T) { + mocks := newDeploymentFixture() + err := mocks.controller.Initialize(mocks.canary, true) if err != nil { t.Fatal(err.Error()) } @@ -180,7 +180,7 @@ func TestCanaryDeployer_SyncStatus(t *testing.T) { Phase: flaggerv1.CanaryPhaseProgressing, FailedChecks: 2, } - err = mocks.deployer.SyncStatus(mocks.canary, status) + err = mocks.controller.SyncStatus(mocks.canary, status) if err != nil { t.Fatal(err.Error()) } @@ -202,20 +202,20 @@ func TestCanaryDeployer_SyncStatus(t *testing.T) { t.Fatalf("Status tracking configs are empty") } configs := *res.Status.TrackedConfigs - secret := newTestSecret() + secret := newDeploymentControllerTestSecret() if _, exists := configs["secret/"+secret.GetName()]; !exists { t.Errorf("Secret %s not found in status", secret.GetName()) } } -func TestCanaryDeployer_Scale(t *testing.T) { - mocks := newFixture() - err := mocks.deployer.Initialize(mocks.canary, true) +func TestDeploymentController_Scale(t *testing.T) { + mocks := newDeploymentFixture() + err := mocks.controller.Initialize(mocks.canary, true) if err != nil { t.Fatal(err.Error()) } - err = mocks.deployer.Scale(mocks.canary, 2) + err = mocks.controller.Scale(mocks.canary, 2) c, err := mocks.kubeClient.AppsV1().Deployments("default").Get("podinfo", metav1.GetOptions{}) if err != nil { @@ -227,11 +227,11 @@ func TestCanaryDeployer_Scale(t 
*testing.T) { } } -func TestCanaryDeployer_NoConfigTracking(t *testing.T) { - mocks := newFixture() - mocks.deployer.configTracker = &NopTracker{} +func TestDeploymentController_NoConfigTracking(t *testing.T) { + mocks := newDeploymentFixture() + mocks.controller.configTracker = &NopTracker{} - err := mocks.deployer.Initialize(mocks.canary, true) + err := mocks.controller.Initialize(mocks.canary, true) if err != nil { t.Fatal(err.Error()) } @@ -252,9 +252,9 @@ func TestCanaryDeployer_NoConfigTracking(t *testing.T) { } } -func TestCanaryDeployer_HasTargetChanged(t *testing.T) { - mocks := newFixture() - err := mocks.deployer.Initialize(mocks.canary, true) +func TestDeploymentController_HasTargetChanged(t *testing.T) { + mocks := newDeploymentFixture() + err := mocks.controller.Initialize(mocks.canary, true) if err != nil { t.Fatal(err.Error()) } @@ -264,7 +264,7 @@ func TestCanaryDeployer_HasTargetChanged(t *testing.T) { if err != nil { t.Fatal(err.Error()) } - err = mocks.deployer.SyncStatus(canary, flaggerv1.CanaryStatus{Phase: flaggerv1.CanaryPhaseInitializing}) + err = mocks.controller.SyncStatus(canary, flaggerv1.CanaryStatus{Phase: flaggerv1.CanaryPhaseInitializing}) if err != nil { t.Fatal(err.Error()) } @@ -274,7 +274,7 @@ func TestCanaryDeployer_HasTargetChanged(t *testing.T) { if err != nil { t.Fatal(err.Error()) } - err = mocks.deployer.SetStatusPhase(canary, flaggerv1.CanaryPhaseInitialized) + err = mocks.controller.SetStatusPhase(canary, flaggerv1.CanaryPhaseInitialized) if err != nil { t.Fatal(err.Error()) } @@ -303,7 +303,7 @@ func TestCanaryDeployer_HasTargetChanged(t *testing.T) { } // detect change in last applied spec - isNew, err := mocks.deployer.HasTargetChanged(canary) + isNew, err := mocks.controller.HasTargetChanged(canary) if err != nil { t.Fatal(err.Error()) } @@ -312,7 +312,7 @@ func TestCanaryDeployer_HasTargetChanged(t *testing.T) { } // save hash - err = mocks.deployer.SyncStatus(canary, flaggerv1.CanaryStatus{Phase: 
flaggerv1.CanaryPhaseProgressing}) + err = mocks.controller.SyncStatus(canary, flaggerv1.CanaryStatus{Phase: flaggerv1.CanaryPhaseProgressing}) if err != nil { t.Fatal(err.Error()) } @@ -341,7 +341,7 @@ func TestCanaryDeployer_HasTargetChanged(t *testing.T) { } // ignore change as hash should be the same with last promoted - isNew, err = mocks.deployer.HasTargetChanged(canary) + isNew, err = mocks.controller.HasTargetChanged(canary) if err != nil { t.Fatal(err.Error()) } @@ -368,7 +368,7 @@ func TestCanaryDeployer_HasTargetChanged(t *testing.T) { } // detect change - isNew, err = mocks.deployer.HasTargetChanged(canary) + isNew, err = mocks.controller.HasTargetChanged(canary) if err != nil { t.Fatal(err.Error()) } diff --git a/pkg/canary/fixture.go b/pkg/canary/deployment_fixture_test.go similarity index 89% rename from pkg/canary/fixture.go rename to pkg/canary/deployment_fixture_test.go index 371dddd54..62e2a2b9f 100644 --- a/pkg/canary/fixture.go +++ b/pkg/canary/deployment_fixture_test.go @@ -1,7 +1,6 @@ package canary import ( - "github.com/weaveworks/flagger/pkg/logger" "go.uber.org/zap" appsv1 "k8s.io/api/apps/v1" hpav2 "k8s.io/api/autoscaling/v2beta1" @@ -14,38 +13,39 @@ import ( flaggerv1 "github.com/weaveworks/flagger/pkg/apis/flagger/v1beta1" clientset "github.com/weaveworks/flagger/pkg/client/clientset/versioned" fakeFlagger "github.com/weaveworks/flagger/pkg/client/clientset/versioned/fake" + "github.com/weaveworks/flagger/pkg/logger" ) -type fixture struct { +type deploymentControllerFixture struct { canary *flaggerv1.Canary kubeClient kubernetes.Interface flaggerClient clientset.Interface - deployer DeploymentController + controller DeploymentController logger *zap.SugaredLogger } -func newFixture() fixture { +func newDeploymentFixture() deploymentControllerFixture { // init canary - canary := newTestCanary() + canary := newDeploymentControllerTestCanary() flaggerClient := fakeFlagger.NewSimpleClientset(canary) // init kube clientset and register mock 
objects kubeClient := fake.NewSimpleClientset( - newTestDeployment(), - newTestHPA(), - newTestConfigMap(), - newTestConfigMapEnv(), - newTestConfigMapVol(), - newTestConfigProjected(), - newTestSecret(), - newTestSecretEnv(), - newTestSecretVol(), - newTestSecretProjected(), + newDeploymentControllerTest(), + newDeploymentControllerTestHPA(), + newDeploymentControllerTestConfigMap(), + newDeploymentControllerTestConfigMapEnv(), + newDeploymentControllerTestConfigMapVol(), + newDeploymentControllerTestConfigProjected(), + newDeploymentControllerTestSecret(), + newDeploymentControllerTestSecretEnv(), + newDeploymentControllerTestSecretVol(), + newDeploymentControllerTestSecretProjected(), ) logger, _ := logger.NewLogger("debug") - deployer := DeploymentController{ + ctrl := DeploymentController{ flaggerClient: flaggerClient, kubeClient: kubeClient, logger: logger, @@ -57,16 +57,16 @@ func newFixture() fixture { }, } - return fixture{ + return deploymentControllerFixture{ canary: canary, - deployer: deployer, + controller: ctrl, logger: logger, flaggerClient: flaggerClient, kubeClient: kubeClient, } } -func newTestConfigMap() *corev1.ConfigMap { +func newDeploymentControllerTestConfigMap() *corev1.ConfigMap { return &corev1.ConfigMap{ TypeMeta: metav1.TypeMeta{APIVersion: corev1.SchemeGroupVersion.String()}, ObjectMeta: metav1.ObjectMeta{ @@ -79,7 +79,7 @@ func newTestConfigMap() *corev1.ConfigMap { } } -func NewTestConfigMapV2() *corev1.ConfigMap { +func newDeploymentControllerTestConfigMapV2() *corev1.ConfigMap { return &corev1.ConfigMap{ TypeMeta: metav1.TypeMeta{APIVersion: corev1.SchemeGroupVersion.String()}, ObjectMeta: metav1.ObjectMeta{ @@ -93,7 +93,7 @@ func NewTestConfigMapV2() *corev1.ConfigMap { } } -func newTestConfigProjected() *corev1.ConfigMap { +func newDeploymentControllerTestConfigProjected() *corev1.ConfigMap { return &corev1.ConfigMap{ TypeMeta: metav1.TypeMeta{APIVersion: corev1.SchemeGroupVersion.String()}, ObjectMeta: metav1.ObjectMeta{ @@ 
-106,7 +106,7 @@ func newTestConfigProjected() *corev1.ConfigMap { } } -func newTestConfigMapEnv() *corev1.ConfigMap { +func newDeploymentControllerTestConfigMapEnv() *corev1.ConfigMap { return &corev1.ConfigMap{ TypeMeta: metav1.TypeMeta{APIVersion: corev1.SchemeGroupVersion.String()}, ObjectMeta: metav1.ObjectMeta{ @@ -119,7 +119,7 @@ func newTestConfigMapEnv() *corev1.ConfigMap { } } -func newTestConfigMapVol() *corev1.ConfigMap { +func newDeploymentControllerTestConfigMapVol() *corev1.ConfigMap { return &corev1.ConfigMap{ TypeMeta: metav1.TypeMeta{APIVersion: corev1.SchemeGroupVersion.String()}, ObjectMeta: metav1.ObjectMeta{ @@ -132,7 +132,7 @@ func newTestConfigMapVol() *corev1.ConfigMap { } } -func newTestSecret() *corev1.Secret { +func newDeploymentControllerTestSecret() *corev1.Secret { return &corev1.Secret{ TypeMeta: metav1.TypeMeta{APIVersion: corev1.SchemeGroupVersion.String()}, ObjectMeta: metav1.ObjectMeta{ @@ -146,7 +146,7 @@ func newTestSecret() *corev1.Secret { } } -func newTestSecretProjected() *corev1.Secret { +func newDeploymentControllerTestSecretProjected() *corev1.Secret { return &corev1.Secret{ TypeMeta: metav1.TypeMeta{APIVersion: corev1.SchemeGroupVersion.String()}, ObjectMeta: metav1.ObjectMeta{ @@ -160,7 +160,7 @@ func newTestSecretProjected() *corev1.Secret { } } -func newTestSecretEnv() *corev1.Secret { +func newDeploymentControllerTestSecretEnv() *corev1.Secret { return &corev1.Secret{ TypeMeta: metav1.TypeMeta{APIVersion: corev1.SchemeGroupVersion.String()}, ObjectMeta: metav1.ObjectMeta{ @@ -174,7 +174,7 @@ func newTestSecretEnv() *corev1.Secret { } } -func newTestSecretVol() *corev1.Secret { +func newDeploymentControllerTestSecretVol() *corev1.Secret { return &corev1.Secret{ TypeMeta: metav1.TypeMeta{APIVersion: corev1.SchemeGroupVersion.String()}, ObjectMeta: metav1.ObjectMeta{ @@ -188,7 +188,7 @@ func newTestSecretVol() *corev1.Secret { } } -func newTestCanary() *flaggerv1.Canary { +func newDeploymentControllerTestCanary() 
*flaggerv1.Canary { cd := &flaggerv1.Canary{ TypeMeta: metav1.TypeMeta{APIVersion: flaggerv1.SchemeGroupVersion.String()}, ObjectMeta: metav1.ObjectMeta{ @@ -217,7 +217,7 @@ func newTestCanary() *flaggerv1.Canary { return cd } -func newTestDeployment() *appsv1.Deployment { +func newDeploymentControllerTest() *appsv1.Deployment { d := &appsv1.Deployment{ TypeMeta: metav1.TypeMeta{APIVersion: appsv1.SchemeGroupVersion.String()}, ObjectMeta: metav1.ObjectMeta{ @@ -376,7 +376,7 @@ func newTestDeployment() *appsv1.Deployment { return d } -func newTestDeploymentV2() *appsv1.Deployment { +func newDeploymentControllerTestV2() *appsv1.Deployment { d := &appsv1.Deployment{ TypeMeta: metav1.TypeMeta{APIVersion: appsv1.SchemeGroupVersion.String()}, ObjectMeta: metav1.ObjectMeta{ @@ -521,7 +521,7 @@ func newTestDeploymentV2() *appsv1.Deployment { return d } -func newTestHPA() *hpav2.HorizontalPodAutoscaler { +func newDeploymentControllerTestHPA() *hpav2.HorizontalPodAutoscaler { h := &hpav2.HorizontalPodAutoscaler{ TypeMeta: metav1.TypeMeta{APIVersion: hpav2.SchemeGroupVersion.String()}, ObjectMeta: metav1.ObjectMeta{ diff --git a/pkg/canary/ready.go b/pkg/canary/deployment_ready.go similarity index 98% rename from pkg/canary/ready.go rename to pkg/canary/deployment_ready.go index 4f29cd112..083633f96 100644 --- a/pkg/canary/ready.go +++ b/pkg/canary/deployment_ready.go @@ -36,7 +36,7 @@ func (c *DeploymentController) IsPrimaryReady(cd *flaggerv1.Canary) (bool, error return true, nil } -// IsCanaryReady checks the primary deployment status and returns an error if +// IsCanaryReady checks the canary deployment status and returns an error if // the deployment is in the middle of a rolling update or if the pods are unhealthy // it will return a non retriable error if the rolling update is stuck func (c *DeploymentController) IsCanaryReady(cd *flaggerv1.Canary) (bool, error) { diff --git a/pkg/canary/deployment_status.go b/pkg/canary/deployment_status.go new file mode 100644 index 
000000000..e5577b511 --- /dev/null +++ b/pkg/canary/deployment_status.go @@ -0,0 +1,51 @@ +package canary + +import ( + "fmt" + + ex "github.com/pkg/errors" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + flaggerv1 "github.com/weaveworks/flagger/pkg/apis/flagger/v1beta1" +) + +// SyncStatus encodes the canary pod spec and updates the canary status +func (c *DeploymentController) SyncStatus(cd *flaggerv1.Canary, status flaggerv1.CanaryStatus) error { + dep, err := c.kubeClient.AppsV1().Deployments(cd.Namespace).Get(cd.Spec.TargetRef.Name, metav1.GetOptions{}) + if err != nil { + if errors.IsNotFound(err) { + return fmt.Errorf("deployment %s.%s not found", cd.Spec.TargetRef.Name, cd.Namespace) + } + return ex.Wrap(err, "SyncStatus deployment query error") + } + + configs, err := c.configTracker.GetConfigRefs(cd) + if err != nil { + return ex.Wrap(err, "SyncStatus configs query error") + } + + return syncCanaryStatus(c.flaggerClient, cd, status, dep.Spec.Template, func(cdCopy *flaggerv1.Canary) { + cdCopy.Status.TrackedConfigs = configs + }) +} + +// SetStatusFailedChecks updates the canary failed checks counter +func (c *DeploymentController) SetStatusFailedChecks(cd *flaggerv1.Canary, val int) error { + return setStatusFailedChecks(c.flaggerClient, cd, val) +} + +// SetStatusWeight updates the canary status weight value +func (c *DeploymentController) SetStatusWeight(cd *flaggerv1.Canary, val int) error { + return setStatusWeight(c.flaggerClient, cd, val) +} + +// SetStatusIterations updates the canary status iterations value +func (c *DeploymentController) SetStatusIterations(cd *flaggerv1.Canary, val int) error { + return setStatusIterations(c.flaggerClient, cd, val) +} + +// SetStatusPhase updates the canary status phase +func (c *DeploymentController) SetStatusPhase(cd *flaggerv1.Canary, phase flaggerv1.CanaryPhase) error { + return setStatusPhase(c.flaggerClient, cd, phase) +} diff --git a/pkg/canary/factory.go 
b/pkg/canary/factory.go index 3810374e2..416a0c91e 100644 --- a/pkg/canary/factory.go +++ b/pkg/canary/factory.go @@ -37,6 +37,13 @@ func (factory *Factory) Controller(kind string) Controller { labels: factory.labels, configTracker: factory.configTracker, } + daemonSetCtrl := &DaemonSetController{ + logger: factory.logger, + kubeClient: factory.kubeClient, + flaggerClient: factory.flaggerClient, + labels: factory.labels, + configTracker: factory.configTracker, + } serviceCtrl := &ServiceController{ logger: factory.logger, kubeClient: factory.kubeClient, @@ -44,6 +51,8 @@ func (factory *Factory) Controller(kind string) Controller { } switch { + case kind == "DaemonSet": + return daemonSetCtrl case kind == "Deployment": return deploymentCtrl case kind == "Service": diff --git a/pkg/canary/status.go b/pkg/canary/status.go index ef10edc16..d65fd979d 100644 --- a/pkg/canary/status.go +++ b/pkg/canary/status.go @@ -6,7 +6,6 @@ import ( ex "github.com/pkg/errors" corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/util/retry" @@ -14,26 +13,6 @@ import ( clientset "github.com/weaveworks/flagger/pkg/client/clientset/versioned" ) -// SyncStatus encodes the canary pod spec and updates the canary status -func (c *DeploymentController) SyncStatus(cd *flaggerv1.Canary, status flaggerv1.CanaryStatus) error { - dep, err := c.kubeClient.AppsV1().Deployments(cd.Namespace).Get(cd.Spec.TargetRef.Name, metav1.GetOptions{}) - if err != nil { - if errors.IsNotFound(err) { - return fmt.Errorf("deployment %s.%s not found", cd.Spec.TargetRef.Name, cd.Namespace) - } - return ex.Wrap(err, "SyncStatus deployment query error") - } - - configs, err := c.configTracker.GetConfigRefs(cd) - if err != nil { - return ex.Wrap(err, "SyncStatus configs query error") - } - - return syncCanaryStatus(c.flaggerClient, cd, status, dep.Spec.Template, func(cdCopy *flaggerv1.Canary) { - cdCopy.Status.TrackedConfigs = configs - }) -} - 
func syncCanaryStatus(flaggerClient clientset.Interface, cd *flaggerv1.Canary, status flaggerv1.CanaryStatus, canaryResource interface{}, setAll func(cdCopy *flaggerv1.Canary)) error { hash := computeHash(canaryResource) @@ -56,7 +35,7 @@ func syncCanaryStatus(flaggerClient clientset.Interface, cd *flaggerv1.Canary, s cdCopy.Status.LastTransitionTime = metav1.Now() setAll(cdCopy) - if ok, conditions := MakeStatusConditions(cd.Status, status.Phase); ok { + if ok, conditions := MakeStatusConditions(cd, status.Phase); ok { cdCopy.Status.Conditions = conditions } @@ -70,11 +49,6 @@ func syncCanaryStatus(flaggerClient clientset.Interface, cd *flaggerv1.Canary, s return nil } -// SetStatusFailedChecks updates the canary failed checks counter -func (c *DeploymentController) SetStatusFailedChecks(cd *flaggerv1.Canary, val int) error { - return setStatusFailedChecks(c.flaggerClient, cd, val) -} - func setStatusFailedChecks(flaggerClient clientset.Interface, cd *flaggerv1.Canary, val int) error { firstTry := true err := retry.RetryOnConflict(retry.DefaultBackoff, func() (err error) { @@ -99,11 +73,6 @@ func setStatusFailedChecks(flaggerClient clientset.Interface, cd *flaggerv1.Cana return nil } -// SetStatusWeight updates the canary status weight value -func (c *DeploymentController) SetStatusWeight(cd *flaggerv1.Canary, val int) error { - return setStatusWeight(c.flaggerClient, cd, val) -} - func setStatusWeight(flaggerClient clientset.Interface, cd *flaggerv1.Canary, val int) error { firstTry := true err := retry.RetryOnConflict(retry.DefaultBackoff, func() (err error) { @@ -128,11 +97,6 @@ func setStatusWeight(flaggerClient clientset.Interface, cd *flaggerv1.Canary, va return nil } -// SetStatusIterations updates the canary status iterations value -func (c *DeploymentController) SetStatusIterations(cd *flaggerv1.Canary, val int) error { - return setStatusIterations(c.flaggerClient, cd, val) -} - func setStatusIterations(flaggerClient clientset.Interface, cd 
*flaggerv1.Canary, val int) error { firstTry := true err := retry.RetryOnConflict(retry.DefaultBackoff, func() (err error) { @@ -159,11 +123,6 @@ func setStatusIterations(flaggerClient clientset.Interface, cd *flaggerv1.Canary return nil } -// SetStatusPhase updates the canary status phase -func (c *DeploymentController) SetStatusPhase(cd *flaggerv1.Canary, phase flaggerv1.CanaryPhase) error { - return setStatusPhase(c.flaggerClient, cd, phase) -} - func setStatusPhase(flaggerClient clientset.Interface, cd *flaggerv1.Canary, phase flaggerv1.CanaryPhase) error { firstTry := true err := retry.RetryOnConflict(retry.DefaultBackoff, func() (err error) { @@ -189,7 +148,7 @@ func setStatusPhase(flaggerClient clientset.Interface, cd *flaggerv1.Canary, pha cdCopy.Status.LastPromotedSpec = cd.Status.LastAppliedSpec } - if ok, conditions := MakeStatusConditions(cdCopy.Status, phase); ok { + if ok, conditions := MakeStatusConditions(cdCopy, phase); ok { cdCopy.Status.Conditions = conditions } @@ -215,19 +174,19 @@ func getStatusCondition(status flaggerv1.CanaryStatus, conditionType flaggerv1.C } // MakeStatusCondition updates the canary status conditions based on canary phase -func MakeStatusConditions(canaryStatus flaggerv1.CanaryStatus, +func MakeStatusConditions(cd *flaggerv1.Canary, phase flaggerv1.CanaryPhase) (bool, []flaggerv1.CanaryCondition) { - currentCondition := getStatusCondition(canaryStatus, flaggerv1.PromotedType) + currentCondition := getStatusCondition(cd.Status, flaggerv1.PromotedType) - message := "New deployment detected, starting initialization." + message := fmt.Sprintf("New %s detected, starting initialization.", cd.Spec.TargetRef.Kind) status := corev1.ConditionUnknown switch phase { case flaggerv1.CanaryPhaseInitializing: status = corev1.ConditionUnknown - message = "New deployment detected, starting initialization." 
+ message = fmt.Sprintf("New %s detected, starting initialization.", cd.Spec.TargetRef.Kind) case flaggerv1.CanaryPhaseInitialized: status = corev1.ConditionTrue - message = "Deployment initialization completed." + message = fmt.Sprintf("%s initialization completed.", cd.Spec.TargetRef.Kind) case flaggerv1.CanaryPhaseWaiting: status = corev1.ConditionUnknown message = "Waiting for approval." @@ -245,7 +204,7 @@ func MakeStatusConditions(canaryStatus flaggerv1.CanaryStatus, message = "Canary analysis completed successfully, promotion finished." case flaggerv1.CanaryPhaseFailed: status = corev1.ConditionFalse - message = "Canary analysis failed, deployment scaled to zero." + message = fmt.Sprintf("Canary analysis failed, %s scaled to zero.", cd.Spec.TargetRef.Kind) } newCondition := &flaggerv1.CanaryCondition{ diff --git a/pkg/canary/util.go b/pkg/canary/util.go new file mode 100644 index 000000000..cbe9c0e84 --- /dev/null +++ b/pkg/canary/util.go @@ -0,0 +1,100 @@ +package canary + +import ( + "crypto/rand" + "fmt" + "io" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/util/intstr" + + flaggerv1 "github.com/weaveworks/flagger/pkg/apis/flagger/v1beta1" +) + +var sidecars = map[string]bool{ + "istio-proxy": true, + "envoy": true, +} + +func getPorts(cd *flaggerv1.Canary, cs []corev1.Container) (map[string]int32, error) { + ports := make(map[string]int32, len(cs)) + for _, container := range cs { + // exclude service mesh proxies based on container name + if _, ok := sidecars[container.Name]; ok { + continue + } + for i, p := range container.Ports { + // exclude canary.service.port or canary.service.targetPort + if cd.Spec.Service.TargetPort.String() == "0" { + if p.ContainerPort == cd.Spec.Service.Port { + continue + } + } else { + if cd.Spec.Service.TargetPort.Type == intstr.Int { + if p.ContainerPort == cd.Spec.Service.TargetPort.IntVal { + continue + } + } + if cd.Spec.Service.TargetPort.Type == intstr.String { + if p.Name == 
cd.Spec.Service.TargetPort.StrVal { + continue + } + } + } + name := fmt.Sprintf("tcp-%s-%v", container.Name, i) + if p.Name != "" { + name = p.Name + } + + ports[name] = p.ContainerPort + } + } + return ports, nil +} + +// makeAnnotations appends an unique ID to annotations map +func makeAnnotations(annotations map[string]string) (map[string]string, error) { + idKey := "flagger-id" + res := make(map[string]string) + uuid := make([]byte, 16) + n, err := io.ReadFull(rand.Reader, uuid) + if n != len(uuid) || err != nil { + return res, err + } + uuid[8] = uuid[8]&^0xc0 | 0x80 + uuid[6] = uuid[6]&^0xf0 | 0x40 + id := fmt.Sprintf("%x-%x-%x-%x-%x", uuid[0:4], uuid[4:6], uuid[6:8], uuid[8:10], uuid[10:]) + + for k, v := range annotations { + if k != idKey { + res[k] = v + } + } + res[idKey] = id + + return res, nil +} + +func makePrimaryLabels(labels map[string]string, primaryName string, label string) map[string]string { + res := make(map[string]string) + for k, v := range labels { + if k != label { + res[k] = v + } + } + res[label] = primaryName + + return res +} + +func int32p(i int32) *int32 { + return &i +} + +func int32Default(i *int32) int32 { + if i == nil { + return 1 + } + + return *i +} diff --git a/pkg/controller/controller.go b/pkg/controller/controller.go index e3677d712..5c0d1caa4 100644 --- a/pkg/controller/controller.go +++ b/pkg/controller/controller.go @@ -222,7 +222,7 @@ func (c *Controller) syncHandler(key string) error { // set status condition for new canaries if cd.Status.Conditions == nil { - if ok, conditions := canary.MakeStatusConditions(cd.Status, flaggerv1.CanaryPhaseInitializing); ok { + if ok, conditions := canary.MakeStatusConditions(cd, flaggerv1.CanaryPhaseInitializing); ok { cdCopy := cd.DeepCopy() cdCopy.Status.Conditions = conditions cdCopy.Status.LastTransitionTime = metav1.Now() diff --git a/pkg/controller/scheduler.go b/pkg/controller/scheduler.go index 1fd4a54fb..bfbef5853 100644 --- a/pkg/controller/scheduler.go +++ 
b/pkg/controller/scheduler.go @@ -116,7 +116,7 @@ func (c *Controller) advanceCanary(name string, namespace string, skipLivenessCh return } - // create primary deployment and hpa + // create primary err = canaryController.Initialize(cd, skipLivenessChecks) if err != nil { c.recordEventWarningf(cd, "%v", err) @@ -161,7 +161,7 @@ func (c *Controller) advanceCanary(name string, namespace string, skipLivenessCh maxWeight = cd.Spec.CanaryAnalysis.MaxWeight } - // check primary deployment status + // check primary status if !skipLivenessChecks && !cd.Spec.SkipAnalysis { if _, err := canaryController.IsPrimaryReady(cd); err != nil { c.recordEventWarningf(cd, "%v", err) @@ -210,7 +210,7 @@ func (c *Controller) advanceCanary(name string, namespace string, skipLivenessCh return } - // check canary deployment status + // check canary status var retriable = true if !skipLivenessChecks { retriable, err = canaryController.IsCanaryReady(cd) @@ -617,7 +617,7 @@ func (c *Controller) checkCanaryStatus(canary *flaggerv1.Canary, canaryControlle } c.recorder.SetStatus(canary, flaggerv1.CanaryPhaseInitialized) c.recordEventInfof(canary, "Initialization done! 
%s.%s", canary.Name, canary.Namespace) - c.alert(canary, "New deployment detected, initialization completed.", + c.alert(canary, fmt.Sprintf("New %s detected, initialization completed.", canary.Spec.TargetRef.Kind), true, flaggerv1.SeverityInfo) return false } diff --git a/pkg/controller/scheduler_common_test.go b/pkg/controller/scheduler_common_test.go new file mode 100644 index 000000000..512a0f4ce --- /dev/null +++ b/pkg/controller/scheduler_common_test.go @@ -0,0 +1,10 @@ +package controller + +func alwaysReady() bool { + return true +} + +func toFloatPtr(val int) *float64 { + v := float64(val) + return &v +} diff --git a/pkg/controller/scheduler_daemonset_fixture_test.go b/pkg/controller/scheduler_daemonset_fixture_test.go new file mode 100644 index 000000000..8e0b24786 --- /dev/null +++ b/pkg/controller/scheduler_daemonset_fixture_test.go @@ -0,0 +1,693 @@ +package controller + +import ( + "sync" + "time" + + "go.uber.org/zap" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/kubernetes/fake" + "k8s.io/client-go/tools/record" + "k8s.io/client-go/util/workqueue" + + flaggerv1 "github.com/weaveworks/flagger/pkg/apis/flagger/v1beta1" + istiov1alpha1 "github.com/weaveworks/flagger/pkg/apis/istio/common/v1alpha1" + istiov1alpha3 "github.com/weaveworks/flagger/pkg/apis/istio/v1alpha3" + "github.com/weaveworks/flagger/pkg/canary" + clientset "github.com/weaveworks/flagger/pkg/client/clientset/versioned" + fakeFlagger "github.com/weaveworks/flagger/pkg/client/clientset/versioned/fake" + informers "github.com/weaveworks/flagger/pkg/client/informers/externalversions" + "github.com/weaveworks/flagger/pkg/logger" + "github.com/weaveworks/flagger/pkg/metrics" + "github.com/weaveworks/flagger/pkg/metrics/observers" + "github.com/weaveworks/flagger/pkg/router" +) + +type daemonSetFixture struct { + canary *flaggerv1.Canary + 
kubeClient kubernetes.Interface + meshClient clientset.Interface + flaggerClient clientset.Interface + deployer canary.Controller + ctrl *Controller + logger *zap.SugaredLogger + router router.Interface +} + +func newDaemonSetFixture(c *flaggerv1.Canary) daemonSetFixture { + if c == nil { + c = newDaemonSetTestCanary() + } + + // init Flagger clientset and register objects + flaggerClient := fakeFlagger.NewSimpleClientset( + c, + newDaemonSetTestMetricTemplate(), + newDaemonSetTestAlertProvider(), + ) + + // init Kubernetes clientset and register objects + kubeClient := fake.NewSimpleClientset( + newDaemonSetTestDaemonSet(), + newDaemonSetTestService(), + newDaemonSetTestConfigMap(), + newDaemonSetTestConfigMapEnv(), + newDaemonSetTestConfigMapVol(), + newDaemonSetTestSecret(), + newDaemonSetTestSecretEnv(), + newDaemonSetTestSecretVol(), + newDaemonSetTestAlertProviderSecret(), + ) + + logger, _ := logger.NewLogger("debug") + + // init controller + flaggerInformerFactory := informers.NewSharedInformerFactory(flaggerClient, 0) + + fi := Informers{ + CanaryInformer: flaggerInformerFactory.Flagger().V1beta1().Canaries(), + MetricInformer: flaggerInformerFactory.Flagger().V1beta1().MetricTemplates(), + AlertInformer: flaggerInformerFactory.Flagger().V1beta1().AlertProviders(), + } + + // init router + rf := router.NewFactory(nil, kubeClient, flaggerClient, "annotationsPrefix", logger, flaggerClient) + + // init observer + observerFactory, _ := observers.NewFactory("fake") + + // init canary factory + configTracker := &canary.ConfigTracker{ + Logger: logger, + KubeClient: kubeClient, + FlaggerClient: flaggerClient, + } + canaryFactory := canary.NewFactory(kubeClient, flaggerClient, configTracker, []string{"app", "name"}, logger) + + ctrl := &Controller{ + kubeClient: kubeClient, + istioClient: flaggerClient, + flaggerClient: flaggerClient, + flaggerInformers: fi, + flaggerSynced: fi.CanaryInformer.Informer().HasSynced, + workqueue: 
workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), controllerAgentName), + eventRecorder: &record.FakeRecorder{}, + logger: logger, + canaries: new(sync.Map), + flaggerWindow: time.Second, + canaryFactory: canaryFactory, + observerFactory: observerFactory, + recorder: metrics.NewRecorder(controllerAgentName, false), + routerFactory: rf, + } + ctrl.flaggerSynced = alwaysReady + ctrl.flaggerInformers.CanaryInformer.Informer().GetIndexer().Add(c) + ctrl.flaggerInformers.MetricInformer.Informer().GetIndexer().Add(newDaemonSetTestMetricTemplate()) + ctrl.flaggerInformers.AlertInformer.Informer().GetIndexer().Add(newDaemonSetTestAlertProvider()) + + meshRouter := rf.MeshRouter("istio") + + return daemonSetFixture{ + canary: c, + deployer: canaryFactory.Controller("DaemonSet"), + logger: logger, + flaggerClient: flaggerClient, + meshClient: flaggerClient, + kubeClient: kubeClient, + ctrl: ctrl, + router: meshRouter, + } +} + +func newDaemonSetTestConfigMap() *corev1.ConfigMap { + return &corev1.ConfigMap{ + TypeMeta: metav1.TypeMeta{APIVersion: corev1.SchemeGroupVersion.String()}, + ObjectMeta: metav1.ObjectMeta{ + Namespace: "default", + Name: "podinfo-config-env", + }, + Data: map[string]string{ + "color": "red", + }, + } +} + +func newDaemonSetTestConfigMapV2() *corev1.ConfigMap { + return &corev1.ConfigMap{ + TypeMeta: metav1.TypeMeta{APIVersion: corev1.SchemeGroupVersion.String()}, + ObjectMeta: metav1.ObjectMeta{ + Namespace: "default", + Name: "podinfo-config-env", + }, + Data: map[string]string{ + "color": "blue", + "output": "console", + }, + } +} + +func newDaemonSetTestConfigMapEnv() *corev1.ConfigMap { + return &corev1.ConfigMap{ + TypeMeta: metav1.TypeMeta{APIVersion: corev1.SchemeGroupVersion.String()}, + ObjectMeta: metav1.ObjectMeta{ + Namespace: "default", + Name: "podinfo-config-all-env", + }, + Data: map[string]string{ + "color": "red", + }, + } +} + +func newDaemonSetTestConfigMapVol() *corev1.ConfigMap { + return 
&corev1.ConfigMap{ + TypeMeta: metav1.TypeMeta{APIVersion: corev1.SchemeGroupVersion.String()}, + ObjectMeta: metav1.ObjectMeta{ + Namespace: "default", + Name: "podinfo-config-vol", + }, + Data: map[string]string{ + "color": "red", + }, + } +} + +func newDaemonSetTestSecret() *corev1.Secret { + return &corev1.Secret{ + TypeMeta: metav1.TypeMeta{APIVersion: corev1.SchemeGroupVersion.String()}, + ObjectMeta: metav1.ObjectMeta{ + Namespace: "default", + Name: "podinfo-secret-env", + }, + Type: corev1.SecretTypeOpaque, + Data: map[string][]byte{ + "apiKey": []byte("test"), + "username": []byte("test"), + "password": []byte("test"), + }, + } +} + +func newDaemonSetTestSecretV2() *corev1.Secret { + return &corev1.Secret{ + TypeMeta: metav1.TypeMeta{APIVersion: corev1.SchemeGroupVersion.String()}, + ObjectMeta: metav1.ObjectMeta{ + Namespace: "default", + Name: "podinfo-secret-env", + }, + Type: corev1.SecretTypeOpaque, + Data: map[string][]byte{ + "apiKey": []byte("test2"), + "username": []byte("test"), + "password": []byte("test"), + }, + } +} + +func newDaemonSetTestSecretEnv() *corev1.Secret { + return &corev1.Secret{ + TypeMeta: metav1.TypeMeta{APIVersion: corev1.SchemeGroupVersion.String()}, + ObjectMeta: metav1.ObjectMeta{ + Namespace: "default", + Name: "podinfo-secret-all-env", + }, + Type: corev1.SecretTypeOpaque, + Data: map[string][]byte{ + "apiKey": []byte("test"), + }, + } +} + +func newDaemonSetTestSecretVol() *corev1.Secret { + return &corev1.Secret{ + TypeMeta: metav1.TypeMeta{APIVersion: corev1.SchemeGroupVersion.String()}, + ObjectMeta: metav1.ObjectMeta{ + Namespace: "default", + Name: "podinfo-secret-vol", + }, + Type: corev1.SecretTypeOpaque, + Data: map[string][]byte{ + "apiKey": []byte("test"), + }, + } +} + +func newDaemonSetTestCanary() *flaggerv1.Canary { + cd := &flaggerv1.Canary{ + TypeMeta: metav1.TypeMeta{APIVersion: flaggerv1.SchemeGroupVersion.String()}, + ObjectMeta: metav1.ObjectMeta{ + Namespace: "default", + Name: "podinfo", + }, + 
Spec: flaggerv1.CanarySpec{ + TargetRef: flaggerv1.CrossNamespaceObjectReference{ + Name: "podinfo", + APIVersion: "apps/v1", + Kind: "DaemonSet", + }, Service: flaggerv1.CanaryService{ + Port: 9898, + }, CanaryAnalysis: flaggerv1.CanaryAnalysis{ + Threshold: 10, + StepWeight: 10, + MaxWeight: 50, + Metrics: []flaggerv1.CanaryMetric{ + { + Name: "request-success-rate", + Threshold: 99, + Interval: "1m", + }, + { + Name: "request-duration", + ThresholdRange: &flaggerv1.CanaryThresholdRange{ + Min: toFloatPtr(0), + Max: toFloatPtr(500000), + }, + Interval: "1m", + }, + { + Name: "custom", + ThresholdRange: &flaggerv1.CanaryThresholdRange{ + Min: toFloatPtr(0), + Max: toFloatPtr(100), + }, + Interval: "1m", + TemplateRef: &flaggerv1.CrossNamespaceObjectReference{ + Name: "envoy", + Namespace: "default", + }, + }, + }, + }, + }, + } + return cd +} + +func newDaemonSetTestCanaryMirror() *flaggerv1.Canary { + cd := newDaemonSetTestCanary() + cd.Spec.CanaryAnalysis.Mirror = true + return cd +} + +func newDaemonSetTestCanaryAB() *flaggerv1.Canary { + cd := &flaggerv1.Canary{ + TypeMeta: metav1.TypeMeta{APIVersion: flaggerv1.SchemeGroupVersion.String()}, + ObjectMeta: metav1.ObjectMeta{ + Namespace: "default", + Name: "podinfo", + }, + Spec: flaggerv1.CanarySpec{ + TargetRef: flaggerv1.CrossNamespaceObjectReference{ + Name: "podinfo", + APIVersion: "apps/v1", + Kind: "DaemonSet", + }, Service: flaggerv1.CanaryService{ + Port: 9898, + }, CanaryAnalysis: flaggerv1.CanaryAnalysis{ + Threshold: 10, + Iterations: 10, + Match: []istiov1alpha3.HTTPMatchRequest{ + { + Headers: map[string]istiov1alpha1.StringMatch{ + "x-user-type": { + Exact: "test", + }, + }, + }, + }, + Metrics: []flaggerv1.CanaryMetric{ + { + Name: "request-success-rate", + ThresholdRange: &flaggerv1.CanaryThresholdRange{ + Min: toFloatPtr(99), + Max: toFloatPtr(100), + }, + Interval: "1m", + }, + { + Name: "request-duration", + Threshold: 500000, + Interval: "1m", + }, + { + Name: "custom", + ThresholdRange: 
&flaggerv1.CanaryThresholdRange{ + Min: toFloatPtr(0), + Max: toFloatPtr(500000), + }, + Interval: "1m", + Query: "fake", + }, + }, + }, + }, + } + return cd +} + +func newDaemonSetTestDaemonSet() *appsv1.DaemonSet { + d := &appsv1.DaemonSet{ + TypeMeta: metav1.TypeMeta{APIVersion: appsv1.SchemeGroupVersion.String()}, + ObjectMeta: metav1.ObjectMeta{ + Namespace: "default", + Name: "podinfo", + }, + Spec: appsv1.DaemonSetSpec{ + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "app": "podinfo", + }, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + "app": "podinfo", + }, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "podinfo", + Image: "quay.io/stefanprodan/podinfo:1.2.0", + Command: []string{ + "./podinfo", + "--port=9898", + }, + Args: nil, + WorkingDir: "", + Ports: []corev1.ContainerPort{ + { + Name: "http", + ContainerPort: 9898, + Protocol: corev1.ProtocolTCP, + }, + { + Name: "http-metrics", + ContainerPort: 8080, + Protocol: corev1.ProtocolTCP, + }, + { + ContainerPort: 8888, + }, + }, + Env: []corev1.EnvVar{ + { + Name: "PODINFO_UI_COLOR", + ValueFrom: &corev1.EnvVarSource{ + ConfigMapKeyRef: &corev1.ConfigMapKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: "podinfo-config-env", + }, + Key: "color", + }, + }, + }, + { + Name: "API_KEY", + ValueFrom: &corev1.EnvVarSource{ + SecretKeyRef: &corev1.SecretKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: "podinfo-secret-env", + }, + Key: "apiKey", + }, + }, + }, + }, + EnvFrom: []corev1.EnvFromSource{ + { + ConfigMapRef: &corev1.ConfigMapEnvSource{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: "podinfo-config-all-env", + }, + }, + }, + { + SecretRef: &corev1.SecretEnvSource{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: "podinfo-secret-all-env", + }, + }, + }, + }, + VolumeMounts: []corev1.VolumeMount{ + { + Name: "config", + 
MountPath: "/etc/podinfo/config", + ReadOnly: true, + }, + { + Name: "secret", + MountPath: "/etc/podinfo/secret", + ReadOnly: true, + }, + }, + }, + }, + Volumes: []corev1.Volume{ + { + Name: "config", + VolumeSource: corev1.VolumeSource{ + ConfigMap: &corev1.ConfigMapVolumeSource{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: "podinfo-config-vol", + }, + }, + }, + }, + { + Name: "secret", + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: "podinfo-secret-vol", + }, + }, + }, + }, + }, + }, + }, + } + + return d +} + +func newDaemonSetTestDaemonSetV2() *appsv1.DaemonSet { + d := &appsv1.DaemonSet{ + TypeMeta: metav1.TypeMeta{APIVersion: appsv1.SchemeGroupVersion.String()}, + ObjectMeta: metav1.ObjectMeta{ + Namespace: "default", + Name: "podinfo", + }, + Spec: appsv1.DaemonSetSpec{ + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "app": "podinfo", + }, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + "app": "podinfo", + }, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "podinfo", + Image: "quay.io/stefanprodan/podinfo:1.2.1", + Ports: []corev1.ContainerPort{ + { + Name: "http", + ContainerPort: 9898, + Protocol: corev1.ProtocolTCP, + }, + }, + Command: []string{ + "./podinfo", + "--port=9898", + }, + Env: []corev1.EnvVar{ + { + Name: "PODINFO_UI_COLOR", + ValueFrom: &corev1.EnvVarSource{ + ConfigMapKeyRef: &corev1.ConfigMapKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: "podinfo-config-env", + }, + Key: "color", + }, + }, + }, + { + Name: "API_KEY", + ValueFrom: &corev1.EnvVarSource{ + SecretKeyRef: &corev1.SecretKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: "podinfo-secret-env", + }, + Key: "apiKey", + }, + }, + }, + }, + EnvFrom: []corev1.EnvFromSource{ + { + ConfigMapRef: &corev1.ConfigMapEnvSource{ + LocalObjectReference: corev1.LocalObjectReference{ 
+ Name: "podinfo-config-all-env", + }, + }, + }, + }, + VolumeMounts: []corev1.VolumeMount{ + { + Name: "config", + MountPath: "/etc/podinfo/config", + ReadOnly: true, + }, + { + Name: "secret", + MountPath: "/etc/podinfo/secret", + ReadOnly: true, + }, + }, + }, + }, + Volumes: []corev1.Volume{ + { + Name: "config", + VolumeSource: corev1.VolumeSource{ + ConfigMap: &corev1.ConfigMapVolumeSource{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: "podinfo-config-vol", + }, + }, + }, + }, + { + Name: "secret", + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: "podinfo-secret-vol", + }, + }, + }, + }, + }, + }, + }, + } + + return d +} + +func newDaemonSetTestService() *corev1.Service { + d := &corev1.Service{ + TypeMeta: metav1.TypeMeta{APIVersion: appsv1.SchemeGroupVersion.String()}, + ObjectMeta: metav1.ObjectMeta{ + Namespace: "default", + Name: "podinfo", + }, + Spec: corev1.ServiceSpec{ + Selector: map[string]string{ + "app": "podinfo", + }, + Type: corev1.ServiceTypeClusterIP, + Ports: []corev1.ServicePort{ + { + Name: "http", + Port: 9898, + Protocol: corev1.ProtocolTCP, + TargetPort: intstr.FromString("http"), + }, + }, + }, + } + + return d +} + +func newDaemonSetTestServiceV2() *corev1.Service { + d := &corev1.Service{ + TypeMeta: metav1.TypeMeta{APIVersion: appsv1.SchemeGroupVersion.String()}, + ObjectMeta: metav1.ObjectMeta{ + Namespace: "default", + Name: "podinfo", + }, + Spec: corev1.ServiceSpec{ + Selector: map[string]string{ + "app": "podinfo-v2", + }, + Type: corev1.ServiceTypeClusterIP, + Ports: []corev1.ServicePort{ + { + Name: "http", + Port: 9898, + Protocol: corev1.ProtocolTCP, + TargetPort: intstr.FromString("http"), + }, + }, + }, + } + + return d +} + +func newDaemonSetTestMetricTemplate() *flaggerv1.MetricTemplate { + provider := flaggerv1.MetricTemplateProvider{ + Type: "prometheus", + Address: "fake", + SecretRef: &corev1.LocalObjectReference{ + Name: "podinfo-secret-env", + }, + } + + 
template := &flaggerv1.MetricTemplate{ + TypeMeta: metav1.TypeMeta{APIVersion: flaggerv1.SchemeGroupVersion.String()}, + ObjectMeta: metav1.ObjectMeta{ + Namespace: "default", + Name: "envoy", + }, + Spec: flaggerv1.MetricTemplateSpec{ + Provider: provider, + Query: `sum(envoy_cluster_upstream_rq{envoy_cluster_name=~"{{ namespace }}_{{ target }}"})`, + }, + } + return template +} + +func newDaemonSetTestAlertProviderSecret() *corev1.Secret { + return &corev1.Secret{ + TypeMeta: metav1.TypeMeta{APIVersion: corev1.SchemeGroupVersion.String()}, + ObjectMeta: metav1.ObjectMeta{ + Namespace: "default", + Name: "alert-secret", + }, + Type: corev1.SecretTypeOpaque, + Data: map[string][]byte{ + "address": []byte("http://mock.slack"), + }, + } +} + +func newDaemonSetTestAlertProvider() *flaggerv1.AlertProvider { + return &flaggerv1.AlertProvider{ + TypeMeta: metav1.TypeMeta{APIVersion: flaggerv1.SchemeGroupVersion.String()}, + ObjectMeta: metav1.ObjectMeta{ + Namespace: "default", + Name: "slack", + }, + Spec: flaggerv1.AlertProviderSpec{ + Type: "slack", + Address: "http://fake.slack", + SecretRef: &corev1.LocalObjectReference{ + Name: "alert-secret", + }, + }, + } +} diff --git a/pkg/controller/scheduler_daemonset_test.go b/pkg/controller/scheduler_daemonset_test.go new file mode 100644 index 000000000..89dfef51c --- /dev/null +++ b/pkg/controller/scheduler_daemonset_test.go @@ -0,0 +1,678 @@ +package controller + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "net/http/httptest" + "testing" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" + + flaggerv1 "github.com/weaveworks/flagger/pkg/apis/flagger/v1beta1" + "github.com/weaveworks/flagger/pkg/notifier" +) + +func TestScheduler_DaemonSetInit(t *testing.T) { + mocks := newDaemonSetFixture(nil) + mocks.ctrl.advanceCanary("podinfo", "default", true) + + _, err := mocks.kubeClient.AppsV1().DaemonSets("default").Get("podinfo-primary", metav1.GetOptions{}) + if err != 
nil { + t.Fatal(err.Error()) + } +} + +func TestScheduler_DaemonSetNewRevision(t *testing.T) { + mocks := newDaemonSetFixture(nil) + mocks.ctrl.advanceCanary("podinfo", "default", true) + + // update + dep2 := newDaemonSetTestDaemonSetV2() + _, err := mocks.kubeClient.AppsV1().DaemonSets("default").Update(dep2) + if err != nil { + t.Fatal(err.Error()) + } + + // detect changes + mocks.ctrl.advanceCanary("podinfo", "default", true) + + _, err = mocks.kubeClient.AppsV1().DaemonSets("default").Get("podinfo", metav1.GetOptions{}) + if err != nil { + t.Fatal(err.Error()) + } +} + +func TestScheduler_DaemonSetRollback(t *testing.T) { + mocks := newDaemonSetFixture(nil) + // init + mocks.ctrl.advanceCanary("podinfo", "default", true) + + // update failed checks to max + err := mocks.deployer.SyncStatus(mocks.canary, flaggerv1.CanaryStatus{Phase: flaggerv1.CanaryPhaseProgressing, FailedChecks: 10}) + if err != nil { + t.Fatal(err.Error()) + } + + // set a metric check to fail + c, err := mocks.flaggerClient.FlaggerV1beta1().Canaries("default").Get("podinfo", metav1.GetOptions{}) + if err != nil { + t.Fatal(err.Error()) + } + cd := c.DeepCopy() + cd.Spec.CanaryAnalysis.Metrics = append(c.Spec.CanaryAnalysis.Metrics, flaggerv1.CanaryMetric{ + Name: "fail", + Interval: "1m", + ThresholdRange: &flaggerv1.CanaryThresholdRange{ + Min: toFloatPtr(0), + Max: toFloatPtr(50), + }, + Query: "fail", + }) + _, err = mocks.flaggerClient.FlaggerV1beta1().Canaries("default").Update(cd) + if err != nil { + t.Fatal(err.Error()) + } + + // run metric checks + mocks.ctrl.advanceCanary("podinfo", "default", true) + + // finalise analysis + mocks.ctrl.advanceCanary("podinfo", "default", true) + + // check status + c, err = mocks.flaggerClient.FlaggerV1beta1().Canaries("default").Get("podinfo", metav1.GetOptions{}) + if err != nil { + t.Fatal(err.Error()) + } + + if c.Status.Phase != flaggerv1.CanaryPhaseFailed { + t.Errorf("Got canary state %v wanted %v", c.Status.Phase, 
flaggerv1.CanaryPhaseFailed) + } +} + +func TestScheduler_DaemonSetSkipAnalysis(t *testing.T) { + mocks := newDaemonSetFixture(nil) + // init + mocks.ctrl.advanceCanary("podinfo", "default", true) + + // enable skip + cd, err := mocks.flaggerClient.FlaggerV1beta1().Canaries("default").Get("podinfo", metav1.GetOptions{}) + if err != nil { + t.Fatal(err.Error()) + } + cd.Spec.SkipAnalysis = true + _, err = mocks.flaggerClient.FlaggerV1beta1().Canaries("default").Update(cd) + if err != nil { + t.Fatal(err.Error()) + } + + // update + dep2 := newDaemonSetTestDaemonSetV2() + _, err = mocks.kubeClient.AppsV1().DaemonSets("default").Update(dep2) + if err != nil { + t.Fatal(err.Error()) + } + + // detect changes + mocks.ctrl.advanceCanary("podinfo", "default", true) + // advance + mocks.ctrl.advanceCanary("podinfo", "default", true) + + c, err := mocks.flaggerClient.FlaggerV1beta1().Canaries("default").Get("podinfo", metav1.GetOptions{}) + if err != nil { + t.Fatal(err.Error()) + } + if !c.Spec.SkipAnalysis { + t.Errorf("Got skip analysis %v wanted %v", c.Spec.SkipAnalysis, true) + } + + if c.Status.Phase != flaggerv1.CanaryPhaseSucceeded { + t.Errorf("Got canary state %v wanted %v", c.Status.Phase, flaggerv1.CanaryPhaseSucceeded) + } +} + +func TestScheduler_DaemonSetNewRevisionReset(t *testing.T) { + mocks := newDaemonSetFixture(nil) + // init + mocks.ctrl.advanceCanary("podinfo", "default", true) + + // first update + dep2 := newDaemonSetTestDaemonSetV2() + _, err := mocks.kubeClient.AppsV1().DaemonSets("default").Update(dep2) + if err != nil { + t.Fatal(err.Error()) + } + + // detect changes + mocks.ctrl.advanceCanary("podinfo", "default", true) + // advance + mocks.ctrl.advanceCanary("podinfo", "default", true) + + primaryWeight, canaryWeight, mirrored, err := mocks.router.GetRoutes(mocks.canary) + if err != nil { + t.Fatal(err.Error()) + } + + if primaryWeight != 90 { + t.Errorf("Got primary route %v wanted %v", primaryWeight, 90) + } + + if canaryWeight != 10 { + 
t.Errorf("Got canary route %v wanted %v", canaryWeight, 10) + } + + if mirrored != false { + t.Errorf("Got mirrored %v wanted %v", mirrored, false) + } + + // second update + dep2.Spec.Template.Spec.ServiceAccountName = "test" + _, err = mocks.kubeClient.AppsV1().DaemonSets("default").Update(dep2) + if err != nil { + t.Fatal(err.Error()) + } + + // detect changes + mocks.ctrl.advanceCanary("podinfo", "default", true) + + primaryWeight, canaryWeight, mirrored, err = mocks.router.GetRoutes(mocks.canary) + if err != nil { + t.Fatal(err.Error()) + } + + if primaryWeight != 100 { + t.Errorf("Got primary route %v wanted %v", primaryWeight, 100) + } + + if canaryWeight != 0 { + t.Errorf("Got canary route %v wanted %v", canaryWeight, 0) + } + + if mirrored != false { + t.Errorf("Got mirrored %v wanted %v", mirrored, false) + } +} + +func TestScheduler_DaemonSetPromotion(t *testing.T) { + mocks := newDaemonSetFixture(nil) + + // init + mocks.ctrl.advanceCanary("podinfo", "default", true) + + // check initialized status + c, err := mocks.flaggerClient.FlaggerV1beta1().Canaries("default").Get("podinfo", metav1.GetOptions{}) + if err != nil { + t.Fatal(err.Error()) + } + + if c.Status.Phase != flaggerv1.CanaryPhaseInitialized { + t.Errorf("Got canary state %v wanted %v", c.Status.Phase, flaggerv1.CanaryPhaseInitialized) + } + + // update + dep2 := newDaemonSetTestDaemonSetV2() + _, err = mocks.kubeClient.AppsV1().DaemonSets("default").Update(dep2) + if err != nil { + t.Fatal(err.Error()) + } + + // detect pod spec changes + mocks.ctrl.advanceCanary("podinfo", "default", true) + + config2 := newDaemonSetTestConfigMapV2() + _, err = mocks.kubeClient.CoreV1().ConfigMaps("default").Update(config2) + if err != nil { + t.Fatal(err.Error()) + } + + secret2 := newDaemonSetTestSecretV2() + _, err = mocks.kubeClient.CoreV1().Secrets("default").Update(secret2) + if err != nil { + t.Fatal(err.Error()) + } + + // detect configs changes + mocks.ctrl.advanceCanary("podinfo", "default", true) 
+ + primaryWeight, canaryWeight, mirrored, err := mocks.router.GetRoutes(mocks.canary) + if err != nil { + t.Fatal(err.Error()) + } + + primaryWeight = 60 + canaryWeight = 40 + err = mocks.router.SetRoutes(mocks.canary, primaryWeight, canaryWeight, mirrored) + if err != nil { + t.Fatal(err.Error()) + } + + // advance + mocks.ctrl.advanceCanary("podinfo", "default", true) + + // check progressing status + c, err = mocks.flaggerClient.FlaggerV1beta1().Canaries("default").Get("podinfo", metav1.GetOptions{}) + if err != nil { + t.Fatal(err.Error()) + } + + if c.Status.Phase != flaggerv1.CanaryPhaseProgressing { + t.Errorf("Got canary state %v wanted %v", c.Status.Phase, flaggerv1.CanaryPhaseProgressing) + } + + // promote + mocks.ctrl.advanceCanary("podinfo", "default", true) + + // check promoting status + c, err = mocks.flaggerClient.FlaggerV1beta1().Canaries("default").Get("podinfo", metav1.GetOptions{}) + if err != nil { + t.Fatal(err.Error()) + } + + if c.Status.Phase != flaggerv1.CanaryPhasePromoting { + t.Errorf("Got canary state %v wanted %v", c.Status.Phase, flaggerv1.CanaryPhasePromoting) + } + + // finalise + mocks.ctrl.advanceCanary("podinfo", "default", true) + + primaryWeight, canaryWeight, mirrored, err = mocks.router.GetRoutes(mocks.canary) + if err != nil { + t.Fatal(err.Error()) + } + + if primaryWeight != 100 { + t.Errorf("Got primary route %v wanted %v", primaryWeight, 100) + } + + if canaryWeight != 0 { + t.Errorf("Got canary route %v wanted %v", canaryWeight, 0) + } + + if mirrored != false { + t.Errorf("Got mirrored %v wanted %v", mirrored, false) + } + + primaryDep, err := mocks.kubeClient.AppsV1().DaemonSets("default").Get("podinfo-primary", metav1.GetOptions{}) + if err != nil { + t.Fatal(err.Error()) + } + + primaryImage := primaryDep.Spec.Template.Spec.Containers[0].Image + canaryImage := dep2.Spec.Template.Spec.Containers[0].Image + if primaryImage != canaryImage { + t.Errorf("Got primary image %v wanted %v", primaryImage, canaryImage) + } 
+ + configPrimary, err := mocks.kubeClient.CoreV1().ConfigMaps("default").Get("podinfo-config-env-primary", metav1.GetOptions{}) + if err != nil { + t.Fatal(err.Error()) + } + + if configPrimary.Data["color"] != config2.Data["color"] { + t.Errorf("Got primary ConfigMap color %s wanted %s", configPrimary.Data["color"], config2.Data["color"]) + } + + secretPrimary, err := mocks.kubeClient.CoreV1().Secrets("default").Get("podinfo-secret-env-primary", metav1.GetOptions{}) + if err != nil { + t.Fatal(err.Error()) + } + + if string(secretPrimary.Data["apiKey"]) != string(secret2.Data["apiKey"]) { + t.Errorf("Got primary secret %s wanted %s", secretPrimary.Data["apiKey"], secret2.Data["apiKey"]) + } + + // check finalising status + c, err = mocks.flaggerClient.FlaggerV1beta1().Canaries("default").Get("podinfo", metav1.GetOptions{}) + if err != nil { + t.Fatal(err.Error()) + } + + if c.Status.Phase != flaggerv1.CanaryPhaseFinalising { + t.Errorf("Got canary state %v wanted %v", c.Status.Phase, flaggerv1.CanaryPhaseFinalising) + } + + // scale canary to zero + mocks.ctrl.advanceCanary("podinfo", "default", true) + + c, err = mocks.flaggerClient.FlaggerV1beta1().Canaries("default").Get("podinfo", metav1.GetOptions{}) + if err != nil { + t.Fatal(err.Error()) + } + + if c.Status.Phase != flaggerv1.CanaryPhaseSucceeded { + t.Errorf("Got canary state %v wanted %v", c.Status.Phase, flaggerv1.CanaryPhaseSucceeded) + } +} + +func TestScheduler_DaemonSetMirroring(t *testing.T) { + mocks := newDaemonSetFixture(newDaemonSetTestCanaryMirror()) + // init + mocks.ctrl.advanceCanary("podinfo", "default", true) + + // update + dep2 := newDaemonSetTestDaemonSetV2() + _, err := mocks.kubeClient.AppsV1().DaemonSets("default").Update(dep2) + if err != nil { + t.Fatal(err.Error()) + } + + // detect pod spec changes + mocks.ctrl.advanceCanary("podinfo", "default", true) + + // advance + mocks.ctrl.advanceCanary("podinfo", "default", true) + + // check if traffic is mirrored to canary + 
primaryWeight, canaryWeight, mirrored, err := mocks.router.GetRoutes(mocks.canary) + if err != nil { + t.Fatal(err.Error()) + } + + if primaryWeight != 100 { + t.Errorf("Got primary route %v wanted %v", primaryWeight, 100) + } + + if canaryWeight != 0 { + t.Errorf("Got canary route %v wanted %v", canaryWeight, 0) + } + + if mirrored != true { + t.Errorf("Got mirrored %v wanted %v", mirrored, true) + } + + // advance + mocks.ctrl.advanceCanary("podinfo", "default", true) + + // check if traffic is mirrored to canary + primaryWeight, canaryWeight, mirrored, err = mocks.router.GetRoutes(mocks.canary) + if err != nil { + t.Fatal(err.Error()) + } + + if primaryWeight != 90 { + t.Errorf("Got primary route %v wanted %v", primaryWeight, 90) + } + + if canaryWeight != 10 { + t.Errorf("Got canary route %v wanted %v", canaryWeight, 10) + } + + if mirrored != false { + t.Errorf("Got mirrored %v wanted %v", mirrored, false) + } +} + +func TestScheduler_DaemonSetABTesting(t *testing.T) { + mocks := newDaemonSetFixture(newDaemonSetTestCanaryAB()) + // init + mocks.ctrl.advanceCanary("podinfo", "default", true) + + // update + dep2 := newDaemonSetTestDaemonSetV2() + _, err := mocks.kubeClient.AppsV1().DaemonSets("default").Update(dep2) + if err != nil { + t.Fatal(err.Error()) + } + + // detect pod spec changes + mocks.ctrl.advanceCanary("podinfo", "default", true) + + // advance + mocks.ctrl.advanceCanary("podinfo", "default", true) + + // check if traffic is routed to canary + primaryWeight, canaryWeight, mirrored, err := mocks.router.GetRoutes(mocks.canary) + if err != nil { + t.Fatal(err.Error()) + } + + if primaryWeight != 0 { + t.Errorf("Got primary route %v wanted %v", primaryWeight, 0) + } + + if canaryWeight != 100 { + t.Errorf("Got canary route %v wanted %v", canaryWeight, 100) + } + + if mirrored != false { + t.Errorf("Got mirrored %v wanted %v", mirrored, false) + } + + cd, err := mocks.flaggerClient.FlaggerV1beta1().Canaries("default").Get("podinfo", 
metav1.GetOptions{}) + if err != nil { + t.Fatal(err.Error()) + } + + // set max iterations + if err := mocks.deployer.SetStatusIterations(cd, 10); err != nil { + t.Fatal(err.Error()) + } + + // advance + mocks.ctrl.advanceCanary("podinfo", "default", true) + + // finalising + mocks.ctrl.advanceCanary("podinfo", "default", true) + + // check finalising status + c, err := mocks.flaggerClient.FlaggerV1beta1().Canaries("default").Get("podinfo", metav1.GetOptions{}) + if err != nil { + t.Fatal(err.Error()) + } + + if c.Status.Phase != flaggerv1.CanaryPhaseFinalising { + t.Errorf("Got canary state %v wanted %v", c.Status.Phase, flaggerv1.CanaryPhaseFinalising) + } + + // check if the container image tag was updated + primaryDep, err := mocks.kubeClient.AppsV1().DaemonSets("default").Get("podinfo-primary", metav1.GetOptions{}) + if err != nil { + t.Fatal(err.Error()) + } + + primaryImage := primaryDep.Spec.Template.Spec.Containers[0].Image + canaryImage := dep2.Spec.Template.Spec.Containers[0].Image + if primaryImage != canaryImage { + t.Errorf("Got primary image %v wanted %v", primaryImage, canaryImage) + } + + // shutdown canary + mocks.ctrl.advanceCanary("podinfo", "default", true) + + // check rollout status + c, err = mocks.flaggerClient.FlaggerV1beta1().Canaries("default").Get("podinfo", metav1.GetOptions{}) + if err != nil { + t.Fatal(err.Error()) + } + + if c.Status.Phase != flaggerv1.CanaryPhaseSucceeded { + t.Errorf("Got canary state %v wanted %v", c.Status.Phase, flaggerv1.CanaryPhaseSucceeded) + } +} + +func TestScheduler_DaemonSetPortDiscovery(t *testing.T) { + mocks := newDaemonSetFixture(nil) + + // enable port discovery + cd, err := mocks.flaggerClient.FlaggerV1beta1().Canaries("default").Get("podinfo", metav1.GetOptions{}) + if err != nil { + t.Fatal(err.Error()) + } + cd.Spec.Service.PortDiscovery = true + _, err = mocks.flaggerClient.FlaggerV1beta1().Canaries("default").Update(cd) + if err != nil { + t.Fatal(err.Error()) + } + + 
mocks.ctrl.advanceCanary("podinfo", "default", true) + + canarySvc, err := mocks.kubeClient.CoreV1().Services("default").Get("podinfo-canary", metav1.GetOptions{}) + if err != nil { + t.Fatal(err.Error()) + } + + if len(canarySvc.Spec.Ports) != 3 { + t.Fatalf("Got svc port count %v wanted %v", len(canarySvc.Spec.Ports), 3) + } + + matchPorts := func(lookup string) bool { + switch lookup { + case + "http 9898", + "http-metrics 8080", + "tcp-podinfo-2 8888": + return true + } + return false + } + + for _, port := range canarySvc.Spec.Ports { + if !matchPorts(fmt.Sprintf("%s %v", port.Name, port.Port)) { + t.Fatalf("Got wrong svc port %v", port.Name) + } + + } +} + +func TestScheduler_DaemonSetTargetPortNumber(t *testing.T) { + mocks := newDaemonSetFixture(nil) + + cd, err := mocks.flaggerClient.FlaggerV1beta1().Canaries("default").Get("podinfo", metav1.GetOptions{}) + if err != nil { + t.Fatal(err.Error()) + } + cd.Spec.Service.Port = 80 + cd.Spec.Service.TargetPort = intstr.FromInt(9898) + cd.Spec.Service.PortDiscovery = true + _, err = mocks.flaggerClient.FlaggerV1beta1().Canaries("default").Update(cd) + if err != nil { + t.Fatal(err.Error()) + } + + mocks.ctrl.advanceCanary("podinfo", "default", true) + + canarySvc, err := mocks.kubeClient.CoreV1().Services("default").Get("podinfo-canary", metav1.GetOptions{}) + if err != nil { + t.Fatal(err.Error()) + } + + if len(canarySvc.Spec.Ports) != 3 { + t.Fatalf("Got svc port count %v wanted %v", len(canarySvc.Spec.Ports), 3) + } + + matchPorts := func(lookup string) bool { + switch lookup { + case + "http 80", + "http-metrics 8080", + "tcp-podinfo-2 8888": + return true + } + return false + } + + for _, port := range canarySvc.Spec.Ports { + if !matchPorts(fmt.Sprintf("%s %v", port.Name, port.Port)) { + t.Fatalf("Got wrong svc port %v", port.Name) + } + + } +} + +func TestScheduler_DaemonSetTargetPortName(t *testing.T) { + mocks := newDaemonSetFixture(nil) + + cd, err := 
mocks.flaggerClient.FlaggerV1beta1().Canaries("default").Get("podinfo", metav1.GetOptions{}) + if err != nil { + t.Fatal(err.Error()) + } + cd.Spec.Service.Port = 8080 + cd.Spec.Service.TargetPort = intstr.FromString("http") + cd.Spec.Service.PortDiscovery = true + _, err = mocks.flaggerClient.FlaggerV1beta1().Canaries("default").Update(cd) + if err != nil { + t.Fatal(err.Error()) + } + + mocks.ctrl.advanceCanary("podinfo", "default", true) + + canarySvc, err := mocks.kubeClient.CoreV1().Services("default").Get("podinfo-canary", metav1.GetOptions{}) + if err != nil { + t.Fatal(err.Error()) + } + + if len(canarySvc.Spec.Ports) != 3 { + t.Fatalf("Got svc port count %v wanted %v", len(canarySvc.Spec.Ports), 3) + } + + matchPorts := func(lookup string) bool { + switch lookup { + case + "http 8080", + "http-metrics 8080", + "tcp-podinfo-2 8888": + return true + } + return false + } + + for _, port := range canarySvc.Spec.Ports { + if !matchPorts(fmt.Sprintf("%s %v", port.Name, port.Port)) { + t.Fatalf("Got wrong svc port %v", port.Name) + } + + } +} + +func TestScheduler_DaemonSetAlerts(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + b, err := ioutil.ReadAll(r.Body) + if err != nil { + t.Fatal(err) + } + var payload = notifier.SlackPayload{} + err = json.Unmarshal(b, &payload) + if err != nil { + t.Fatal(err) + } + if payload.Attachments[0].AuthorName != "podinfo.default" { + t.Fatal("wrong author name") + } + })) + defer ts.Close() + + canary := newDaemonSetTestCanary() + canary.Spec.CanaryAnalysis.Alerts = []flaggerv1.CanaryAlert{ + { + Name: "slack-dev", + Severity: "info", + ProviderRef: flaggerv1.CrossNamespaceObjectReference{ + Name: "slack", + Namespace: "default", + }, + }, + { + Name: "slack-prod", + Severity: "info", + ProviderRef: flaggerv1.CrossNamespaceObjectReference{ + Name: "slack", + }, + }, + } + mocks := newDaemonSetFixture(canary) + + secret := newDaemonSetTestAlertProviderSecret() + 
secret.Data = map[string][]byte{ + "address": []byte(ts.URL), + } + _, err := mocks.kubeClient.CoreV1().Secrets("default").Update(secret) + if err != nil { + t.Fatal(err.Error()) + } + + // init canary and send alerts + mocks.ctrl.advanceCanary("podinfo", "default", true) +} diff --git a/pkg/controller/fixture.go b/pkg/controller/scheduler_deployment_fixture_test.go similarity index 91% rename from pkg/controller/fixture.go rename to pkg/controller/scheduler_deployment_fixture_test.go index 8b5c94adb..a3f2db509 100644 --- a/pkg/controller/fixture.go +++ b/pkg/controller/scheduler_deployment_fixture_test.go @@ -1,7 +1,6 @@ package controller import ( - "github.com/weaveworks/flagger/pkg/metrics/observers" "sync" "time" @@ -25,14 +24,10 @@ import ( informers "github.com/weaveworks/flagger/pkg/client/informers/externalversions" "github.com/weaveworks/flagger/pkg/logger" "github.com/weaveworks/flagger/pkg/metrics" + "github.com/weaveworks/flagger/pkg/metrics/observers" "github.com/weaveworks/flagger/pkg/router" ) -var ( - alwaysReady = func() bool { return true } - noResyncPeriodFunc = func() time.Duration { return 0 } -) - type fixture struct { canary *flaggerv1.Canary kubeClient kubernetes.Interface @@ -44,30 +39,30 @@ type fixture struct { router router.Interface } -func newFixture(c *flaggerv1.Canary) fixture { +func newDeploymentFixture(c *flaggerv1.Canary) fixture { if c == nil { - c = newTestCanary() + c = newDeploymentTestCanary() } // init Flagger clientset and register objects flaggerClient := fakeFlagger.NewSimpleClientset( c, - newTestMetricTemplate(), - newTestAlertProvider(), + newDeploymentTestMetricTemplate(), + newDeploymentTestAlertProvider(), ) // init Kubernetes clientset and register objects kubeClient := fake.NewSimpleClientset( - newTestDeployment(), - newTestService(), - newTestHPA(), - newTestConfigMap(), - newTestConfigMapEnv(), - newTestConfigMapVol(), - newTestSecret(), - newTestSecretEnv(), - newTestSecretVol(), - 
newTestAlertProviderSecret(), + newDeploymentTestDeployment(), + newDeploymentTestService(), + newDeploymentTestHPA(), + newDeploymentTestConfigMap(), + newDeploymentTestConfigMapEnv(), + newDeploymentTestConfigMapVol(), + newDeploymentTestSecret(), + newDeploymentTestSecretEnv(), + newDeploymentTestSecretVol(), + newDeploymentTestAlertProviderSecret(), ) logger, _ := logger.NewLogger("debug") @@ -113,8 +108,8 @@ func newFixture(c *flaggerv1.Canary) fixture { } ctrl.flaggerSynced = alwaysReady ctrl.flaggerInformers.CanaryInformer.Informer().GetIndexer().Add(c) - ctrl.flaggerInformers.MetricInformer.Informer().GetIndexer().Add(newTestMetricTemplate()) - ctrl.flaggerInformers.AlertInformer.Informer().GetIndexer().Add(newTestAlertProvider()) + ctrl.flaggerInformers.MetricInformer.Informer().GetIndexer().Add(newDeploymentTestMetricTemplate()) + ctrl.flaggerInformers.AlertInformer.Informer().GetIndexer().Add(newDeploymentTestAlertProvider()) meshRouter := rf.MeshRouter("istio") @@ -130,7 +125,7 @@ func newFixture(c *flaggerv1.Canary) fixture { } } -func newTestConfigMap() *corev1.ConfigMap { +func newDeploymentTestConfigMap() *corev1.ConfigMap { return &corev1.ConfigMap{ TypeMeta: metav1.TypeMeta{APIVersion: corev1.SchemeGroupVersion.String()}, ObjectMeta: metav1.ObjectMeta{ @@ -143,7 +138,7 @@ func newTestConfigMap() *corev1.ConfigMap { } } -func newTestConfigMapV2() *corev1.ConfigMap { +func newDeploymentTestConfigMapV2() *corev1.ConfigMap { return &corev1.ConfigMap{ TypeMeta: metav1.TypeMeta{APIVersion: corev1.SchemeGroupVersion.String()}, ObjectMeta: metav1.ObjectMeta{ @@ -157,7 +152,7 @@ func newTestConfigMapV2() *corev1.ConfigMap { } } -func newTestConfigMapEnv() *corev1.ConfigMap { +func newDeploymentTestConfigMapEnv() *corev1.ConfigMap { return &corev1.ConfigMap{ TypeMeta: metav1.TypeMeta{APIVersion: corev1.SchemeGroupVersion.String()}, ObjectMeta: metav1.ObjectMeta{ @@ -170,7 +165,7 @@ func newTestConfigMapEnv() *corev1.ConfigMap { } } -func 
newTestConfigMapVol() *corev1.ConfigMap { +func newDeploymentTestConfigMapVol() *corev1.ConfigMap { return &corev1.ConfigMap{ TypeMeta: metav1.TypeMeta{APIVersion: corev1.SchemeGroupVersion.String()}, ObjectMeta: metav1.ObjectMeta{ @@ -183,7 +178,7 @@ func newTestConfigMapVol() *corev1.ConfigMap { } } -func newTestSecret() *corev1.Secret { +func newDeploymentTestSecret() *corev1.Secret { return &corev1.Secret{ TypeMeta: metav1.TypeMeta{APIVersion: corev1.SchemeGroupVersion.String()}, ObjectMeta: metav1.ObjectMeta{ @@ -199,7 +194,7 @@ func newTestSecret() *corev1.Secret { } } -func newTestSecretV2() *corev1.Secret { +func newDeploymentTestSecretV2() *corev1.Secret { return &corev1.Secret{ TypeMeta: metav1.TypeMeta{APIVersion: corev1.SchemeGroupVersion.String()}, ObjectMeta: metav1.ObjectMeta{ @@ -215,7 +210,7 @@ func newTestSecretV2() *corev1.Secret { } } -func newTestSecretEnv() *corev1.Secret { +func newDeploymentTestSecretEnv() *corev1.Secret { return &corev1.Secret{ TypeMeta: metav1.TypeMeta{APIVersion: corev1.SchemeGroupVersion.String()}, ObjectMeta: metav1.ObjectMeta{ @@ -229,7 +224,7 @@ func newTestSecretEnv() *corev1.Secret { } } -func newTestSecretVol() *corev1.Secret { +func newDeploymentTestSecretVol() *corev1.Secret { return &corev1.Secret{ TypeMeta: metav1.TypeMeta{APIVersion: corev1.SchemeGroupVersion.String()}, ObjectMeta: metav1.ObjectMeta{ @@ -243,7 +238,7 @@ func newTestSecretVol() *corev1.Secret { } } -func newTestCanary() *flaggerv1.Canary { +func newDeploymentTestCanary() *flaggerv1.Canary { cd := &flaggerv1.Canary{ TypeMeta: metav1.TypeMeta{APIVersion: flaggerv1.SchemeGroupVersion.String()}, ObjectMeta: metav1.ObjectMeta{ @@ -299,18 +294,13 @@ func newTestCanary() *flaggerv1.Canary { return cd } -func toFloatPtr(val int) *float64 { - v := float64(val) - return &v -} - -func newTestCanaryMirror() *flaggerv1.Canary { - cd := newTestCanary() +func newDeploymentTestCanaryMirror() *flaggerv1.Canary { + cd := newDeploymentTestCanary() 
cd.Spec.CanaryAnalysis.Mirror = true return cd } -func newTestCanaryAB() *flaggerv1.Canary { +func newDeploymentTestCanaryAB() *flaggerv1.Canary { cd := &flaggerv1.Canary{ TypeMeta: metav1.TypeMeta{APIVersion: flaggerv1.SchemeGroupVersion.String()}, ObjectMeta: metav1.ObjectMeta{ @@ -371,7 +361,7 @@ func newTestCanaryAB() *flaggerv1.Canary { return cd } -func newTestDeployment() *appsv1.Deployment { +func newDeploymentTestDeployment() *appsv1.Deployment { d := &appsv1.Deployment{ TypeMeta: metav1.TypeMeta{APIVersion: appsv1.SchemeGroupVersion.String()}, ObjectMeta: metav1.ObjectMeta{ @@ -498,7 +488,7 @@ func newTestDeployment() *appsv1.Deployment { return d } -func newTestDeploymentV2() *appsv1.Deployment { +func newDeploymentTestDeploymentV2() *appsv1.Deployment { d := &appsv1.Deployment{ TypeMeta: metav1.TypeMeta{APIVersion: appsv1.SchemeGroupVersion.String()}, ObjectMeta: metav1.ObjectMeta{ @@ -608,7 +598,7 @@ func newTestDeploymentV2() *appsv1.Deployment { return d } -func newTestService() *corev1.Service { +func newDeploymentTestService() *corev1.Service { d := &corev1.Service{ TypeMeta: metav1.TypeMeta{APIVersion: appsv1.SchemeGroupVersion.String()}, ObjectMeta: metav1.ObjectMeta{ @@ -634,7 +624,7 @@ func newTestService() *corev1.Service { return d } -func newTestServiceV2() *corev1.Service { +func newDeploymentTestServiceV2() *corev1.Service { d := &corev1.Service{ TypeMeta: metav1.TypeMeta{APIVersion: appsv1.SchemeGroupVersion.String()}, ObjectMeta: metav1.ObjectMeta{ @@ -660,7 +650,7 @@ func newTestServiceV2() *corev1.Service { return d } -func newTestHPA() *hpav2.HorizontalPodAutoscaler { +func newDeploymentTestHPA() *hpav2.HorizontalPodAutoscaler { h := &hpav2.HorizontalPodAutoscaler{ TypeMeta: metav1.TypeMeta{APIVersion: hpav2.SchemeGroupVersion.String()}, ObjectMeta: metav1.ObjectMeta{ @@ -688,7 +678,7 @@ func newTestHPA() *hpav2.HorizontalPodAutoscaler { return h } -func newTestMetricTemplate() *flaggerv1.MetricTemplate { +func 
newDeploymentTestMetricTemplate() *flaggerv1.MetricTemplate { provider := flaggerv1.MetricTemplateProvider{ Type: "prometheus", Address: "fake", @@ -711,7 +701,7 @@ func newTestMetricTemplate() *flaggerv1.MetricTemplate { return template } -func newTestAlertProviderSecret() *corev1.Secret { +func newDeploymentTestAlertProviderSecret() *corev1.Secret { return &corev1.Secret{ TypeMeta: metav1.TypeMeta{APIVersion: corev1.SchemeGroupVersion.String()}, ObjectMeta: metav1.ObjectMeta{ @@ -725,7 +715,7 @@ func newTestAlertProviderSecret() *corev1.Secret { } } -func newTestAlertProvider() *flaggerv1.AlertProvider { +func newDeploymentTestAlertProvider() *flaggerv1.AlertProvider { return &flaggerv1.AlertProvider{ TypeMeta: metav1.TypeMeta{APIVersion: flaggerv1.SchemeGroupVersion.String()}, ObjectMeta: metav1.ObjectMeta{ diff --git a/pkg/controller/scheduler_test.go b/pkg/controller/scheduler_deployment_test.go similarity index 91% rename from pkg/controller/scheduler_test.go rename to pkg/controller/scheduler_deployment_test.go index f49a44f53..da72f3758 100644 --- a/pkg/controller/scheduler_test.go +++ b/pkg/controller/scheduler_deployment_test.go @@ -15,8 +15,8 @@ import ( "github.com/weaveworks/flagger/pkg/notifier" ) -func TestScheduler_Init(t *testing.T) { - mocks := newFixture(nil) +func TestScheduler_DeploymentInit(t *testing.T) { + mocks := newDeploymentFixture(nil) mocks.ctrl.advanceCanary("podinfo", "default", true) _, err := mocks.kubeClient.AppsV1().Deployments("default").Get("podinfo-primary", metav1.GetOptions{}) @@ -25,12 +25,12 @@ func TestScheduler_Init(t *testing.T) { } } -func TestScheduler_NewRevision(t *testing.T) { - mocks := newFixture(nil) +func TestScheduler_DeploymentNewRevision(t *testing.T) { + mocks := newDeploymentFixture(nil) mocks.ctrl.advanceCanary("podinfo", "default", true) // update - dep2 := newTestDeploymentV2() + dep2 := newDeploymentTestDeploymentV2() _, err := mocks.kubeClient.AppsV1().Deployments("default").Update(dep2) if err != nil 
{ t.Fatal(err.Error()) @@ -49,8 +49,8 @@ func TestScheduler_NewRevision(t *testing.T) { } } -func TestScheduler_Rollback(t *testing.T) { - mocks := newFixture(nil) +func TestScheduler_DeploymentRollback(t *testing.T) { + mocks := newDeploymentFixture(nil) // init mocks.ctrl.advanceCanary("podinfo", "default", true) @@ -103,8 +103,8 @@ func TestScheduler_Rollback(t *testing.T) { } } -func TestScheduler_SkipAnalysis(t *testing.T) { - mocks := newFixture(nil) +func TestScheduler_DeploymentSkipAnalysis(t *testing.T) { + mocks := newDeploymentFixture(nil) // init mocks.ctrl.advanceCanary("podinfo", "default", true) @@ -120,7 +120,7 @@ func TestScheduler_SkipAnalysis(t *testing.T) { } // update - dep2 := newTestDeploymentV2() + dep2 := newDeploymentTestDeploymentV2() _, err = mocks.kubeClient.AppsV1().Deployments("default").Update(dep2) if err != nil { t.Fatal(err.Error()) @@ -144,13 +144,13 @@ func TestScheduler_SkipAnalysis(t *testing.T) { } } -func TestScheduler_NewRevisionReset(t *testing.T) { - mocks := newFixture(nil) +func TestScheduler_DeploymentNewRevisionReset(t *testing.T) { + mocks := newDeploymentFixture(nil) // init mocks.ctrl.advanceCanary("podinfo", "default", true) // first update - dep2 := newTestDeploymentV2() + dep2 := newDeploymentTestDeploymentV2() _, err := mocks.kubeClient.AppsV1().Deployments("default").Update(dep2) if err != nil { t.Fatal(err.Error()) @@ -206,8 +206,8 @@ func TestScheduler_NewRevisionReset(t *testing.T) { } } -func TestScheduler_Promotion(t *testing.T) { - mocks := newFixture(nil) +func TestScheduler_DeploymentPromotion(t *testing.T) { + mocks := newDeploymentFixture(nil) // init mocks.ctrl.advanceCanary("podinfo", "default", true) @@ -223,7 +223,7 @@ func TestScheduler_Promotion(t *testing.T) { } // update - dep2 := newTestDeploymentV2() + dep2 := newDeploymentTestDeploymentV2() _, err = mocks.kubeClient.AppsV1().Deployments("default").Update(dep2) if err != nil { t.Fatal(err.Error()) @@ -232,13 +232,13 @@ func 
TestScheduler_Promotion(t *testing.T) { // detect pod spec changes mocks.ctrl.advanceCanary("podinfo", "default", true) - config2 := newTestConfigMapV2() + config2 := newDeploymentTestConfigMapV2() _, err = mocks.kubeClient.CoreV1().ConfigMaps("default").Update(config2) if err != nil { t.Fatal(err.Error()) } - secret2 := newTestSecretV2() + secret2 := newDeploymentTestSecretV2() _, err = mocks.kubeClient.CoreV1().Secrets("default").Update(secret2) if err != nil { t.Fatal(err.Error()) @@ -357,13 +357,13 @@ func TestScheduler_Promotion(t *testing.T) { } } -func TestScheduler_Mirroring(t *testing.T) { - mocks := newFixture(newTestCanaryMirror()) +func TestScheduler_DeploymentMirroring(t *testing.T) { + mocks := newDeploymentFixture(newDeploymentTestCanaryMirror()) // init mocks.ctrl.advanceCanary("podinfo", "default", true) // update - dep2 := newTestDeploymentV2() + dep2 := newDeploymentTestDeploymentV2() _, err := mocks.kubeClient.AppsV1().Deployments("default").Update(dep2) if err != nil { t.Fatal(err.Error()) @@ -415,13 +415,13 @@ func TestScheduler_Mirroring(t *testing.T) { } } -func TestScheduler_ABTesting(t *testing.T) { - mocks := newFixture(newTestCanaryAB()) +func TestScheduler_DeploymentABTesting(t *testing.T) { + mocks := newDeploymentFixture(newDeploymentTestCanaryAB()) // init mocks.ctrl.advanceCanary("podinfo", "default", true) // update - dep2 := newTestDeploymentV2() + dep2 := newDeploymentTestDeploymentV2() _, err := mocks.kubeClient.AppsV1().Deployments("default").Update(dep2) if err != nil { t.Fatal(err.Error()) @@ -503,8 +503,8 @@ func TestScheduler_ABTesting(t *testing.T) { } } -func TestScheduler_PortDiscovery(t *testing.T) { - mocks := newFixture(nil) +func TestScheduler_DeploymentPortDiscovery(t *testing.T) { + mocks := newDeploymentFixture(nil) // enable port discovery cd, err := mocks.flaggerClient.FlaggerV1beta1().Canaries("default").Get("podinfo", metav1.GetOptions{}) @@ -547,8 +547,8 @@ func TestScheduler_PortDiscovery(t *testing.T) { } } 
-func TestScheduler_TargetPortNumber(t *testing.T) { - mocks := newFixture(nil) +func TestScheduler_DeploymentTargetPortNumber(t *testing.T) { + mocks := newDeploymentFixture(nil) cd, err := mocks.flaggerClient.FlaggerV1beta1().Canaries("default").Get("podinfo", metav1.GetOptions{}) if err != nil { @@ -592,8 +592,8 @@ func TestScheduler_TargetPortNumber(t *testing.T) { } } -func TestScheduler_TargetPortName(t *testing.T) { - mocks := newFixture(nil) +func TestScheduler_DeploymentTargetPortName(t *testing.T) { + mocks := newDeploymentFixture(nil) cd, err := mocks.flaggerClient.FlaggerV1beta1().Canaries("default").Get("podinfo", metav1.GetOptions{}) if err != nil { @@ -637,7 +637,7 @@ func TestScheduler_TargetPortName(t *testing.T) { } } -func TestScheduler_Alerts(t *testing.T) { +func TestScheduler_DeploymentAlerts(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { b, err := ioutil.ReadAll(r.Body) if err != nil { @@ -654,7 +654,7 @@ func TestScheduler_Alerts(t *testing.T) { })) defer ts.Close() - canary := newTestCanary() + canary := newDeploymentTestCanary() canary.Spec.CanaryAnalysis.Alerts = []flaggerv1.CanaryAlert{ { Name: "slack-dev", @@ -672,9 +672,9 @@ func TestScheduler_Alerts(t *testing.T) { }, }, } - mocks := newFixture(canary) + mocks := newDeploymentFixture(canary) - secret := newTestAlertProviderSecret() + secret := newDeploymentTestAlertProviderSecret() secret.Data = map[string][]byte{ "address": []byte(ts.URL), } diff --git a/pkg/controller/scheduler_svc_test.go b/pkg/controller/scheduler_svc_test.go index fa2a4691e..5e11d9fcd 100644 --- a/pkg/controller/scheduler_svc_test.go +++ b/pkg/controller/scheduler_svc_test.go @@ -9,7 +9,7 @@ import ( ) func TestScheduler_ServicePromotion(t *testing.T) { - mocks := newFixture(newTestServiceCanary()) + mocks := newDeploymentFixture(newTestServiceCanary()) // init mocks.ctrl.advanceCanary("podinfo", "default", true) @@ -25,7 +25,7 @@ func 
TestScheduler_ServicePromotion(t *testing.T) { } // update - svc2 := newTestServiceV2() + svc2 := newDeploymentTestServiceV2() _, err = mocks.kubeClient.CoreV1().Services("default").Update(svc2) if err != nil { t.Fatal(err.Error()) diff --git a/pkg/controller/webhook_test.go b/pkg/controller/webhook_test.go index 43143441c..96750148f 100644 --- a/pkg/controller/webhook_test.go +++ b/pkg/controller/webhook_test.go @@ -3,12 +3,13 @@ package controller import ( "encoding/json" "fmt" - corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1" "net/http" "net/http/httptest" "testing" + corev1 "k8s.io/api/core/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + flaggerv1 "github.com/weaveworks/flagger/pkg/apis/flagger/v1beta1" ) diff --git a/test/e2e-daemonset.yaml b/test/e2e-daemonset.yaml new file mode 100644 index 000000000..40cc48b6c --- /dev/null +++ b/test/e2e-daemonset.yaml @@ -0,0 +1,63 @@ +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: podinfo + namespace: test + labels: + app: podinfo +spec: + minReadySeconds: 5 + revisionHistoryLimit: 5 + selector: + matchLabels: + app: podinfo + template: + metadata: + annotations: + prometheus.io/scrape: "true" + prometheus.io/port: "9797" + labels: + app: podinfo + spec: + containers: + - name: podinfod + image: stefanprodan/podinfo:3.1.0 + imagePullPolicy: IfNotPresent + ports: + - name: http + containerPort: 9898 + protocol: TCP + - name: http-metrics + containerPort: 9797 + protocol: TCP + - name: grpc + containerPort: 9999 + protocol: TCP + command: + - ./podinfo + - --port=9898 + - --port-metrics=9797 + - --grpc-port=9999 + - --grpc-service-name=podinfo + - --level=info + - --random-delay=false + - --random-error=false + livenessProbe: + httpGet: + port: 9898 + path: /healthz + initialDelaySeconds: 5 + timeoutSeconds: 5 + readinessProbe: + httpGet: + port: 9898 + path: /readyz + initialDelaySeconds: 5 + timeoutSeconds: 5 + resources: + limits: + cpu: 1000m + memory: 128Mi + requests: + cpu: 1m + 
memory: 16Mi diff --git a/test/e2e-kubernetes-tests-daemonset.sh b/test/e2e-kubernetes-tests-daemonset.sh new file mode 100755 index 000000000..2b35c3bcc --- /dev/null +++ b/test/e2e-kubernetes-tests-daemonset.sh @@ -0,0 +1,111 @@ +#!/usr/bin/env bash + +# This script runs e2e tests for Blue/Green initialization, analysis and promotion +# Prerequisites: Kubernetes Kind, Kustomize + +set -o errexit + +REPO_ROOT=$(git rev-parse --show-toplevel) + +echo '>>> Creating test namespace' +kubectl create namespace test + +echo '>>> Installing the load tester' +kubectl apply -k ${REPO_ROOT}/kustomize/tester +kubectl -n test rollout status deployment/flagger-loadtester + +echo '>>> Initialising canary' +kubectl apply -f ${REPO_ROOT}/test/e2e-daemonset.yaml + +cat <>> Waiting for primary to be ready' +retries=50 +count=0 +ok=false +until ${ok}; do + kubectl -n test get canary/podinfo | grep 'Initialized' && ok=true || ok=false + sleep 5 + count=$(($count + 1)) + if [[ ${count} -eq ${retries} ]]; then + kubectl -n flagger-system logs deployment/flagger + echo "No more retries left" + exit 1 + fi +done + +echo '✔ Canary initialization test passed' + +echo '>>> Triggering canary daemonset' +kubectl -n test set image daemonset/podinfo podinfod=stefanprodan/podinfo:3.1.1 + +echo '>>> Waiting for canary promotion' +retries=50 +count=0 +ok=false +until ${ok}; do + kubectl -n test describe daemonset/podinfo-primary | grep '3.1.1' && ok=true || ok=false + sleep 10 + kubectl -n flagger-system logs deployment/flagger --tail 1 + count=$(($count + 1)) + if [[ ${count} -eq ${retries} ]]; then + kubectl -n test describe daemonset/podinfo + kubectl -n test describe daemonset/podinfo-primary + kubectl -n flagger-system logs deployment/flagger + echo "No more retries left" + exit 1 + fi +done + +echo '✔ Canary promotion test passed' + +kubectl -n flagger-system logs deployment/flagger diff --git a/test/e2e-kubernetes-tests.sh b/test/e2e-kubernetes-tests-deployment.sh similarity index 100% 
rename from test/e2e-kubernetes-tests.sh rename to test/e2e-kubernetes-tests-deployment.sh