diff --git a/charts/flagger/README.md b/charts/flagger/README.md
index fc9931322..6af129182 100644
--- a/charts/flagger/README.md
+++ b/charts/flagger/README.md
@@ -74,34 +74,35 @@ The following tables lists the configurable parameters of the Flagger chart and
 
 Parameter | Description | Default
 --- | --- | ---
-`image.repository` | image repository | `weaveworks/flagger`
-`image.tag` | image tag | ``
-`image.pullPolicy` | image pull policy | `IfNotPresent`
-`prometheus.install` | if `true`, installs Prometheus configured to scrape all pods in the custer including the App Mesh sidecar | `false`
+`image.repository` | Image repository | `weaveworks/flagger`
+`image.tag` | Image tag | ``
+`image.pullPolicy` | Image pull policy | `IfNotPresent`
+`prometheus.install` | If `true`, installs Prometheus configured to scrape all pods in the cluster including the App Mesh sidecar | `false`
 `metricsServer` | Prometheus URL, used when `prometheus.install` is `false` | `http://prometheus.istio-system:9090`
-`selectorLabels` | list of labels that Flagger uses to create pod selectors | `app,name,app.kubernetes.io/name`
+`selectorLabels` | List of labels that Flagger uses to create pod selectors | `app,name,app.kubernetes.io/name`
+`configTracking.enabled` | If `true`, Flagger will track changes in Secrets and ConfigMaps referenced in the target deployment | `true`
+`eventWebhook` | If set, Flagger will publish events to the given webhook | None
 `slack.url` | Slack incoming webhook | None
 `slack.channel` | Slack channel | None
 `slack.user` | Slack username | `flagger`
-`eventWebhook` | If set, Flagger will publish events to the given webhook | None
 `msteams.url` | Microsoft Teams incoming webhook | None
-`podMonitor.enabled` | if `true`, create a PodMonitor for [monitoring the metrics](https://docs.flagger.app/usage/monitoring#metrics) | `false`
-`podMonitor.namespace` | the namespace where the PodMonitor is created | the same namespace
-`podMonitor.interval` | interval at which metrics should be scraped | `15s`
-`podMonitor.podMonitor` | additional labels to add to the PodMonitor | `{}`
-`leaderElection.enabled` | leader election must be enabled when running more than one replica | `false`
-`leaderElection.replicaCount` | number of replicas | `1`
-`ingressAnnotationsPrefix` | annotations prefix for ingresses | `custom.ingress.kubernetes.io`
-`rbac.create` | if `true`, create and use RBAC resources | `true`
+`podMonitor.enabled` | If `true`, create a PodMonitor for [monitoring the metrics](https://docs.flagger.app/usage/monitoring#metrics) | `false`
+`podMonitor.namespace` | Namespace where the PodMonitor is created | the same namespace
+`podMonitor.interval` | Interval at which metrics should be scraped | `15s`
+`podMonitor.podMonitor` | Additional labels to add to the PodMonitor | `{}`
+`leaderElection.enabled` | If `true`, Flagger will run in HA mode | `false`
+`leaderElection.replicaCount` | Number of replicas | `1`
+`ingressAnnotationsPrefix` | Annotations prefix for ingresses | `custom.ingress.kubernetes.io`
+`rbac.create` | If `true`, create and use RBAC resources | `true`
 `rbac.pspEnabled` | If `true`, create and use a restricted pod security policy | `false`
-`crd.create` | if `true`, create Flagger's CRDs | `true`
-`resources.requests/cpu` | pod CPU request | `10m`
-`resources.requests/memory` | pod memory request | `32Mi`
-`resources.limits/cpu` | pod CPU limit | `1000m`
-`resources.limits/memory` | pod memory limit | `512Mi`
-`affinity` | node/pod affinities | None
-`nodeSelector` | node labels for pod assignment | `{}`
-`tolerations` | list of node taints to tolerate | `[]`
+`crd.create` | If `true`, create Flagger's CRDs | `true`
+`resources.requests/cpu` | Pod CPU request | `10m`
+`resources.requests/memory` | Pod memory request | `32Mi`
+`resources.limits/cpu` | Pod CPU limit | `1000m`
+`resources.limits/memory` | Pod memory limit | `512Mi`
+`affinity` | Node/pod affinities | None
+`nodeSelector` | Node labels for pod assignment | `{}`
+`tolerations` | List of node taints to tolerate | `[]`
 
 Specify each parameter using the `--set key=value[,key=value]` argument to `helm upgrade`. For example,
diff --git a/charts/flagger/templates/deployment.yaml b/charts/flagger/templates/deployment.yaml
index 31d5b4238..5941d8fe1 100644
--- a/charts/flagger/templates/deployment.yaml
+++ b/charts/flagger/templates/deployment.yaml
@@ -9,8 +9,10 @@ metadata:
     app.kubernetes.io/instance: {{ .Release.Name }}
 spec:
   replicas: {{ .Values.leaderElection.replicaCount }}
+  {{- if eq .Values.leaderElection.enabled false }}
   strategy:
     type: Recreate
+  {{- end }}
   selector:
     matchLabels:
       app.kubernetes.io/name: {{ template "flagger.name" . }}
@@ -64,6 +66,9 @@ spec:
         {{- if .Values.selectorLabels }}
         - -selector-labels={{ .Values.selectorLabels }}
         {{- end }}
+        {{- if .Values.configTracking }}
+        - -enable-config-tracking={{ .Values.configTracking.enabled }}
+        {{- end }}
         {{- if .Values.namespace }}
         - -namespace={{ .Values.namespace }}
         {{- end }}
diff --git a/charts/flagger/values.yaml b/charts/flagger/values.yaml
index e03d68822..826c551ac 100644
--- a/charts/flagger/values.yaml
+++ b/charts/flagger/values.yaml
@@ -23,15 +23,19 @@ namespace: ""
 # defaults to: app,name,app.kubernetes.io/name
 selectorLabels: ""
 
+# when enabled, flagger will track changes in Secrets and ConfigMaps referenced in the target deployment (enabled by default)
+configTracking:
+  enabled: true
+
+# when specified, flagger will publish events to the provided webhook
+eventWebhook: ""
+
 slack:
   user: flagger
   channel:
   # incoming webhook https://api.slack.com/incoming-webhooks
   url:
 
-# when specified, flagger will publish events to the provided webhook
-eventWebhook: ""
-
 msteams:
   # MS Teams incoming webhook URL
   url:
@@ -91,5 +95,5 @@ nodeSelector: {}
 tolerations: []
 
 prometheus:
-  # to be used with AppMesh or nginx ingress
+  # to be used with ingress controllers
   install: false
diff --git a/cmd/flagger/main.go b/cmd/flagger/main.go
index e8e29e6b5..5b8ef3764 100644
--- a/cmd/flagger/main.go
+++ b/cmd/flagger/main.go
@@ -56,6 +56,7 @@ var (
 	ingressAnnotationsPrefix string
 	enableLeaderElection     bool
 	leaderElectionNamespace  string
+	enableConfigTracking     bool
 	ver                      bool
 )
 
@@ -80,6 +81,7 @@ func init() {
 	flag.StringVar(&ingressAnnotationsPrefix, "ingress-annotations-prefix", "nginx.ingress.kubernetes.io", "Annotations prefix for ingresses.")
 	flag.BoolVar(&enableLeaderElection, "enable-leader-election", false, "Enable leader election.")
 	flag.StringVar(&leaderElectionNamespace, "leader-election-namespace", "kube-system", "Namespace used to create the leader election config map.")
+	flag.BoolVar(&enableConfigTracking, "enable-config-tracking", true, "Enable secrets and configmaps tracking.")
 	flag.BoolVar(&ver, "version", false, "Print version")
 }
 
@@ -173,11 +175,18 @@ func main() {
 	}
 
 	routerFactory := router.NewFactory(cfg, kubeClient, flaggerClient, ingressAnnotationsPrefix, logger, meshClient)
-	configTracker := canary.ConfigTracker{
-		Logger:        logger,
-		KubeClient:    kubeClient,
-		FlaggerClient: flaggerClient,
+
+	var configTracker canary.Tracker
+	if enableConfigTracking {
+		configTracker = &canary.ConfigTracker{
+			Logger:        logger,
+			KubeClient:    kubeClient,
+			FlaggerClient: flaggerClient,
+		}
+	} else {
+		configTracker = &canary.NopTracker{}
 	}
+
 	canaryFactory := canary.NewFactory(kubeClient, flaggerClient, configTracker, labels, logger)
 
 	c := controller.NewController(
diff --git a/pkg/canary/config_tracker.go b/pkg/canary/config_tracker.go
new file mode 100644
index 000000000..072c4b976
--- /dev/null
+++ b/pkg/canary/config_tracker.go
@@ -0,0 +1,376 @@
+package canary
+
+import (
+	"crypto/sha256"
+	"encoding/json"
+	"fmt"
+
+	"go.uber.org/zap"
+	corev1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/api/errors"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/client-go/kubernetes"
+
+	flaggerv1 "github.com/weaveworks/flagger/pkg/apis/flagger/v1beta1"
+	clientset "github.com/weaveworks/flagger/pkg/client/clientset/versioned"
+)
+
+// ConfigTracker is managing the operations for Kubernetes ConfigMaps and Secrets
+type ConfigTracker struct {
+	KubeClient    kubernetes.Interface
+	FlaggerClient clientset.Interface
+	Logger        *zap.SugaredLogger
+}
+
+type ConfigRefType string
+
+const (
+	ConfigRefMap    ConfigRefType = "configmap"
+	ConfigRefSecret ConfigRefType = "secret"
+)
+
+// ConfigRef holds the reference to a tracked Kubernetes ConfigMap or Secret
+type ConfigRef struct {
+	Name     string
+	Type     ConfigRefType
+	Checksum string
+}
+
+// GetName returns the config ref type and name
+func (c *ConfigRef) GetName() string {
+	return fmt.Sprintf("%s/%s", c.Type, c.Name)
+}
+
+func checksum(data interface{}) string {
+	jsonBytes, _ := json.Marshal(data)
+	hashBytes := sha256.Sum256(jsonBytes)
+
+	return fmt.Sprintf("%x", hashBytes[:8])
+}
+
+// getRefFromConfigMap transforms a Kubernetes ConfigMap into a ConfigRef
+// and computes the checksum of the ConfigMap data
+func (ct *ConfigTracker) getRefFromConfigMap(name string, namespace string) (*ConfigRef, error) {
+	config, err := ct.KubeClient.CoreV1().ConfigMaps(namespace).Get(name, metav1.GetOptions{})
+	if err != nil {
+		return nil, err
+	}
+
+	return &ConfigRef{
+		Name:     config.Name,
+		Type:     ConfigRefMap,
+		Checksum: checksum(config.Data),
+	}, nil
+}
+
+// getRefFromSecret transforms a Kubernetes Secret into a ConfigRef
+// and computes the checksum of the Secret data
+func (ct *ConfigTracker) getRefFromSecret(name string, namespace string) (*ConfigRef, error) {
+	secret, err := ct.KubeClient.CoreV1().Secrets(namespace).Get(name, metav1.GetOptions{})
+	if err != nil {
+		return nil, err
+	}
+
+	// ignore registry secrets (those should be set via service account)
+	if secret.Type != corev1.SecretTypeOpaque &&
+		secret.Type != corev1.SecretTypeBasicAuth &&
+		secret.Type != corev1.SecretTypeSSHAuth &&
+		secret.Type != corev1.SecretTypeTLS {
+		ct.Logger.Debugf("ignoring secret %s.%s type not supported %v", name, namespace, secret.Type)
+		return nil, nil
+	}
+
+	return &ConfigRef{
+		Name:     secret.Name,
+		Type:     ConfigRefSecret,
+		Checksum: checksum(secret.Data),
+	}, nil
+}
+
+// GetTargetConfigs scans the target deployment for Kubernetes ConfigMaps and Secrets
+// and returns a list of config references
+func (ct *ConfigTracker) GetTargetConfigs(cd *flaggerv1.Canary) (map[string]ConfigRef, error) {
+	res := make(map[string]ConfigRef)
+	targetName := cd.Spec.TargetRef.Name
+	targetDep, err := ct.KubeClient.AppsV1().Deployments(cd.Namespace).Get(targetName, metav1.GetOptions{})
+	if err != nil {
+		if errors.IsNotFound(err) {
+			return res, fmt.Errorf("deployment %s.%s not found", targetName, cd.Namespace)
+		}
+		return res, fmt.Errorf("deployment %s.%s query error %v", targetName, cd.Namespace, err)
+	}
+
+	// scan volumes
+	for _, volume := range targetDep.Spec.Template.Spec.Volumes {
+		if cmv := volume.ConfigMap; cmv != nil {
+			config, err := ct.getRefFromConfigMap(cmv.Name, cd.Namespace)
+			if err != nil {
+				ct.Logger.Errorf("configMap %s.%s query error %v", cmv.Name, cd.Namespace, err)
+				continue
+			}
+			if config != nil {
+				res[config.GetName()] = *config
+			}
+		}
+
+		if sv := volume.Secret; sv != nil {
+			secret, err := ct.getRefFromSecret(sv.SecretName, cd.Namespace)
+			if err != nil {
+				ct.Logger.Errorf("secret %s.%s query error %v", sv.SecretName, cd.Namespace, err)
+				continue
+			}
+			if secret != nil {
+				res[secret.GetName()] = *secret
+			}
+		}
+	}
+	// scan containers
+	for _, container := range targetDep.Spec.Template.Spec.Containers {
+		// scan env
+		for _, env := range container.Env {
+			if env.ValueFrom != nil {
+				switch {
+				case env.ValueFrom.ConfigMapKeyRef != nil:
+					name := env.ValueFrom.ConfigMapKeyRef.LocalObjectReference.Name
+					config, err := ct.getRefFromConfigMap(name, cd.Namespace)
+					if err != nil {
+						ct.Logger.Errorf("configMap %s.%s query error %v", name, cd.Namespace, err)
+						continue
+					}
+					if config != nil {
+						res[config.GetName()] = *config
+					}
+				case env.ValueFrom.SecretKeyRef != nil:
+					name := env.ValueFrom.SecretKeyRef.LocalObjectReference.Name
+					secret, err := ct.getRefFromSecret(name, cd.Namespace)
+					if err != nil {
+						ct.Logger.Errorf("secret %s.%s query error %v", name, cd.Namespace, err)
+						continue
+					}
+					if secret != nil {
+						res[secret.GetName()] = *secret
+					}
+				}
+			}
+		}
+		// scan envFrom
+		for _, envFrom := range container.EnvFrom {
+			switch {
+			case envFrom.ConfigMapRef != nil:
+				name := envFrom.ConfigMapRef.LocalObjectReference.Name
+				config, err := ct.getRefFromConfigMap(name, cd.Namespace)
+				if err != nil {
+					ct.Logger.Errorf("configMap %s.%s query error %v", name, cd.Namespace, err)
+					continue
+				}
+				if config != nil {
+					res[config.GetName()] = *config
+				}
+			case envFrom.SecretRef != nil:
+				name := envFrom.SecretRef.LocalObjectReference.Name
+				secret, err := ct.getRefFromSecret(name, cd.Namespace)
+				if err != nil {
+					ct.Logger.Errorf("secret %s.%s query error %v", name, cd.Namespace, err)
+					continue
+				}
+				if secret != nil {
+					res[secret.GetName()] = *secret
+				}
+			}
+		}
+	}
+
+	return res, nil
+}
+
+// GetConfigRefs returns a map of configs and their checksum
+func (ct *ConfigTracker) GetConfigRefs(cd *flaggerv1.Canary) (*map[string]string, error) {
+	res := make(map[string]string)
+	configs, err := ct.GetTargetConfigs(cd)
+	if err != nil {
+		return nil, err
+	}
+
+	for _, cfg := range configs {
+		res[cfg.GetName()] = cfg.Checksum
+	}
+
+	return &res, nil
+}
+
+// HasConfigChanged checks for changes in ConfigMaps and Secrets by comparing
+// the checksum for each ConfigRef stored in Canary.Status.TrackedConfigs
+func (ct *ConfigTracker) HasConfigChanged(cd *flaggerv1.Canary) (bool, error) {
+	configs, err := ct.GetTargetConfigs(cd)
+	if err != nil {
+		return false, err
+	}
+
+	if len(configs) == 0 && cd.Status.TrackedConfigs == nil {
+		return false, nil
+	}
+
+	if len(configs) > 0 && cd.Status.TrackedConfigs == nil {
+		return true, nil
+	}
+
+	trackedConfigs := *cd.Status.TrackedConfigs
+
+	if len(configs) != len(trackedConfigs) {
+		return true, nil
+	}
+
+	for _, cfg := range configs {
+		if trackedConfigs[cfg.GetName()] != cfg.Checksum {
+			ct.Logger.With("canary", fmt.Sprintf("%s.%s", cd.Name, cd.Namespace)).
+				Infof("%s %s has changed", cfg.Type, cfg.Name)
+			return true, nil
+		}
+	}
+
+	return false, nil
+}
+
+// CreatePrimaryConfigs syncs the primary Kubernetes ConfigMaps and Secrets
+// with those found in the target deployment
+func (ct *ConfigTracker) CreatePrimaryConfigs(cd *flaggerv1.Canary, refs map[string]ConfigRef) error {
+	for _, ref := range refs {
+		switch ref.Type {
+		case ConfigRefMap:
+			config, err := ct.KubeClient.CoreV1().ConfigMaps(cd.Namespace).Get(ref.Name, metav1.GetOptions{})
+			if err != nil {
+				return err
+			}
+			primaryName := fmt.Sprintf("%s-primary", config.GetName())
+			primaryConfigMap := &corev1.ConfigMap{
+				ObjectMeta: metav1.ObjectMeta{
+					Name:      primaryName,
+					Namespace: cd.Namespace,
+					Labels:    config.Labels,
+					OwnerReferences: []metav1.OwnerReference{
+						*metav1.NewControllerRef(cd, schema.GroupVersionKind{
+							Group:   flaggerv1.SchemeGroupVersion.Group,
+							Version: flaggerv1.SchemeGroupVersion.Version,
+							Kind:    flaggerv1.CanaryKind,
+						}),
+					},
+				},
+				Data: config.Data,
+			}
+
+			// update or insert primary ConfigMap
+			_, err = ct.KubeClient.CoreV1().ConfigMaps(cd.Namespace).Update(primaryConfigMap)
+			if err != nil {
+				if errors.IsNotFound(err) {
+					_, err = ct.KubeClient.CoreV1().ConfigMaps(cd.Namespace).Create(primaryConfigMap)
+					if err != nil {
+						return err
+					}
+				} else {
+					return err
+				}
+			}
+
+			ct.Logger.With("canary", fmt.Sprintf("%s.%s", cd.Name, cd.Namespace)).
+				Infof("ConfigMap %s synced", primaryConfigMap.GetName())
+		case ConfigRefSecret:
+			secret, err := ct.KubeClient.CoreV1().Secrets(cd.Namespace).Get(ref.Name, metav1.GetOptions{})
+			if err != nil {
+				return err
+			}
+			primaryName := fmt.Sprintf("%s-primary", secret.GetName())
+			primarySecret := &corev1.Secret{
+				ObjectMeta: metav1.ObjectMeta{
+					Name:      primaryName,
+					Namespace: cd.Namespace,
+					Labels:    secret.Labels,
+					OwnerReferences: []metav1.OwnerReference{
+						*metav1.NewControllerRef(cd, schema.GroupVersionKind{
+							Group:   flaggerv1.SchemeGroupVersion.Group,
+							Version: flaggerv1.SchemeGroupVersion.Version,
+							Kind:    flaggerv1.CanaryKind,
+						}),
+					},
+				},
+				Type: secret.Type,
+				Data: secret.Data,
+			}
+
+			// update or insert primary Secret
+			_, err = ct.KubeClient.CoreV1().Secrets(cd.Namespace).Update(primarySecret)
+			if err != nil {
+				if errors.IsNotFound(err) {
+					_, err = ct.KubeClient.CoreV1().Secrets(cd.Namespace).Create(primarySecret)
+					if err != nil {
+						return err
+					}
+				} else {
+					return err
+				}
+			}
+
+			ct.Logger.With("canary", fmt.Sprintf("%s.%s", cd.Name, cd.Namespace)).
+ Infof("Secret %s synced", primarySecret.GetName()) + } + } + + return nil +} + +// ApplyPrimaryConfigs appends the primary suffix to all ConfigMaps and Secretes found in the PodSpec +func (ct *ConfigTracker) ApplyPrimaryConfigs(spec corev1.PodSpec, refs map[string]ConfigRef) corev1.PodSpec { + // update volumes + for i, volume := range spec.Volumes { + if cmv := volume.ConfigMap; cmv != nil { + name := fmt.Sprintf("%s/%s", ConfigRefMap, cmv.Name) + if _, exists := refs[name]; exists { + spec.Volumes[i].ConfigMap.Name += "-primary" + } + } + + if sv := volume.Secret; sv != nil { + name := fmt.Sprintf("%s/%s", ConfigRefSecret, sv.SecretName) + if _, exists := refs[name]; exists { + spec.Volumes[i].Secret.SecretName += "-primary" + } + } + } + // update containers + for _, container := range spec.Containers { + // update env + for i, env := range container.Env { + if env.ValueFrom != nil { + switch { + case env.ValueFrom.ConfigMapKeyRef != nil: + name := fmt.Sprintf("%s/%s", ConfigRefMap, env.ValueFrom.ConfigMapKeyRef.Name) + if _, exists := refs[name]; exists { + container.Env[i].ValueFrom.ConfigMapKeyRef.Name += "-primary" + } + case env.ValueFrom.SecretKeyRef != nil: + name := fmt.Sprintf("%s/%s", ConfigRefSecret, env.ValueFrom.SecretKeyRef.Name) + if _, exists := refs[name]; exists { + container.Env[i].ValueFrom.SecretKeyRef.Name += "-primary" + } + } + } + } + // update envFrom + for i, envFrom := range container.EnvFrom { + switch { + case envFrom.ConfigMapRef != nil: + name := fmt.Sprintf("%s/%s", ConfigRefMap, envFrom.ConfigMapRef.Name) + if _, exists := refs[name]; exists { + container.EnvFrom[i].ConfigMapRef.Name += "-primary" + } + case envFrom.SecretRef != nil: + name := fmt.Sprintf("%s/%s", ConfigRefSecret, envFrom.SecretRef.Name) + if _, exists := refs[name]; exists { + container.EnvFrom[i].SecretRef.Name += "-primary" + } + } + } + } + + return spec +} diff --git a/pkg/canary/deployment_controller.go b/pkg/canary/deployment_controller.go index 2ab7358c6..3fc488bdd 100644 --- a/pkg/canary/deployment_controller.go +++ b/pkg/canary/deployment_controller.go @@ -25,7 +25,7 @@ type DeploymentController struct { kubeClient kubernetes.Interface flaggerClient clientset.Interface logger *zap.SugaredLogger - configTracker ConfigTracker + configTracker Tracker labels []string } diff --git a/pkg/canary/deployment_controller_test.go b/pkg/canary/deployment_controller_test.go index e042f120f..04923aa7f 100644 --- a/pkg/canary/deployment_controller_test.go +++ b/pkg/canary/deployment_controller_test.go @@ -1,6 +1,7 @@ package canary import ( + "k8s.io/apimachinery/pkg/api/errors" "testing" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -20,6 +21,11 @@ func TestCanaryDeployer_Sync(t *testing.T) { t.Fatal(err.Error()) } + configName := depPrimary.Spec.Template.Spec.Volumes[0].VolumeSource.ConfigMap.LocalObjectReference.Name + if configName != "podinfo-config-vol-primary" { + t.Errorf("Got config name %v wanted %v", configName, "podinfo-config-vol-primary") + } + dep := newTestDeployment() configMap := NewTestConfigMap() secret := NewTestSecret() @@ -302,3 +308,28 @@ func TestCanaryDeployer_Scale(t *testing.T) { t.Errorf("Got replicas %v wanted %v", *c.Spec.Replicas, 2) } } + +func TestCanaryDeployer_NoConfigTracking(t *testing.T) { + mocks := SetupMocks() + mocks.deployer.configTracker = &NopTracker{} + + err := mocks.deployer.Initialize(mocks.canary, true) + if err != nil { + t.Fatal(err.Error()) + } + + depPrimary, err := 
mocks.kubeClient.AppsV1().Deployments("default").Get("podinfo-primary", metav1.GetOptions{}) + if err != nil { + t.Fatal(err.Error()) + } + + _, err = mocks.kubeClient.CoreV1().ConfigMaps("default").Get("podinfo-config-env-primary", metav1.GetOptions{}) + if !errors.IsNotFound(err) { + t.Fatalf("Primary ConfigMap shouldn't have been created") + } + + configName := depPrimary.Spec.Template.Spec.Volumes[0].VolumeSource.ConfigMap.LocalObjectReference.Name + if configName != "podinfo-config-vol" { + t.Errorf("Got config name %v wanted %v", configName, "podinfo-config-vol") + } +} diff --git a/pkg/canary/factory.go b/pkg/canary/factory.go index d915fcdcd..3810374e2 100644 --- a/pkg/canary/factory.go +++ b/pkg/canary/factory.go @@ -11,13 +11,13 @@ type Factory struct { kubeClient kubernetes.Interface flaggerClient clientset.Interface logger *zap.SugaredLogger - configTracker ConfigTracker + configTracker Tracker labels []string } func NewFactory(kubeClient kubernetes.Interface, flaggerClient clientset.Interface, - configTracker ConfigTracker, + configTracker Tracker, labels []string, logger *zap.SugaredLogger) *Factory { return &Factory{ @@ -35,11 +35,7 @@ func (factory *Factory) Controller(kind string) Controller { kubeClient: factory.kubeClient, flaggerClient: factory.flaggerClient, labels: factory.labels, - configTracker: ConfigTracker{ - Logger: factory.logger, - KubeClient: factory.kubeClient, - FlaggerClient: factory.flaggerClient, - }, + configTracker: factory.configTracker, } serviceCtrl := &ServiceController{ logger: factory.logger, @@ -55,5 +51,4 @@ func (factory *Factory) Controller(kind string) Controller { default: return deploymentCtrl } - } diff --git a/pkg/canary/mock.go b/pkg/canary/mock.go index b3f99dc2e..6e93a5add 100644 --- a/pkg/canary/mock.go +++ b/pkg/canary/mock.go @@ -48,7 +48,7 @@ func SetupMocks() Mocks { kubeClient: kubeClient, logger: logger, labels: []string{"app", "name"}, - configTracker: ConfigTracker{ + configTracker: &ConfigTracker{ Logger: logger, KubeClient: kubeClient, FlaggerClient: flaggerClient, diff --git a/pkg/canary/nop_tracker.go b/pkg/canary/nop_tracker.go new file mode 100644 index 000000000..d2234a0b0 --- /dev/null +++ b/pkg/canary/nop_tracker.go @@ -0,0 +1,31 @@ +package canary + +import ( + flaggerv1 "github.com/weaveworks/flagger/pkg/apis/flagger/v1beta1" + corev1 "k8s.io/api/core/v1" +) + +// NopTracker no-operation tracker +type NopTracker struct { +} + +func (nt *NopTracker) GetTargetConfigs(*flaggerv1.Canary) (map[string]ConfigRef, error) { + res := make(map[string]ConfigRef) + return res, nil +} + +func (nt *NopTracker) GetConfigRefs(*flaggerv1.Canary) (*map[string]string, error) { + return nil, nil +} + +func (nt *NopTracker) HasConfigChanged(*flaggerv1.Canary) (bool, error) { + return false, nil +} + +func (nt *NopTracker) CreatePrimaryConfigs(*flaggerv1.Canary, map[string]ConfigRef) error { + return nil +} + +func (nt *NopTracker) ApplyPrimaryConfigs(spec corev1.PodSpec, refs map[string]ConfigRef) corev1.PodSpec { + return spec +} diff --git a/pkg/canary/tracker.go b/pkg/canary/tracker.go index 072c4b976..961dd21af 100644 --- a/pkg/canary/tracker.go +++ b/pkg/canary/tracker.go @@ -1,376 +1,14 @@ package canary import ( - "crypto/sha256" - "encoding/json" - "fmt" - - "go.uber.org/zap" - corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/client-go/kubernetes" - flaggerv1 
"github.com/weaveworks/flagger/pkg/apis/flagger/v1beta1" - clientset "github.com/weaveworks/flagger/pkg/client/clientset/versioned" -) - -// ConfigTracker is managing the operations for Kubernetes ConfigMaps and Secrets -type ConfigTracker struct { - KubeClient kubernetes.Interface - FlaggerClient clientset.Interface - Logger *zap.SugaredLogger -} - -type ConfigRefType string - -const ( - ConfigRefMap ConfigRefType = "configmap" - ConfigRefSecret ConfigRefType = "secret" + corev1 "k8s.io/api/core/v1" ) -// ConfigRef holds the reference to a tracked Kubernetes ConfigMap or Secret -type ConfigRef struct { - Name string - Type ConfigRefType - Checksum string -} - -// GetName returns the config ref type and name -func (c *ConfigRef) GetName() string { - return fmt.Sprintf("%s/%s", c.Type, c.Name) -} - -func checksum(data interface{}) string { - jsonBytes, _ := json.Marshal(data) - hashBytes := sha256.Sum256(jsonBytes) - - return fmt.Sprintf("%x", hashBytes[:8]) -} - -// getRefFromConfigMap transforms a Kubernetes ConfigMap into a ConfigRef -// and computes the checksum of the ConfigMap data -func (ct *ConfigTracker) getRefFromConfigMap(name string, namespace string) (*ConfigRef, error) { - config, err := ct.KubeClient.CoreV1().ConfigMaps(namespace).Get(name, metav1.GetOptions{}) - if err != nil { - return nil, err - } - - return &ConfigRef{ - Name: config.Name, - Type: ConfigRefMap, - Checksum: checksum(config.Data), - }, nil -} - -// getRefFromConfigMap transforms a Kubernetes Secret into a ConfigRef -// and computes the checksum of the Secret data -func (ct *ConfigTracker) getRefFromSecret(name string, namespace string) (*ConfigRef, error) { - secret, err := ct.KubeClient.CoreV1().Secrets(namespace).Get(name, metav1.GetOptions{}) - if err != nil { - return nil, err - } - - // ignore registry secrets (those should be set via service account) - if secret.Type != corev1.SecretTypeOpaque && - secret.Type != corev1.SecretTypeBasicAuth && - secret.Type != corev1.SecretTypeSSHAuth && - secret.Type != corev1.SecretTypeTLS { - ct.Logger.Debugf("ignoring secret %s.%s type not supported %v", name, namespace, secret.Type) - return nil, nil - } - - return &ConfigRef{ - Name: secret.Name, - Type: ConfigRefSecret, - Checksum: checksum(secret.Data), - }, nil -} - -// GetTargetConfigs scans the target deployment for Kubernetes ConfigMaps and Secretes -// and returns a list of config references -func (ct *ConfigTracker) GetTargetConfigs(cd *flaggerv1.Canary) (map[string]ConfigRef, error) { - res := make(map[string]ConfigRef) - targetName := cd.Spec.TargetRef.Name - targetDep, err := ct.KubeClient.AppsV1().Deployments(cd.Namespace).Get(targetName, metav1.GetOptions{}) - if err != nil { - if errors.IsNotFound(err) { - return res, fmt.Errorf("deployment %s.%s not found", targetName, cd.Namespace) - } - return res, fmt.Errorf("deployment %s.%s query error %v", targetName, cd.Namespace, err) - } - - // scan volumes - for _, volume := range targetDep.Spec.Template.Spec.Volumes { - if cmv := volume.ConfigMap; cmv != nil { - config, err := ct.getRefFromConfigMap(cmv.Name, cd.Namespace) - if err != nil { - ct.Logger.Errorf("configMap %s.%s query error %v", cmv.Name, cd.Namespace, err) - continue - } - if config != nil { - res[config.GetName()] = *config - } - } - - if sv := volume.Secret; sv != nil { - secret, err := ct.getRefFromSecret(sv.SecretName, cd.Namespace) - if err != nil { - ct.Logger.Errorf("secret %s.%s query error %v", sv.SecretName, cd.Namespace, err) - continue - } - if secret != nil { - 
res[secret.GetName()] = *secret - } - } - } - // scan containers - for _, container := range targetDep.Spec.Template.Spec.Containers { - // scan env - for _, env := range container.Env { - if env.ValueFrom != nil { - switch { - case env.ValueFrom.ConfigMapKeyRef != nil: - name := env.ValueFrom.ConfigMapKeyRef.LocalObjectReference.Name - config, err := ct.getRefFromConfigMap(name, cd.Namespace) - if err != nil { - ct.Logger.Errorf("configMap %s.%s query error %v", name, cd.Namespace, err) - continue - } - if config != nil { - res[config.GetName()] = *config - } - case env.ValueFrom.SecretKeyRef != nil: - name := env.ValueFrom.SecretKeyRef.LocalObjectReference.Name - secret, err := ct.getRefFromSecret(name, cd.Namespace) - if err != nil { - ct.Logger.Errorf("secret %s.%s query error %v", name, cd.Namespace, err) - continue - } - if secret != nil { - res[secret.GetName()] = *secret - } - } - } - } - // scan envFrom - for _, envFrom := range container.EnvFrom { - switch { - case envFrom.ConfigMapRef != nil: - name := envFrom.ConfigMapRef.LocalObjectReference.Name - config, err := ct.getRefFromConfigMap(name, cd.Namespace) - if err != nil { - ct.Logger.Errorf("configMap %s.%s query error %v", name, cd.Namespace, err) - continue - } - if config != nil { - res[config.GetName()] = *config - } - case envFrom.SecretRef != nil: - name := envFrom.SecretRef.LocalObjectReference.Name - secret, err := ct.getRefFromSecret(name, cd.Namespace) - if err != nil { - ct.Logger.Errorf("secret %s.%s query error %v", name, cd.Namespace, err) - continue - } - if secret != nil { - res[secret.GetName()] = *secret - } - } - } - } - - return res, nil -} - -// GetConfigRefs returns a map of configs and their checksum -func (ct *ConfigTracker) GetConfigRefs(cd *flaggerv1.Canary) (*map[string]string, error) { - res := make(map[string]string) - configs, err := ct.GetTargetConfigs(cd) - if err != nil { - return nil, err - } - - for _, cfg := range configs { - res[cfg.GetName()] = cfg.Checksum - } - - return &res, nil -} - -// HasConfigChanged checks for changes in ConfigMaps and Secretes by comparing -// the checksum for each ConfigRef stored in Canary.Status.TrackedConfigs -func (ct *ConfigTracker) HasConfigChanged(cd *flaggerv1.Canary) (bool, error) { - configs, err := ct.GetTargetConfigs(cd) - if err != nil { - return false, err - } - - if len(configs) == 0 && cd.Status.TrackedConfigs == nil { - return false, nil - } - - if len(configs) > 0 && cd.Status.TrackedConfigs == nil { - return true, nil - } - - trackedConfigs := *cd.Status.TrackedConfigs - - if len(configs) != len(trackedConfigs) { - return true, nil - } - - for _, cfg := range configs { - if trackedConfigs[cfg.GetName()] != cfg.Checksum { - ct.Logger.With("canary", fmt.Sprintf("%s.%s", cd.Name, cd.Namespace)). 
- Infof("%s %s has changed", cfg.Type, cfg.Name) - return true, nil - } - } - - return false, nil -} - -// CreatePrimaryConfigs syncs the primary Kubernetes ConfigMaps and Secretes -// with those found in the target deployment -func (ct *ConfigTracker) CreatePrimaryConfigs(cd *flaggerv1.Canary, refs map[string]ConfigRef) error { - for _, ref := range refs { - switch ref.Type { - case ConfigRefMap: - config, err := ct.KubeClient.CoreV1().ConfigMaps(cd.Namespace).Get(ref.Name, metav1.GetOptions{}) - if err != nil { - return err - } - primaryName := fmt.Sprintf("%s-primary", config.GetName()) - primaryConfigMap := &corev1.ConfigMap{ - ObjectMeta: metav1.ObjectMeta{ - Name: primaryName, - Namespace: cd.Namespace, - Labels: config.Labels, - OwnerReferences: []metav1.OwnerReference{ - *metav1.NewControllerRef(cd, schema.GroupVersionKind{ - Group: flaggerv1.SchemeGroupVersion.Group, - Version: flaggerv1.SchemeGroupVersion.Version, - Kind: flaggerv1.CanaryKind, - }), - }, - }, - Data: config.Data, - } - - // update or insert primary ConfigMap - _, err = ct.KubeClient.CoreV1().ConfigMaps(cd.Namespace).Update(primaryConfigMap) - if err != nil { - if errors.IsNotFound(err) { - _, err = ct.KubeClient.CoreV1().ConfigMaps(cd.Namespace).Create(primaryConfigMap) - if err != nil { - return err - } - } else { - return err - } - } - - ct.Logger.With("canary", fmt.Sprintf("%s.%s", cd.Name, cd.Namespace)). - Infof("ConfigMap %s synced", primaryConfigMap.GetName()) - case ConfigRefSecret: - secret, err := ct.KubeClient.CoreV1().Secrets(cd.Namespace).Get(ref.Name, metav1.GetOptions{}) - if err != nil { - return err - } - primaryName := fmt.Sprintf("%s-primary", secret.GetName()) - primarySecret := &corev1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: primaryName, - Namespace: cd.Namespace, - Labels: secret.Labels, - OwnerReferences: []metav1.OwnerReference{ - *metav1.NewControllerRef(cd, schema.GroupVersionKind{ - Group: flaggerv1.SchemeGroupVersion.Group, - Version: flaggerv1.SchemeGroupVersion.Version, - Kind: flaggerv1.CanaryKind, - }), - }, - }, - Type: secret.Type, - Data: secret.Data, - } - - // update or insert primary Secret - _, err = ct.KubeClient.CoreV1().Secrets(cd.Namespace).Update(primarySecret) - if err != nil { - if errors.IsNotFound(err) { - _, err = ct.KubeClient.CoreV1().Secrets(cd.Namespace).Create(primarySecret) - if err != nil { - return err - } - } else { - return err - } - } - - ct.Logger.With("canary", fmt.Sprintf("%s.%s", cd.Name, cd.Namespace)). 
- Infof("Secret %s synced", primarySecret.GetName()) - } - } - - return nil -} - -// ApplyPrimaryConfigs appends the primary suffix to all ConfigMaps and Secretes found in the PodSpec -func (ct *ConfigTracker) ApplyPrimaryConfigs(spec corev1.PodSpec, refs map[string]ConfigRef) corev1.PodSpec { - // update volumes - for i, volume := range spec.Volumes { - if cmv := volume.ConfigMap; cmv != nil { - name := fmt.Sprintf("%s/%s", ConfigRefMap, cmv.Name) - if _, exists := refs[name]; exists { - spec.Volumes[i].ConfigMap.Name += "-primary" - } - } - - if sv := volume.Secret; sv != nil { - name := fmt.Sprintf("%s/%s", ConfigRefSecret, sv.SecretName) - if _, exists := refs[name]; exists { - spec.Volumes[i].Secret.SecretName += "-primary" - } - } - } - // update containers - for _, container := range spec.Containers { - // update env - for i, env := range container.Env { - if env.ValueFrom != nil { - switch { - case env.ValueFrom.ConfigMapKeyRef != nil: - name := fmt.Sprintf("%s/%s", ConfigRefMap, env.ValueFrom.ConfigMapKeyRef.Name) - if _, exists := refs[name]; exists { - container.Env[i].ValueFrom.ConfigMapKeyRef.Name += "-primary" - } - case env.ValueFrom.SecretKeyRef != nil: - name := fmt.Sprintf("%s/%s", ConfigRefSecret, env.ValueFrom.SecretKeyRef.Name) - if _, exists := refs[name]; exists { - container.Env[i].ValueFrom.SecretKeyRef.Name += "-primary" - } - } - } - } - // update envFrom - for i, envFrom := range container.EnvFrom { - switch { - case envFrom.ConfigMapRef != nil: - name := fmt.Sprintf("%s/%s", ConfigRefMap, envFrom.ConfigMapRef.Name) - if _, exists := refs[name]; exists { - container.EnvFrom[i].ConfigMapRef.Name += "-primary" - } - case envFrom.SecretRef != nil: - name := fmt.Sprintf("%s/%s", ConfigRefSecret, envFrom.SecretRef.Name) - if _, exists := refs[name]; exists { - container.EnvFrom[i].SecretRef.Name += "-primary" - } - } - } - } - - return spec +type Tracker interface { + GetTargetConfigs(cd *flaggerv1.Canary) (map[string]ConfigRef, error) + GetConfigRefs(cd *flaggerv1.Canary) (*map[string]string, error) + HasConfigChanged(cd *flaggerv1.Canary) (bool, error) + CreatePrimaryConfigs(cd *flaggerv1.Canary, refs map[string]ConfigRef) error + ApplyPrimaryConfigs(spec corev1.PodSpec, refs map[string]ConfigRef) corev1.PodSpec } diff --git a/pkg/controller/controller_test.go b/pkg/controller/controller_test.go index c6f8e0e18..f9f88fc0e 100644 --- a/pkg/controller/controller_test.go +++ b/pkg/controller/controller_test.go @@ -77,7 +77,7 @@ func SetupMocks(c *flaggerv1.Canary) Mocks { observerFactory, _ := observers.NewFactory("fake") // init canary factory - configTracker := canary.ConfigTracker{ + configTracker := &canary.ConfigTracker{ Logger: logger, KubeClient: kubeClient, FlaggerClient: flaggerClient,