From 8c0978fd1fb3863dd06005778363064ddbfb2915 Mon Sep 17 00:00:00 2001
From: Ashwin Venkatesh
Date: Fri, 10 Jun 2022 12:44:52 -0400
Subject: [PATCH] Backfill unit tests for peering dialer (#1265)

- Clean up some of the logic in the peering dialer and acceptor
- Rename Handler -> ConnectWebhook
---
 .../templates/crd-peeringacceptors.yaml       |   6 +-
 .../consul/templates/crd-peeringdialers.yaml  |   6 +-
 .../api/v1alpha1/peeringacceptor_types.go     |  17 +-
 .../api/v1alpha1/peeringdialer_types.go       |  11 +
 .../api/v1alpha1/zz_generated.deepcopy.go     |   1 +
 ...consul.hashicorp.com_peeringacceptors.yaml |   6 +-
 .../consul.hashicorp.com_peeringdialers.yaml  |   6 +-
 .../{handler.go => connect_webhook.go}        | 144 ++--
 ...nt_test.go => connect_webhook_ent_test.go} |  38 +-
 ...andler_test.go => connect_webhook_test.go} |  46 +-
 .../connect-inject/consul_sidecar.go          |  30 +-
 .../connect-inject/consul_sidecar_test.go     |  24 +-
 control-plane/connect-inject/container_env.go |   2 +-
 .../connect-inject/container_env_test.go      |   2 +-
 .../connect-inject/container_init.go          |  46 +-
 .../connect-inject/container_init_test.go     |  48 +-
 .../connect-inject/container_volume.go        |   2 +-
 .../endpoints_controller_test.go              |   6 +-
 control-plane/connect-inject/envoy_sidecar.go |  39 +-
 .../connect-inject/envoy_sidecar_test.go      |  38 +-
 .../peering_acceptor_controller.go            | 109 ++-
 .../peering_acceptor_controller_test.go       | 215 +++---
 .../peering_dialer_controller.go              | 208 +++---
 .../peering_dialer_controller_test.go         | 686 +++++++++++++++++-
 .../subcommand/inject-connect/command.go      |   2 +-
 25 files changed, 1223 insertions(+), 515 deletions(-)
 rename control-plane/connect-inject/{handler.go => connect_webhook.go} (83%)
 rename control-plane/connect-inject/{handler_ent_test.go => connect_webhook_ent_test.go} (97%)
 rename control-plane/connect-inject/{handler_test.go => connect_webhook_test.go} (98%)

diff --git a/charts/consul/templates/crd-peeringacceptors.yaml b/charts/consul/templates/crd-peeringacceptors.yaml
index cc7d358319..0350d64c51 100644
--- a/charts/consul/templates/crd-peeringacceptors.yaml
+++ b/charts/consul/templates/crd-peeringacceptors.yaml
@@ -94,12 +94,12 @@ spec:
                   key:
                     description: Key is the key of the secret generated.
                     type: string
-                  latestHash:
-                    description: ResourceVersion is the resource version for the secret.
-                    type: string
                   name:
                     description: Name is the name of the secret generated.
                     type: string
+                  resourceVersion:
+                    description: ResourceVersion is the resource version for the secret.
+                    type: string
                 type: object
             type: object
         type: object
diff --git a/charts/consul/templates/crd-peeringdialers.yaml b/charts/consul/templates/crd-peeringdialers.yaml
index 150c44f38c..7dbcdb9402 100644
--- a/charts/consul/templates/crd-peeringdialers.yaml
+++ b/charts/consul/templates/crd-peeringdialers.yaml
@@ -94,12 +94,12 @@ spec:
                   key:
                     description: Key is the key of the secret generated.
                     type: string
-                  latestHash:
-                    description: ResourceVersion is the resource version for the secret.
-                    type: string
                   name:
                     description: Name is the name of the secret generated.
                     type: string
+                  resourceVersion:
+                    description: ResourceVersion is the resource version for the secret.
+                    type: string
                 type: object
             type: object
         type: object
diff --git a/control-plane/api/v1alpha1/peeringacceptor_types.go b/control-plane/api/v1alpha1/peeringacceptor_types.go
index 3f6bd4c7e2..f6dd84c4e8 100644
--- a/control-plane/api/v1alpha1/peeringacceptor_types.go
+++ b/control-plane/api/v1alpha1/peeringacceptor_types.go
@@ -76,12 +76,15 @@ type ReconcileErrorStatus struct {
 }
 
 type SecretRefStatus struct {
-	// Name is the name of the secret generated.
-	Name string `json:"name,omitempty"`
-	// Key is the key of the secret generated.
-	Key string `json:"key,omitempty"`
-	// Backend is where the generated secret is stored. Currently supports the value: "kubernetes".
-	Backend string `json:"backend,omitempty"`
+	Secret `json:",inline"`
 	// ResourceVersion is the resource version for the secret.
-	ResourceVersion string `json:"latestHash,omitempty"`
+	ResourceVersion string `json:"resourceVersion,omitempty"`
+}
+
+func (pa *PeeringAcceptor) Secret() *Secret {
+	return pa.Spec.Peer.Secret
+}
+
+func (pa *PeeringAcceptor) SecretRef() *SecretRefStatus {
+	return pa.Status.SecretRef
 }
diff --git a/control-plane/api/v1alpha1/peeringdialer_types.go b/control-plane/api/v1alpha1/peeringdialer_types.go
index f6eaafe839..99385ff0aa 100644
--- a/control-plane/api/v1alpha1/peeringdialer_types.go
+++ b/control-plane/api/v1alpha1/peeringdialer_types.go
@@ -53,3 +53,14 @@ type PeeringDialerStatus struct {
 	// +optional
 	SecretRef *SecretRefStatus `json:"secret,omitempty"`
 }
+
+func (pd *PeeringDialer) Secret() *Secret {
+	if pd.Spec.Peer == nil {
+		return nil
+	}
+	return pd.Spec.Peer.Secret
+}
+
+func (pd *PeeringDialer) SecretRef() *SecretRefStatus {
+	return pd.Status.SecretRef
+}
diff --git a/control-plane/api/v1alpha1/zz_generated.deepcopy.go b/control-plane/api/v1alpha1/zz_generated.deepcopy.go
index d525aa0ba4..e5bcbfcc6f 100644
--- a/control-plane/api/v1alpha1/zz_generated.deepcopy.go
+++ b/control-plane/api/v1alpha1/zz_generated.deepcopy.go
@@ -1179,6 +1179,7 @@ func (in *Secret) DeepCopy() *Secret {
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *SecretRefStatus) DeepCopyInto(out *SecretRefStatus) {
 	*out = *in
+	out.Secret = in.Secret
 }
 
 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretRefStatus.
diff --git a/control-plane/config/crd/bases/consul.hashicorp.com_peeringacceptors.yaml b/control-plane/config/crd/bases/consul.hashicorp.com_peeringacceptors.yaml
index 645a5e998d..a4a00a7426 100644
--- a/control-plane/config/crd/bases/consul.hashicorp.com_peeringacceptors.yaml
+++ b/control-plane/config/crd/bases/consul.hashicorp.com_peeringacceptors.yaml
@@ -87,12 +87,12 @@ spec:
                   key:
                     description: Key is the key of the secret generated.
                     type: string
-                  latestHash:
-                    description: ResourceVersion is the resource version for the secret.
-                    type: string
                   name:
                     description: Name is the name of the secret generated.
                     type: string
+                  resourceVersion:
+                    description: ResourceVersion is the resource version for the secret.
+                    type: string
                 type: object
            type: object
        type: object
diff --git a/control-plane/config/crd/bases/consul.hashicorp.com_peeringdialers.yaml b/control-plane/config/crd/bases/consul.hashicorp.com_peeringdialers.yaml
index 3a0c15b799..c2eef39627 100644
--- a/control-plane/config/crd/bases/consul.hashicorp.com_peeringdialers.yaml
+++ b/control-plane/config/crd/bases/consul.hashicorp.com_peeringdialers.yaml
@@ -87,12 +87,12 @@ spec:
                   key:
                     description: Key is the key of the secret generated.
                     type: string
-                  latestHash:
-                    description: ResourceVersion is the resource version for the secret.
-                    type: string
                   name:
                     description: Name is the name of the secret generated.
                     type: string
+                  resourceVersion:
+                    description: ResourceVersion is the resource version for the secret.
+                    type: string
                 type: object
            type: object
        type: object
diff --git a/control-plane/connect-inject/handler.go b/control-plane/connect-inject/connect_webhook.go
similarity index 83%
rename from control-plane/connect-inject/handler.go
rename to control-plane/connect-inject/connect_webhook.go
index f9e43f1181..f2653c2bcc 100644
--- a/control-plane/connect-inject/handler.go
+++ b/control-plane/connect-inject/connect_webhook.go
@@ -32,7 +32,7 @@ var (
 )
 
 // Handler is the HTTP handler for admission webhooks.
-type Handler struct {
+type ConnectWebhook struct {
 	ConsulClient *api.Client
 	Clientset    kubernetes.Interface
 
@@ -170,12 +170,12 @@ type multiPortInfo struct {
 // Handle is the admission.Handler implementation that actually handles the
 // webhook request for admission control. This should be registered or
 // served via the controller runtime manager.
-func (h *Handler) Handle(ctx context.Context, req admission.Request) admission.Response {
+func (w *ConnectWebhook) Handle(ctx context.Context, req admission.Request) admission.Response {
 	var pod corev1.Pod
 
 	// Decode the pod from the request
-	if err := h.decoder.Decode(req, &pod); err != nil {
-		h.Log.Error(err, "could not unmarshal request to pod")
+	if err := w.decoder.Decode(req, &pod); err != nil {
+		w.Log.Error(err, "could not unmarshal request to pod")
 		return admission.Errored(http.StatusBadRequest, err)
 	}
 
@@ -186,40 +186,40 @@ func (h *Handler) Handle(ctx context.Context, req admission.Request) admission.R
 		return admission.Errored(http.StatusBadRequest, err)
 	}
 
-	if err := h.validatePod(pod); err != nil {
-		h.Log.Error(err, "error validating pod", "request name", req.Name)
+	if err := w.validatePod(pod); err != nil {
+		w.Log.Error(err, "error validating pod", "request name", req.Name)
 		return admission.Errored(http.StatusBadRequest, err)
 	}
 
 	// Setup the default annotation values that are used for the container.
 	// This MUST be done before shouldInject is called since that function
 	// uses these annotations.
-	if err := h.defaultAnnotations(&pod, string(origPodJson)); err != nil {
-		h.Log.Error(err, "error creating default annotations", "request name", req.Name)
+	if err := w.defaultAnnotations(&pod, string(origPodJson)); err != nil {
+		w.Log.Error(err, "error creating default annotations", "request name", req.Name)
 		return admission.Errored(http.StatusInternalServerError, fmt.Errorf("error creating default annotations: %s", err))
 	}
 
 	// Check if we should inject, for example we don't inject in the
 	// system namespaces.
- if shouldInject, err := h.shouldInject(pod, req.Namespace); err != nil { - h.Log.Error(err, "error checking if should inject", "request name", req.Name) + if shouldInject, err := w.shouldInject(pod, req.Namespace); err != nil { + w.Log.Error(err, "error checking if should inject", "request name", req.Name) return admission.Errored(http.StatusInternalServerError, fmt.Errorf("error checking if should inject: %s", err)) } else if !shouldInject { return admission.Allowed(fmt.Sprintf("%s %s does not require injection", pod.Kind, pod.Name)) } - h.Log.Info("received pod", "name", req.Name, "ns", req.Namespace) + w.Log.Info("received pod", "name", req.Name, "ns", req.Namespace) // Add our volume that will be shared by the init container and // the sidecar for passing data in the pod. - pod.Spec.Volumes = append(pod.Spec.Volumes, h.containerVolume()) + pod.Spec.Volumes = append(pod.Spec.Volumes, w.containerVolume()) // Optionally mount data volume to other containers - h.injectVolumeMount(pod) + w.injectVolumeMount(pod) // Add the upstream services as environment variables for easy // service discovery. - containerEnvVars := h.containerEnvVars(pod) + containerEnvVars := w.containerEnvVars(pod) for i := range pod.Spec.InitContainers { pod.Spec.InitContainers[i].Env = append(pod.Spec.InitContainers[i].Env, containerEnvVars...) } @@ -229,35 +229,35 @@ func (h *Handler) Handle(ctx context.Context, req admission.Request) admission.R } // Add the init container which copies the Consul binary to /consul/connect-inject/. - initCopyContainer := h.initCopyContainer() + initCopyContainer := w.initCopyContainer() pod.Spec.InitContainers = append(pod.Spec.InitContainers, initCopyContainer) // A user can enable/disable tproxy for an entire namespace via a label. - ns, err := h.Clientset.CoreV1().Namespaces().Get(ctx, req.Namespace, metav1.GetOptions{}) + ns, err := w.Clientset.CoreV1().Namespaces().Get(ctx, req.Namespace, metav1.GetOptions{}) if err != nil { - h.Log.Error(err, "error fetching namespace metadata for container", "request name", req.Name) + w.Log.Error(err, "error fetching namespace metadata for container", "request name", req.Name) return admission.Errored(http.StatusInternalServerError, fmt.Errorf("error getting namespace metadata for container: %s", err)) } // Get service names from the annotation. If theres 0-1 service names, it's a single port pod, otherwise it's multi // port. - annotatedSvcNames := h.annotatedServiceNames(pod) + annotatedSvcNames := w.annotatedServiceNames(pod) multiPort := len(annotatedSvcNames) > 1 // For single port pods, add the single init container and envoy sidecar. if !multiPort { // Add the init container that registers the service and sets up the Envoy configuration. - initContainer, err := h.containerInit(*ns, pod, multiPortInfo{}) + initContainer, err := w.containerInit(*ns, pod, multiPortInfo{}) if err != nil { - h.Log.Error(err, "error configuring injection init container", "request name", req.Name) + w.Log.Error(err, "error configuring injection init container", "request name", req.Name) return admission.Errored(http.StatusInternalServerError, fmt.Errorf("error configuring injection init container: %s", err)) } pod.Spec.InitContainers = append(pod.Spec.InitContainers, initContainer) // Add the Envoy sidecar. 
- envoySidecar, err := h.envoySidecar(*ns, pod, multiPortInfo{}) + envoySidecar, err := w.envoySidecar(*ns, pod, multiPortInfo{}) if err != nil { - h.Log.Error(err, "error configuring injection sidecar container", "request name", req.Name) + w.Log.Error(err, "error configuring injection sidecar container", "request name", req.Name) return admission.Errored(http.StatusInternalServerError, fmt.Errorf("error configuring injection sidecar container: %s", err)) } pod.Spec.Containers = append(pod.Spec.Containers, envoySidecar) @@ -269,27 +269,27 @@ func (h *Handler) Handle(ctx context.Context, req admission.Request) admission.R // service account per service. So, this will look for service accounts whose name matches the service and mount // those tokens if not already specified via the pod's serviceAccountName. - h.Log.Info("processing multiport pod") - err := h.checkUnsupportedMultiPortCases(*ns, pod) + w.Log.Info("processing multiport pod") + err := w.checkUnsupportedMultiPortCases(*ns, pod) if err != nil { - h.Log.Error(err, "checking unsupported cases for multi port pods") + w.Log.Error(err, "checking unsupported cases for multi port pods") return admission.Errored(http.StatusInternalServerError, err) } for i, svc := range annotatedSvcNames { - h.Log.Info(fmt.Sprintf("service: %s", svc)) - if h.AuthMethod != "" { + w.Log.Info(fmt.Sprintf("service: %s", svc)) + if w.AuthMethod != "" { if svc != "" && pod.Spec.ServiceAccountName != svc { - sa, err := h.Clientset.CoreV1().ServiceAccounts(req.Namespace).Get(ctx, svc, metav1.GetOptions{}) + sa, err := w.Clientset.CoreV1().ServiceAccounts(req.Namespace).Get(ctx, svc, metav1.GetOptions{}) if err != nil { - h.Log.Error(err, "couldn't get service accounts") + w.Log.Error(err, "couldn't get service accounts") return admission.Errored(http.StatusInternalServerError, err) } if len(sa.Secrets) == 0 { - h.Log.Info(fmt.Sprintf("service account %s has zero secrets exp at least 1", svc)) + w.Log.Info(fmt.Sprintf("service account %s has zero secrets exp at least 1", svc)) return admission.Errored(http.StatusInternalServerError, fmt.Errorf("service account %s has zero secrets, expected at least one", svc)) } saSecret := sa.Secrets[0].Name - h.Log.Info("found service account, mounting service account secret to Pod", "serviceAccountName", sa.Name) + w.Log.Info("found service account, mounting service account secret to Pod", "serviceAccountName", sa.Name) pod.Spec.Volumes = append(pod.Spec.Volumes, corev1.Volume{ Name: fmt.Sprintf("%s-service-account", svc), VolumeSource: corev1.VolumeSource{ @@ -308,17 +308,17 @@ func (h *Handler) Handle(ctx context.Context, req admission.Request) admission.R } // Add the init container that registers the service and sets up the Envoy configuration. - initContainer, err := h.containerInit(*ns, pod, mpi) + initContainer, err := w.containerInit(*ns, pod, mpi) if err != nil { - h.Log.Error(err, "error configuring injection init container", "request name", req.Name) + w.Log.Error(err, "error configuring injection init container", "request name", req.Name) return admission.Errored(http.StatusInternalServerError, fmt.Errorf("error configuring injection init container: %s", err)) } pod.Spec.InitContainers = append(pod.Spec.InitContainers, initContainer) // Add the Envoy sidecar. 
- envoySidecar, err := h.envoySidecar(*ns, pod, mpi) + envoySidecar, err := w.envoySidecar(*ns, pod, mpi) if err != nil { - h.Log.Error(err, "error configuring injection sidecar container", "request name", req.Name) + w.Log.Error(err, "error configuring injection sidecar container", "request name", req.Name) return admission.Errored(http.StatusInternalServerError, fmt.Errorf("error configuring injection sidecar container: %s", err)) } pod.Spec.Containers = append(pod.Spec.Containers, envoySidecar) @@ -329,17 +329,17 @@ func (h *Handler) Handle(ctx context.Context, req admission.Request) admission.R // (that functionality lives in the endpoints-controller), // we only need the consul sidecar to run the metrics merging server. // First, determine if we need to run the metrics merging server. - shouldRunMetricsMerging, err := h.MetricsConfig.shouldRunMergedMetricsServer(pod) + shouldRunMetricsMerging, err := w.MetricsConfig.shouldRunMergedMetricsServer(pod) if err != nil { - h.Log.Error(err, "error determining if metrics merging server should be run", "request name", req.Name) + w.Log.Error(err, "error determining if metrics merging server should be run", "request name", req.Name) return admission.Errored(http.StatusInternalServerError, fmt.Errorf("error determining if metrics merging server should be run: %s", err)) } // Add the consul-sidecar only if we need to run the metrics merging server. if shouldRunMetricsMerging { - consulSidecar, err := h.consulSidecar(pod) + consulSidecar, err := w.consulSidecar(pod) if err != nil { - h.Log.Error(err, "error configuring consul sidecar container", "request name", req.Name) + w.Log.Error(err, "error configuring consul sidecar container", "request name", req.Name) return admission.Errored(http.StatusInternalServerError, fmt.Errorf("error configuring consul sidecar container: %s", err)) } pod.Spec.Containers = append(pod.Spec.Containers, consulSidecar) @@ -350,8 +350,8 @@ func (h *Handler) Handle(ctx context.Context, req admission.Request) admission.R pod.Annotations[keyInjectStatus] = injected // Add annotations for metrics. - if err = h.prometheusAnnotations(&pod); err != nil { - h.Log.Error(err, "error configuring prometheus annotations", "request name", req.Name) + if err = w.prometheusAnnotations(&pod); err != nil { + w.Log.Error(err, "error configuring prometheus annotations", "request name", req.Name) return admission.Errored(http.StatusInternalServerError, fmt.Errorf("error configuring prometheus annotations: %s", err)) } @@ -365,14 +365,14 @@ func (h *Handler) Handle(ctx context.Context, req admission.Request) admission.R pod.Labels[keyManagedBy] = managedByValue // Consul-ENT only: Add the Consul destination namespace as an annotation to the pod. - if h.EnableNamespaces { - pod.Annotations[annotationConsulNamespace] = h.consulNamespace(req.Namespace) + if w.EnableNamespaces { + pod.Annotations[annotationConsulNamespace] = w.consulNamespace(req.Namespace) } // Overwrite readiness/liveness probes if needed. 
- err = h.overwriteProbes(*ns, &pod) + err = w.overwriteProbes(*ns, &pod) if err != nil { - h.Log.Error(err, "error overwriting readiness or liveness probes", "request name", req.Name) + w.Log.Error(err, "error overwriting readiness or liveness probes", "request name", req.Name) return admission.Errored(http.StatusInternalServerError, fmt.Errorf("error overwriting readiness or liveness probes: %s", err)) } @@ -393,10 +393,10 @@ func (h *Handler) Handle(ctx context.Context, req admission.Request) admission.R // Check and potentially create Consul resources. This is done after // all patches are created to guarantee no errors were encountered in // that process before modifying the Consul cluster. - if h.EnableNamespaces { - if _, err := namespaces.EnsureExists(h.ConsulClient, h.consulNamespace(req.Namespace), h.CrossNamespaceACLPolicy); err != nil { - h.Log.Error(err, "error checking or creating namespace", - "ns", h.consulNamespace(req.Namespace), "request name", req.Name) + if w.EnableNamespaces { + if _, err := namespaces.EnsureExists(w.ConsulClient, w.consulNamespace(req.Namespace), w.CrossNamespaceACLPolicy); err != nil { + w.Log.Error(err, "error checking or creating namespace", + "ns", w.consulNamespace(req.Namespace), "request name", req.Name) return admission.Errored(http.StatusInternalServerError, fmt.Errorf("error checking or creating namespace: %s", err)) } } @@ -418,13 +418,13 @@ func shouldOverwriteProbes(pod corev1.Pod, globalOverwrite bool) (bool, error) { // overwriteProbes overwrites readiness/liveness probes of this pod when // both transparent proxy is enabled and overwrite probes is true for the pod. -func (h *Handler) overwriteProbes(ns corev1.Namespace, pod *corev1.Pod) error { - tproxyEnabled, err := transparentProxyEnabled(ns, *pod, h.EnableTransparentProxy) +func (w *ConnectWebhook) overwriteProbes(ns corev1.Namespace, pod *corev1.Pod) error { + tproxyEnabled, err := transparentProxyEnabled(ns, *pod, w.EnableTransparentProxy) if err != nil { return err } - overwriteProbes, err := shouldOverwriteProbes(*pod, h.TProxyOverwriteProbes) + overwriteProbes, err := shouldOverwriteProbes(*pod, w.TProxyOverwriteProbes) if err != nil { return err } @@ -449,7 +449,7 @@ func (h *Handler) overwriteProbes(ns corev1.Namespace, pod *corev1.Pod) error { return nil } -func (h *Handler) injectVolumeMount(pod corev1.Pod) { +func (w *ConnectWebhook) injectVolumeMount(pod corev1.Pod) { containersToInject := splitCommaSeparatedItemsFromAnnotation(annotationInjectMountVolumes, pod) for index, container := range pod.Spec.Containers { @@ -462,7 +462,7 @@ func (h *Handler) injectVolumeMount(pod corev1.Pod) { } } -func (h *Handler) shouldInject(pod corev1.Pod, namespace string) (bool, error) { +func (w *ConnectWebhook) shouldInject(pod corev1.Pod, namespace string) (bool, error) { // Don't inject in the Kubernetes system namespaces if kubeSystemNamespaces.Contains(namespace) { return false, nil @@ -470,12 +470,12 @@ func (h *Handler) shouldInject(pod corev1.Pod, namespace string) (bool, error) { // Namespace logic // If in deny list, don't inject - if h.DenyK8sNamespacesSet.Contains(namespace) { + if w.DenyK8sNamespacesSet.Contains(namespace) { return false, nil } // If not in allow list or allow list is not *, don't inject - if !h.AllowK8sNamespacesSet.Contains("*") && !h.AllowK8sNamespacesSet.Contains(namespace) { + if !w.AllowK8sNamespacesSet.Contains("*") && !w.AllowK8sNamespacesSet.Contains(namespace) { return false, nil } @@ -491,10 +491,10 @@ func (h *Handler) shouldInject(pod 
corev1.Pod, namespace string) (bool, error) { return strconv.ParseBool(raw) } - return !h.RequireAnnotation, nil + return !w.RequireAnnotation, nil } -func (h *Handler) defaultAnnotations(pod *corev1.Pod, podJson string) error { +func (w *ConnectWebhook) defaultAnnotations(pod *corev1.Pod, podJson string) error { if pod.Annotations == nil { pod.Annotations = make(map[string]string) } @@ -518,16 +518,16 @@ func (h *Handler) defaultAnnotations(pod *corev1.Pod, podJson string) error { // prometheusAnnotations sets the Prometheus scraping configuration // annotations on the Pod. -func (h *Handler) prometheusAnnotations(pod *corev1.Pod) error { - enableMetrics, err := h.MetricsConfig.enableMetrics(*pod) +func (w *ConnectWebhook) prometheusAnnotations(pod *corev1.Pod) error { + enableMetrics, err := w.MetricsConfig.enableMetrics(*pod) if err != nil { return err } - prometheusScrapePort, err := h.MetricsConfig.prometheusScrapePort(*pod) + prometheusScrapePort, err := w.MetricsConfig.prometheusScrapePort(*pod) if err != nil { return err } - prometheusScrapePath := h.MetricsConfig.prometheusScrapePath(*pod) + prometheusScrapePath := w.MetricsConfig.prometheusScrapePath(*pod) if enableMetrics { pod.Annotations[annotationPrometheusScrape] = "true" @@ -540,11 +540,11 @@ func (h *Handler) prometheusAnnotations(pod *corev1.Pod) error { // consulNamespace returns the namespace that a service should be // registered in based on the namespace options. It returns an // empty string if namespaces aren't enabled. -func (h *Handler) consulNamespace(ns string) string { - return namespaces.ConsulNamespace(ns, h.EnableNamespaces, h.ConsulDestinationNamespace, h.EnableK8SNSMirroring, h.K8SNSMirroringPrefix) +func (w *ConnectWebhook) consulNamespace(ns string) string { + return namespaces.ConsulNamespace(ns, w.EnableNamespaces, w.ConsulDestinationNamespace, w.EnableK8SNSMirroring, w.K8SNSMirroringPrefix) } -func (h *Handler) validatePod(pod corev1.Pod) error { +func (w *ConnectWebhook) validatePod(pod corev1.Pod) error { if _, ok := pod.Annotations[annotationProtocol]; ok { return fmt.Errorf("the %q annotation is no longer supported. 
Instead, create a ServiceDefaults resource (see www.consul.io/docs/k8s/crds/upgrade-to-crds)", annotationProtocol) @@ -609,7 +609,7 @@ func findServiceAccountVolumeMount(pod corev1.Pod, multiPort bool, multiPortSvcN return volumeMount, "/var/run/secrets/kubernetes.io/serviceaccount/token", nil } -func (h *Handler) annotatedServiceNames(pod corev1.Pod) []string { +func (w *ConnectWebhook) annotatedServiceNames(pod corev1.Pod) []string { var annotatedSvcNames []string if anno, ok := pod.Annotations[annotationService]; ok { annotatedSvcNames = strings.Split(anno, ",") @@ -617,16 +617,16 @@ func (h *Handler) annotatedServiceNames(pod corev1.Pod) []string { return annotatedSvcNames } -func (h *Handler) checkUnsupportedMultiPortCases(ns corev1.Namespace, pod corev1.Pod) error { - tproxyEnabled, err := transparentProxyEnabled(ns, pod, h.EnableTransparentProxy) +func (w *ConnectWebhook) checkUnsupportedMultiPortCases(ns corev1.Namespace, pod corev1.Pod) error { + tproxyEnabled, err := transparentProxyEnabled(ns, pod, w.EnableTransparentProxy) if err != nil { return fmt.Errorf("couldn't check if tproxy is enabled: %s", err) } - metricsEnabled, err := h.MetricsConfig.enableMetrics(pod) + metricsEnabled, err := w.MetricsConfig.enableMetrics(pod) if err != nil { return fmt.Errorf("couldn't check if metrics is enabled: %s", err) } - metricsMergingEnabled, err := h.MetricsConfig.enableMetricsMerging(pod) + metricsMergingEnabled, err := w.MetricsConfig.enableMetricsMerging(pod) if err != nil { return fmt.Errorf("couldn't check if metrics merging is enabled: %s", err) } @@ -642,8 +642,8 @@ func (h *Handler) checkUnsupportedMultiPortCases(ns corev1.Namespace, pod corev1 return nil } -func (h *Handler) InjectDecoder(d *admission.Decoder) error { - h.decoder = d +func (w *ConnectWebhook) InjectDecoder(d *admission.Decoder) error { + w.decoder = d return nil } diff --git a/control-plane/connect-inject/handler_ent_test.go b/control-plane/connect-inject/connect_webhook_ent_test.go similarity index 97% rename from control-plane/connect-inject/handler_ent_test.go rename to control-plane/connect-inject/connect_webhook_ent_test.go index e35fd7aaad..24e5ce7d11 100644 --- a/control-plane/connect-inject/handler_ent_test.go +++ b/control-plane/connect-inject/connect_webhook_ent_test.go @@ -42,13 +42,13 @@ func TestHandler_MutateWithNamespaces(t *testing.T) { cases := []struct { Name string - Handler Handler + Handler ConnectWebhook Req admission.Request ExpectedNamespaces []string }{ { Name: "single destination namespace 'default' from k8s 'default'", - Handler: Handler{ + Handler: ConnectWebhook{ Log: logrtest.TestLogger{T: t}, AllowK8sNamespacesSet: mapset.NewSet("*"), DenyK8sNamespacesSet: mapset.NewSet(), @@ -70,7 +70,7 @@ func TestHandler_MutateWithNamespaces(t *testing.T) { { Name: "single destination namespace 'default' from k8s 'non-default'", - Handler: Handler{ + Handler: ConnectWebhook{ Log: logrtest.TestLogger{T: t}, AllowK8sNamespacesSet: mapset.NewSet("*"), DenyK8sNamespacesSet: mapset.NewSet(), @@ -92,7 +92,7 @@ func TestHandler_MutateWithNamespaces(t *testing.T) { { Name: "single destination namespace 'dest' from k8s 'default'", - Handler: Handler{ + Handler: ConnectWebhook{ Log: logrtest.TestLogger{T: t}, AllowK8sNamespacesSet: mapset.NewSet("*"), DenyK8sNamespacesSet: mapset.NewSet(), @@ -114,7 +114,7 @@ func TestHandler_MutateWithNamespaces(t *testing.T) { { Name: "single destination namespace 'dest' from k8s 'non-default'", - Handler: Handler{ + Handler: ConnectWebhook{ Log: logrtest.TestLogger{T: 
t}, AllowK8sNamespacesSet: mapset.NewSet("*"), DenyK8sNamespacesSet: mapset.NewSet(), @@ -136,7 +136,7 @@ func TestHandler_MutateWithNamespaces(t *testing.T) { { Name: "mirroring from k8s 'default'", - Handler: Handler{ + Handler: ConnectWebhook{ Log: logrtest.TestLogger{T: t}, AllowK8sNamespacesSet: mapset.NewSet("*"), DenyK8sNamespacesSet: mapset.NewSet(), @@ -159,7 +159,7 @@ func TestHandler_MutateWithNamespaces(t *testing.T) { { Name: "mirroring from k8s 'dest'", - Handler: Handler{ + Handler: ConnectWebhook{ Log: logrtest.TestLogger{T: t}, AllowK8sNamespacesSet: mapset.NewSet("*"), DenyK8sNamespacesSet: mapset.NewSet(), @@ -182,7 +182,7 @@ func TestHandler_MutateWithNamespaces(t *testing.T) { { Name: "mirroring with prefix from k8s 'default'", - Handler: Handler{ + Handler: ConnectWebhook{ Log: logrtest.TestLogger{T: t}, AllowK8sNamespacesSet: mapset.NewSet("*"), DenyK8sNamespacesSet: mapset.NewSet(), @@ -206,7 +206,7 @@ func TestHandler_MutateWithNamespaces(t *testing.T) { { Name: "mirroring with prefix from k8s 'dest'", - Handler: Handler{ + Handler: ConnectWebhook{ Log: logrtest.TestLogger{T: t}, AllowK8sNamespacesSet: mapset.NewSet("*"), DenyK8sNamespacesSet: mapset.NewSet(), @@ -298,13 +298,13 @@ func TestHandler_MutateWithNamespaces_ACLs(t *testing.T) { cases := []struct { Name string - Handler Handler + Handler ConnectWebhook Req admission.Request ExpectedNamespaces []string }{ { Name: "acls + single destination namespace 'default' from k8s 'default'", - Handler: Handler{ + Handler: ConnectWebhook{ Log: logrtest.TestLogger{T: t}, AllowK8sNamespacesSet: mapset.NewSet("*"), DenyK8sNamespacesSet: mapset.NewSet(), @@ -327,7 +327,7 @@ func TestHandler_MutateWithNamespaces_ACLs(t *testing.T) { { Name: "acls + single destination namespace 'default' from k8s 'non-default'", - Handler: Handler{ + Handler: ConnectWebhook{ Log: logrtest.TestLogger{T: t}, AllowK8sNamespacesSet: mapset.NewSet("*"), DenyK8sNamespacesSet: mapset.NewSet(), @@ -350,7 +350,7 @@ func TestHandler_MutateWithNamespaces_ACLs(t *testing.T) { { Name: "acls + single destination namespace 'dest' from k8s 'default'", - Handler: Handler{ + Handler: ConnectWebhook{ Log: logrtest.TestLogger{T: t}, AllowK8sNamespacesSet: mapset.NewSet("*"), DenyK8sNamespacesSet: mapset.NewSet(), @@ -373,7 +373,7 @@ func TestHandler_MutateWithNamespaces_ACLs(t *testing.T) { { Name: "acls + single destination namespace 'dest' from k8s 'non-default'", - Handler: Handler{ + Handler: ConnectWebhook{ Log: logrtest.TestLogger{T: t}, AllowK8sNamespacesSet: mapset.NewSet("*"), DenyK8sNamespacesSet: mapset.NewSet(), @@ -396,7 +396,7 @@ func TestHandler_MutateWithNamespaces_ACLs(t *testing.T) { { Name: "acls + mirroring from k8s 'default'", - Handler: Handler{ + Handler: ConnectWebhook{ Log: logrtest.TestLogger{T: t}, AllowK8sNamespacesSet: mapset.NewSet("*"), DenyK8sNamespacesSet: mapset.NewSet(), @@ -420,7 +420,7 @@ func TestHandler_MutateWithNamespaces_ACLs(t *testing.T) { { Name: "acls + mirroring from k8s 'dest'", - Handler: Handler{ + Handler: ConnectWebhook{ Log: logrtest.TestLogger{T: t}, AllowK8sNamespacesSet: mapset.NewSet("*"), DenyK8sNamespacesSet: mapset.NewSet(), @@ -444,7 +444,7 @@ func TestHandler_MutateWithNamespaces_ACLs(t *testing.T) { { Name: "acls + mirroring with prefix from k8s 'default'", - Handler: Handler{ + Handler: ConnectWebhook{ Log: logrtest.TestLogger{T: t}, AllowK8sNamespacesSet: mapset.NewSet("*"), DenyK8sNamespacesSet: mapset.NewSet(), @@ -469,7 +469,7 @@ func TestHandler_MutateWithNamespaces_ACLs(t *testing.T) { { 
Name: "acls + mirroring with prefix from k8s 'dest'", - Handler: Handler{ + Handler: ConnectWebhook{ Log: logrtest.TestLogger{T: t}, AllowK8sNamespacesSet: mapset.NewSet("*"), DenyK8sNamespacesSet: mapset.NewSet(), @@ -640,7 +640,7 @@ func TestHandler_MutateWithNamespaces_Annotation(t *testing.T) { }) require.NoError(err) - handler := Handler{ + handler := ConnectWebhook{ Log: logrtest.TestLogger{T: t}, AllowK8sNamespacesSet: mapset.NewSet("*"), DenyK8sNamespacesSet: mapset.NewSet(), diff --git a/control-plane/connect-inject/handler_test.go b/control-plane/connect-inject/connect_webhook_test.go similarity index 98% rename from control-plane/connect-inject/handler_test.go rename to control-plane/connect-inject/connect_webhook_test.go index 9eaf5d89d9..92bae3d872 100644 --- a/control-plane/connect-inject/handler_test.go +++ b/control-plane/connect-inject/connect_webhook_test.go @@ -41,14 +41,14 @@ func TestHandlerHandle(t *testing.T) { cases := []struct { Name string - Handler Handler + Handler ConnectWebhook Req admission.Request Err string // expected error string, not exact Patches []jsonpatch.Operation }{ { "kube-system namespace", - Handler{ + ConnectWebhook{ Log: logrtest.TestLogger{T: t}, AllowK8sNamespacesSet: mapset.NewSetWith("*"), DenyK8sNamespacesSet: mapset.NewSet(), @@ -68,7 +68,7 @@ func TestHandlerHandle(t *testing.T) { { "already injected", - Handler{ + ConnectWebhook{ Log: logrtest.TestLogger{T: t}, AllowK8sNamespacesSet: mapset.NewSetWith("*"), DenyK8sNamespacesSet: mapset.NewSet(), @@ -92,7 +92,7 @@ func TestHandlerHandle(t *testing.T) { { "empty pod basic", - Handler{ + ConnectWebhook{ Log: logrtest.TestLogger{T: t}, AllowK8sNamespacesSet: mapset.NewSetWith("*"), DenyK8sNamespacesSet: mapset.NewSet(), @@ -134,7 +134,7 @@ func TestHandlerHandle(t *testing.T) { { "pod with upstreams specified", - Handler{ + ConnectWebhook{ Log: logrtest.TestLogger{T: t}, AllowK8sNamespacesSet: mapset.NewSetWith("*"), DenyK8sNamespacesSet: mapset.NewSet(), @@ -189,7 +189,7 @@ func TestHandlerHandle(t *testing.T) { { "empty pod with injection disabled", - Handler{ + ConnectWebhook{ Log: logrtest.TestLogger{T: t}, AllowK8sNamespacesSet: mapset.NewSetWith("*"), DenyK8sNamespacesSet: mapset.NewSet(), @@ -215,7 +215,7 @@ func TestHandlerHandle(t *testing.T) { { "empty pod with injection truthy", - Handler{ + ConnectWebhook{ Log: logrtest.TestLogger{T: t}, AllowK8sNamespacesSet: mapset.NewSetWith("*"), DenyK8sNamespacesSet: mapset.NewSet(), @@ -266,7 +266,7 @@ func TestHandlerHandle(t *testing.T) { { "pod with empty volume mount annotation", - Handler{ + ConnectWebhook{ Log: logrtest.TestLogger{T: t}, AllowK8sNamespacesSet: mapset.NewSetWith("*"), DenyK8sNamespacesSet: mapset.NewSet(), @@ -316,7 +316,7 @@ func TestHandlerHandle(t *testing.T) { }, { "pod with volume mount annotation", - Handler{ + ConnectWebhook{ Log: logrtest.TestLogger{T: t}, AllowK8sNamespacesSet: mapset.NewSetWith("*"), DenyK8sNamespacesSet: mapset.NewSet(), @@ -387,7 +387,7 @@ func TestHandlerHandle(t *testing.T) { { "pod with service annotation", - Handler{ + ConnectWebhook{ Log: logrtest.TestLogger{T: t}, AllowK8sNamespacesSet: mapset.NewSetWith("*"), DenyK8sNamespacesSet: mapset.NewSet(), @@ -438,7 +438,7 @@ func TestHandlerHandle(t *testing.T) { { "pod with existing label", - Handler{ + ConnectWebhook{ Log: logrtest.TestLogger{T: t}, AllowK8sNamespacesSet: mapset.NewSetWith("*"), DenyK8sNamespacesSet: mapset.NewSet(), @@ -489,7 +489,7 @@ func TestHandlerHandle(t *testing.T) { { "when metrics merging is enabled, we should 
inject the consul-sidecar and add prometheus annotations", - Handler{ + ConnectWebhook{ Log: logrtest.TestLogger{T: t}, AllowK8sNamespacesSet: mapset.NewSetWith("*"), DenyK8sNamespacesSet: mapset.NewSet(), @@ -567,7 +567,7 @@ func TestHandlerHandle(t *testing.T) { { "tproxy with overwriteProbes is enabled", - Handler{ + ConnectWebhook{ Log: logrtest.TestLogger{T: t}, AllowK8sNamespacesSet: mapset.NewSetWith("*"), DenyK8sNamespacesSet: mapset.NewSet(), @@ -648,7 +648,7 @@ func TestHandlerHandle(t *testing.T) { }, { "multi port pod", - Handler{ + ConnectWebhook{ Log: logrtest.TestLogger{T: t}, AllowK8sNamespacesSet: mapset.NewSetWith("*"), DenyK8sNamespacesSet: mapset.NewSet(), @@ -760,7 +760,7 @@ func TestHandler_ErrorsOnDeprecatedAnnotations(t *testing.T) { decoder, err := admission.NewDecoder(s) require.NoError(err) - handler := Handler{ + handler := ConnectWebhook{ Log: logrtest.TestLogger{T: t}, AllowK8sNamespacesSet: mapset.NewSetWith("*"), DenyK8sNamespacesSet: mapset.NewSet(), @@ -917,7 +917,7 @@ func TestHandlerDefaultAnnotations(t *testing.T) { podJson, err := json.Marshal(tt.Pod) require.NoError(err) - var h Handler + var h ConnectWebhook err = h.defaultAnnotations(tt.Pod, string(podJson)) if (tt.Err != "") != (err != nil) { t.Fatalf("actual: %v, expected err: %v", err, tt.Err) @@ -939,12 +939,12 @@ func TestHandlerDefaultAnnotations(t *testing.T) { func TestHandlerPrometheusAnnotations(t *testing.T) { cases := []struct { Name string - Handler Handler + Handler ConnectWebhook Expected map[string]string }{ { Name: "Sets the correct prometheus annotations on the pod if metrics are enabled", - Handler: Handler{ + Handler: ConnectWebhook{ MetricsConfig: MetricsConfig{ DefaultEnableMetrics: true, DefaultPrometheusScrapePort: "20200", @@ -959,7 +959,7 @@ func TestHandlerPrometheusAnnotations(t *testing.T) { }, { Name: "Does not set annotations if metrics are not enabled", - Handler: Handler{ + Handler: ConnectWebhook{ MetricsConfig: MetricsConfig{ DefaultEnableMetrics: false, DefaultPrometheusScrapePort: "20200", @@ -1157,7 +1157,7 @@ func TestConsulNamespace(t *testing.T) { t.Run(tt.Name, func(t *testing.T) { require := require.New(t) - h := Handler{ + h := ConnectWebhook{ EnableNamespaces: tt.EnableNamespaces, ConsulDestinationNamespace: tt.ConsulDestinationNamespace, EnableK8SNSMirroring: tt.EnableK8SNSMirroring, @@ -1459,7 +1459,7 @@ func TestShouldInject(t *testing.T) { t.Run(tt.Name, func(t *testing.T) { require := require.New(t) - h := Handler{ + h := ConnectWebhook{ RequireAnnotation: false, EnableNamespaces: tt.EnableNamespaces, AllowK8sNamespacesSet: tt.AllowK8sNamespacesSet, @@ -1765,7 +1765,7 @@ func TestOverwriteProbes(t *testing.T) { pod.ObjectMeta.Annotations = c.additionalAnnotations } - h := Handler{ + h := ConnectWebhook{ EnableTransparentProxy: c.tproxyEnabled, TProxyOverwriteProbes: c.overwriteProbes, } @@ -1811,7 +1811,7 @@ func TestHandler_checkUnsupportedMultiPortCases(t *testing.T) { } for _, tt := range cases { t.Run(tt.name, func(t *testing.T) { - h := Handler{} + h := ConnectWebhook{} pod := minimal() pod.Annotations = tt.annotations err := h.checkUnsupportedMultiPortCases(corev1.Namespace{}, *pod) diff --git a/control-plane/connect-inject/consul_sidecar.go b/control-plane/connect-inject/consul_sidecar.go index d296980988..2f2bac34b8 100644 --- a/control-plane/connect-inject/consul_sidecar.go +++ b/control-plane/connect-inject/consul_sidecar.go @@ -11,13 +11,13 @@ import ( // the metrics merging server when metrics merging feature is enabled. 
// It always disables service registration because for connect we no longer // need to keep services registered as this is handled in the endpoints-controller. -func (h *Handler) consulSidecar(pod corev1.Pod) (corev1.Container, error) { - metricsPorts, err := h.MetricsConfig.mergedMetricsServerConfiguration(pod) +func (w *ConnectWebhook) consulSidecar(pod corev1.Pod) (corev1.Container, error) { + metricsPorts, err := w.MetricsConfig.mergedMetricsServerConfiguration(pod) if err != nil { return corev1.Container{}, err } - resources, err := h.consulSidecarResources(pod) + resources, err := w.consulSidecarResources(pod) if err != nil { return corev1.Container{}, err } @@ -30,13 +30,13 @@ func (h *Handler) consulSidecar(pod corev1.Pod) (corev1.Container, error) { fmt.Sprintf("-merged-metrics-port=%s", metricsPorts.mergedPort), fmt.Sprintf("-service-metrics-port=%s", metricsPorts.servicePort), fmt.Sprintf("-service-metrics-path=%s", metricsPorts.servicePath), - fmt.Sprintf("-log-level=%s", h.LogLevel), - fmt.Sprintf("-log-json=%t", h.LogJSON), + fmt.Sprintf("-log-level=%s", w.LogLevel), + fmt.Sprintf("-log-json=%t", w.LogJSON), } return corev1.Container{ Name: "consul-sidecar", - Image: h.ImageConsulK8S, + Image: w.ImageConsulK8S, VolumeMounts: []corev1.VolumeMount{ { Name: volumeName, @@ -48,7 +48,7 @@ func (h *Handler) consulSidecar(pod corev1.Pod) (corev1.Container, error) { }, nil } -func (h *Handler) consulSidecarResources(pod corev1.Pod) (corev1.ResourceRequirements, error) { +func (w *ConnectWebhook) consulSidecarResources(pod corev1.Pod) (corev1.ResourceRequirements, error) { resources := corev1.ResourceRequirements{ Limits: corev1.ResourceList{}, Requests: corev1.ResourceList{}, @@ -74,8 +74,8 @@ func (h *Handler) consulSidecarResources(pod corev1.Pod) (corev1.ResourceRequire return corev1.ResourceRequirements{}, fmt.Errorf("parsing annotation %s:%q: %s", annotationConsulSidecarCPULimit, anno, err) } resources.Limits[corev1.ResourceCPU] = cpuLimit - } else if h.DefaultConsulSidecarResources.Limits[corev1.ResourceCPU] != zeroQuantity { - resources.Limits[corev1.ResourceCPU] = h.DefaultConsulSidecarResources.Limits[corev1.ResourceCPU] + } else if w.DefaultConsulSidecarResources.Limits[corev1.ResourceCPU] != zeroQuantity { + resources.Limits[corev1.ResourceCPU] = w.DefaultConsulSidecarResources.Limits[corev1.ResourceCPU] } // CPU Request. @@ -85,8 +85,8 @@ func (h *Handler) consulSidecarResources(pod corev1.Pod) (corev1.ResourceRequire return corev1.ResourceRequirements{}, fmt.Errorf("parsing annotation %s:%q: %s", annotationConsulSidecarCPURequest, anno, err) } resources.Requests[corev1.ResourceCPU] = cpuRequest - } else if h.DefaultConsulSidecarResources.Requests[corev1.ResourceCPU] != zeroQuantity { - resources.Requests[corev1.ResourceCPU] = h.DefaultConsulSidecarResources.Requests[corev1.ResourceCPU] + } else if w.DefaultConsulSidecarResources.Requests[corev1.ResourceCPU] != zeroQuantity { + resources.Requests[corev1.ResourceCPU] = w.DefaultConsulSidecarResources.Requests[corev1.ResourceCPU] } // Memory Limit. 
@@ -96,8 +96,8 @@ func (h *Handler) consulSidecarResources(pod corev1.Pod) (corev1.ResourceRequire return corev1.ResourceRequirements{}, fmt.Errorf("parsing annotation %s:%q: %s", annotationConsulSidecarMemoryLimit, anno, err) } resources.Limits[corev1.ResourceMemory] = memoryLimit - } else if h.DefaultConsulSidecarResources.Limits[corev1.ResourceMemory] != zeroQuantity { - resources.Limits[corev1.ResourceMemory] = h.DefaultConsulSidecarResources.Limits[corev1.ResourceMemory] + } else if w.DefaultConsulSidecarResources.Limits[corev1.ResourceMemory] != zeroQuantity { + resources.Limits[corev1.ResourceMemory] = w.DefaultConsulSidecarResources.Limits[corev1.ResourceMemory] } // Memory Request. @@ -107,8 +107,8 @@ func (h *Handler) consulSidecarResources(pod corev1.Pod) (corev1.ResourceRequire return corev1.ResourceRequirements{}, fmt.Errorf("parsing annotation %s:%q: %s", annotationConsulSidecarMemoryRequest, anno, err) } resources.Requests[corev1.ResourceMemory] = memoryRequest - } else if h.DefaultConsulSidecarResources.Requests[corev1.ResourceMemory] != zeroQuantity { - resources.Requests[corev1.ResourceMemory] = h.DefaultConsulSidecarResources.Requests[corev1.ResourceMemory] + } else if w.DefaultConsulSidecarResources.Requests[corev1.ResourceMemory] != zeroQuantity { + resources.Requests[corev1.ResourceMemory] = w.DefaultConsulSidecarResources.Requests[corev1.ResourceMemory] } return resources, nil diff --git a/control-plane/connect-inject/consul_sidecar_test.go b/control-plane/connect-inject/consul_sidecar_test.go index da3cd5c7e1..fac2454991 100644 --- a/control-plane/connect-inject/consul_sidecar_test.go +++ b/control-plane/connect-inject/consul_sidecar_test.go @@ -13,7 +13,7 @@ import ( // Test that if the conditions for running a merged metrics server are true, // that we pass the metrics flags to consul sidecar. 
func TestConsulSidecar_MetricsFlags(t *testing.T) { - handler := Handler{ + handler := ConnectWebhook{ Log: logrtest.TestLogger{T: t}, ImageConsulK8S: "hashicorp/consul-k8s:9.9.9", MetricsConfig: MetricsConfig{ @@ -53,13 +53,13 @@ func TestHandlerConsulSidecar_Resources(t *testing.T) { zero := resource.MustParse("0") cases := map[string]struct { - handler Handler + handler ConnectWebhook annotations map[string]string expResources corev1.ResourceRequirements expErr string }{ "no defaults, no annotations": { - handler: Handler{ + handler: ConnectWebhook{ Log: logrtest.TestLogger{T: t}, ImageConsulK8S: "hashicorp/consul-k8s:9.9.9", MetricsConfig: MetricsConfig{ @@ -78,7 +78,7 @@ func TestHandlerConsulSidecar_Resources(t *testing.T) { }, }, "all defaults, no annotations": { - handler: Handler{ + handler: ConnectWebhook{ Log: logrtest.TestLogger{T: t}, ImageConsulK8S: "hashicorp/consul-k8s:9.9.9", MetricsConfig: MetricsConfig{ @@ -113,7 +113,7 @@ func TestHandlerConsulSidecar_Resources(t *testing.T) { }, }, "no defaults, all annotations": { - handler: Handler{ + handler: ConnectWebhook{ Log: logrtest.TestLogger{T: t}, ImageConsulK8S: "hashicorp/consul-k8s:9.9.9", MetricsConfig: MetricsConfig{ @@ -142,7 +142,7 @@ func TestHandlerConsulSidecar_Resources(t *testing.T) { }, }, "annotations override defaults": { - handler: Handler{ + handler: ConnectWebhook{ Log: logrtest.TestLogger{T: t}, ImageConsulK8S: "hashicorp/consul-k8s:9.9.9", MetricsConfig: MetricsConfig{ @@ -181,7 +181,7 @@ func TestHandlerConsulSidecar_Resources(t *testing.T) { }, }, "defaults set to zero, no annotations": { - handler: Handler{ + handler: ConnectWebhook{ Log: logrtest.TestLogger{T: t}, ImageConsulK8S: "hashicorp/consul-k8s:9.9.9", MetricsConfig: MetricsConfig{ @@ -216,7 +216,7 @@ func TestHandlerConsulSidecar_Resources(t *testing.T) { }, }, "annotations set to 0": { - handler: Handler{ + handler: ConnectWebhook{ Log: logrtest.TestLogger{T: t}, ImageConsulK8S: "hashicorp/consul-k8s:9.9.9", MetricsConfig: MetricsConfig{ @@ -245,7 +245,7 @@ func TestHandlerConsulSidecar_Resources(t *testing.T) { }, }, "invalid cpu request": { - handler: Handler{ + handler: ConnectWebhook{ Log: logrtest.TestLogger{T: t}, ImageConsulK8S: "hashicorp/consul-k8s:9.9.9", MetricsConfig: MetricsConfig{ @@ -262,7 +262,7 @@ func TestHandlerConsulSidecar_Resources(t *testing.T) { expErr: "parsing annotation consul.hashicorp.com/consul-sidecar-cpu-request:\"invalid\": quantities must match the regular expression", }, "invalid cpu limit": { - handler: Handler{ + handler: ConnectWebhook{ Log: logrtest.TestLogger{T: t}, ImageConsulK8S: "hashicorp/consul-k8s:9.9.9", MetricsConfig: MetricsConfig{ @@ -279,7 +279,7 @@ func TestHandlerConsulSidecar_Resources(t *testing.T) { expErr: "parsing annotation consul.hashicorp.com/consul-sidecar-cpu-limit:\"invalid\": quantities must match the regular expression", }, "invalid memory request": { - handler: Handler{ + handler: ConnectWebhook{ Log: logrtest.TestLogger{T: t}, ImageConsulK8S: "hashicorp/consul-k8s:9.9.9", MetricsConfig: MetricsConfig{ @@ -296,7 +296,7 @@ func TestHandlerConsulSidecar_Resources(t *testing.T) { expErr: "parsing annotation consul.hashicorp.com/consul-sidecar-memory-request:\"invalid\": quantities must match the regular expression", }, "invalid memory limit": { - handler: Handler{ + handler: ConnectWebhook{ Log: logrtest.TestLogger{T: t}, ImageConsulK8S: "hashicorp/consul-k8s:9.9.9", MetricsConfig: MetricsConfig{ diff --git a/control-plane/connect-inject/container_env.go 
b/control-plane/connect-inject/container_env.go index f94e3834ee..f20976d5f9 100644 --- a/control-plane/connect-inject/container_env.go +++ b/control-plane/connect-inject/container_env.go @@ -8,7 +8,7 @@ import ( corev1 "k8s.io/api/core/v1" ) -func (h *Handler) containerEnvVars(pod corev1.Pod) []corev1.EnvVar { +func (w *ConnectWebhook) containerEnvVars(pod corev1.Pod) []corev1.EnvVar { raw, ok := pod.Annotations[annotationUpstreams] if !ok || raw == "" { return []corev1.EnvVar{} diff --git a/control-plane/connect-inject/container_env_test.go b/control-plane/connect-inject/container_env_test.go index 969b28e166..41eb45d71b 100644 --- a/control-plane/connect-inject/container_env_test.go +++ b/control-plane/connect-inject/container_env_test.go @@ -28,7 +28,7 @@ func TestContainerEnvVars(t *testing.T) { t.Run(tt.Name, func(t *testing.T) { require := require.New(t) - var h Handler + var h ConnectWebhook envVars := h.containerEnvVars(corev1.Pod{ ObjectMeta: metav1.ObjectMeta{ Annotations: map[string]string{ diff --git a/control-plane/connect-inject/container_init.go b/control-plane/connect-inject/container_init.go index 4eedb78090..3c4974784a 100644 --- a/control-plane/connect-inject/container_init.go +++ b/control-plane/connect-inject/container_init.go @@ -93,13 +93,13 @@ type initContainerCommandData struct { // initCopyContainer returns the init container spec for the copy container which places // the consul binary into the shared volume. -func (h *Handler) initCopyContainer() corev1.Container { +func (w *ConnectWebhook) initCopyContainer() corev1.Container { // Copy the Consul binary from the image to the shared volume. cmd := "cp /bin/consul /consul/connect-inject/consul" container := corev1.Container{ Name: InjectInitCopyContainerName, - Image: h.ImageConsul, - Resources: h.InitContainerResources, + Image: w.ImageConsul, + Resources: w.InitContainerResources, VolumeMounts: []corev1.VolumeMount{ { Name: volumeName, @@ -109,7 +109,7 @@ func (h *Handler) initCopyContainer() corev1.Container { Command: []string{"/bin/sh", "-ec", cmd}, } // If running on OpenShift, don't set the security context and instead let OpenShift set a random user/group for us. - if !h.EnableOpenShift { + if !w.EnableOpenShift { container.SecurityContext = &corev1.SecurityContext{ // Set RunAsUser because the default user for the consul container is root and we want to run non-root. RunAsUser: pointerToInt64(copyContainerUserAndGroupID), @@ -123,14 +123,14 @@ func (h *Handler) initCopyContainer() corev1.Container { // containerInit returns the init container spec for connect-init that polls for the service and the connect proxy service to be registered // so that it can save the proxy service id to the shared volume and boostrap Envoy with the proxy-id. -func (h *Handler) containerInit(namespace corev1.Namespace, pod corev1.Pod, mpi multiPortInfo) (corev1.Container, error) { +func (w *ConnectWebhook) containerInit(namespace corev1.Namespace, pod corev1.Pod, mpi multiPortInfo) (corev1.Container, error) { // Check if tproxy is enabled on this pod. 
- tproxyEnabled, err := transparentProxyEnabled(namespace, pod, h.EnableTransparentProxy) + tproxyEnabled, err := transparentProxyEnabled(namespace, pod, w.EnableTransparentProxy) if err != nil { return corev1.Container{}, err } - dnsEnabled, err := consulDNSEnabled(namespace, pod, h.EnableConsulDNS) + dnsEnabled, err := consulDNSEnabled(namespace, pod, w.EnableConsulDNS) if err != nil { return corev1.Container{}, err } @@ -140,20 +140,20 @@ func (h *Handler) containerInit(namespace corev1.Namespace, pod corev1.Pod, mpi // If Consul DNS is enabled, we find the environment variable that has the value // of the ClusterIP of the Consul DNS Service. constructDNSServiceHostName returns // the name of the env variable whose value is the ClusterIP of the Consul DNS Service. - consulDNSClusterIP = os.Getenv(h.constructDNSServiceHostName()) + consulDNSClusterIP = os.Getenv(w.constructDNSServiceHostName()) if consulDNSClusterIP == "" { - return corev1.Container{}, fmt.Errorf("environment variable %s is not found", h.constructDNSServiceHostName()) + return corev1.Container{}, fmt.Errorf("environment variable %s is not found", w.constructDNSServiceHostName()) } } multiPort := mpi.serviceName != "" data := initContainerCommandData{ - AuthMethod: h.AuthMethod, - ConsulPartition: h.ConsulPartition, - ConsulNamespace: h.consulNamespace(namespace.Name), - NamespaceMirroringEnabled: h.EnableK8SNSMirroring, - ConsulCACert: h.ConsulCACert, + AuthMethod: w.AuthMethod, + ConsulPartition: w.ConsulPartition, + ConsulNamespace: w.consulNamespace(namespace.Name), + NamespaceMirroringEnabled: w.EnableK8SNSMirroring, + ConsulCACert: w.ConsulCACert, EnableTransparentProxy: tproxyEnabled, TProxyExcludeInboundPorts: splitCommaSeparatedItemsFromAnnotation(annotationTProxyExcludeInboundPorts, pod), TProxyExcludeOutboundPorts: splitCommaSeparatedItemsFromAnnotation(annotationTProxyExcludeOutboundPorts, pod), @@ -163,7 +163,7 @@ func (h *Handler) containerInit(namespace corev1.Namespace, pod corev1.Pod, mpi EnvoyUID: envoyUserAndGroupID, MultiPort: multiPort, EnvoyAdminPort: 19000 + mpi.serviceIndex, - ConsulAPITimeout: h.ConsulAPITimeout, + ConsulAPITimeout: w.ConsulAPITimeout, } // Create expected volume mounts @@ -179,7 +179,7 @@ func (h *Handler) containerInit(namespace corev1.Namespace, pod corev1.Pod, mpi } else { data.ServiceName = pod.Annotations[annotationService] } - if h.AuthMethod != "" { + if w.AuthMethod != "" { if multiPort { // If multi port then we require that the service account name // matches the service name. @@ -201,13 +201,13 @@ func (h *Handler) containerInit(namespace corev1.Namespace, pod corev1.Pod, mpi // This determines how to configure the consul connect envoy command: what // metrics backend to use and what path to expose on the // envoy_prometheus_bind_addr listener for scraping. 
- metricsServer, err := h.MetricsConfig.shouldRunMergedMetricsServer(pod) + metricsServer, err := w.MetricsConfig.shouldRunMergedMetricsServer(pod) if err != nil { return corev1.Container{}, err } if metricsServer { - prometheusScrapePath := h.MetricsConfig.prometheusScrapePath(pod) - mergedMetricsPort, err := h.MetricsConfig.mergedMetricsPort(pod) + prometheusScrapePath := w.MetricsConfig.prometheusScrapePath(pod) + mergedMetricsPort, err := w.MetricsConfig.mergedMetricsPort(pod) if err != nil { return corev1.Container{}, err } @@ -230,7 +230,7 @@ func (h *Handler) containerInit(namespace corev1.Namespace, pod corev1.Pod, mpi } container := corev1.Container{ Name: initContainerName, - Image: h.ImageConsulK8S, + Image: w.ImageConsulK8S, Env: []corev1.EnvVar{ { Name: "HOST_IP", @@ -257,7 +257,7 @@ func (h *Handler) containerInit(namespace corev1.Namespace, pod corev1.Pod, mpi }, }, }, - Resources: h.InitContainerResources, + Resources: w.InitContainerResources, VolumeMounts: volMounts, Command: []string{"/bin/sh", "-ec", buf.String()}, } @@ -283,8 +283,8 @@ func (h *Handler) containerInit(namespace corev1.Namespace, pod corev1.Pod, mpi // constructDNSServiceHostName use the resource prefix and the DNS Service hostname suffix to construct the // key of the env variable whose value is the cluster IP of the Consul DNS Service. // It translates "resource-prefix" into "RESOURCE_PREFIX_DNS_SERVICE_HOST". -func (h *Handler) constructDNSServiceHostName() string { - upcaseResourcePrefix := strings.ToUpper(h.ResourcePrefix) +func (w *ConnectWebhook) constructDNSServiceHostName() string { + upcaseResourcePrefix := strings.ToUpper(w.ResourcePrefix) upcaseResourcePrefixWithUnderscores := strings.ReplaceAll(upcaseResourcePrefix, "-", "_") return strings.Join([]string{upcaseResourcePrefixWithUnderscores, dnsServiceHostEnvSuffix}, "_") } diff --git a/control-plane/connect-inject/container_init_test.go b/control-plane/connect-inject/container_init_test.go index f40bf77512..1954ea2f61 100644 --- a/control-plane/connect-inject/container_init_test.go +++ b/control-plane/connect-inject/container_init_test.go @@ -46,7 +46,7 @@ func TestHandlerContainerInit(t *testing.T) { cases := []struct { Name string Pod func(*corev1.Pod) *corev1.Pod - Handler Handler + Handler ConnectWebhook Cmd string // Strings.Contains test CmdNot string // Not contains }{ @@ -58,7 +58,7 @@ func TestHandlerContainerInit(t *testing.T) { pod.Annotations[annotationService] = "web" return pod }, - Handler{}, + ConnectWebhook{}, `/bin/sh -ec export CONSUL_HTTP_ADDR="${HOST_IP}:8500" export CONSUL_GRPC_ADDR="${HOST_IP}:8502" @@ -85,7 +85,7 @@ consul-k8s-control-plane connect-init -pod-name=${POD_NAME} -pod-namespace=${POD } return pod }, - Handler{ + ConnectWebhook{ AuthMethod: "an-auth-method", ConsulAPITimeout: 5 * time.Second, }, @@ -118,7 +118,7 @@ consul-k8s-control-plane connect-init -pod-name=${POD_NAME} -pod-namespace=${POD pod.Annotations[annotationPrometheusScrapePath] = "/scrape-path" return pod }, - Handler{ + ConnectWebhook{ ConsulAPITimeout: 5 * time.Second, }, `# Generate the envoy bootstrap code @@ -287,7 +287,7 @@ func TestHandlerContainerInit_transparentProxy(t *testing.T) { } for name, c := range cases { t.Run(name, func(t *testing.T) { - h := Handler{ + h := ConnectWebhook{ EnableTransparentProxy: c.globalEnabled, ConsulAPITimeout: 5 * time.Second, } @@ -384,7 +384,7 @@ func TestHandlerContainerInit_consulDNS(t *testing.T) { } for name, c := range cases { t.Run(name, func(t *testing.T) { - h := Handler{ + h := 
ConnectWebhook{ EnableConsulDNS: c.globalEnabled, EnableTransparentProxy: true, ResourcePrefix: "consul-consul", @@ -428,7 +428,7 @@ func TestHandler_constructDNSServiceHostName(t *testing.T) { for _, c := range cases { t.Run(c.prefix, func(t *testing.T) { - h := Handler{ResourcePrefix: c.prefix, ConsulAPITimeout: 5 * time.Second} + h := ConnectWebhook{ResourcePrefix: c.prefix, ConsulAPITimeout: 5 * time.Second} require.Equal(t, c.result, h.constructDNSServiceHostName()) }) } @@ -469,7 +469,7 @@ func TestHandlerContainerInit_namespacesAndPartitionsEnabled(t *testing.T) { cases := []struct { Name string Pod func(*corev1.Pod) *corev1.Pod - Handler Handler + Handler ConnectWebhook Cmd string // Strings.Contains test }{ { @@ -478,7 +478,7 @@ func TestHandlerContainerInit_namespacesAndPartitionsEnabled(t *testing.T) { pod.Annotations[annotationService] = "web" return pod }, - Handler{ + ConnectWebhook{ EnableNamespaces: true, ConsulDestinationNamespace: "default", ConsulPartition: "", @@ -503,7 +503,7 @@ consul-k8s-control-plane connect-init -pod-name=${POD_NAME} -pod-namespace=${POD pod.Annotations[annotationService] = "web" return pod }, - Handler{ + ConnectWebhook{ EnableNamespaces: true, ConsulDestinationNamespace: "default", ConsulPartition: "default", @@ -530,7 +530,7 @@ consul-k8s-control-plane connect-init -pod-name=${POD_NAME} -pod-namespace=${POD pod.Annotations[annotationService] = "web" return pod }, - Handler{ + ConnectWebhook{ EnableNamespaces: true, ConsulDestinationNamespace: "non-default", ConsulPartition: "", @@ -555,7 +555,7 @@ consul-k8s-control-plane connect-init -pod-name=${POD_NAME} -pod-namespace=${POD pod.Annotations[annotationService] = "web" return pod }, - Handler{ + ConnectWebhook{ EnableNamespaces: true, ConsulDestinationNamespace: "non-default", ConsulPartition: "non-default-part", @@ -582,7 +582,7 @@ consul-k8s-control-plane connect-init -pod-name=${POD_NAME} -pod-namespace=${POD pod.Annotations[annotationService] = "" return pod }, - Handler{ + ConnectWebhook{ AuthMethod: "auth-method", EnableNamespaces: true, ConsulDestinationNamespace: "non-default", @@ -616,7 +616,7 @@ consul-k8s-control-plane connect-init -pod-name=${POD_NAME} -pod-namespace=${POD pod.Annotations[annotationService] = "" return pod }, - Handler{ + ConnectWebhook{ AuthMethod: "auth-method", EnableNamespaces: true, ConsulDestinationNamespace: "non-default", // Overridden by mirroring @@ -651,7 +651,7 @@ consul-k8s-control-plane connect-init -pod-name=${POD_NAME} -pod-namespace=${POD pod.Annotations[annotationService] = "web" return pod }, - Handler{ + ConnectWebhook{ EnableNamespaces: true, ConsulDestinationNamespace: "default", ConsulPartition: "", @@ -683,7 +683,7 @@ consul-k8s-control-plane connect-init -pod-name=${POD_NAME} -pod-namespace=${POD pod.Annotations[annotationService] = "web" return pod }, - Handler{ + ConnectWebhook{ EnableNamespaces: true, ConsulPartition: "default", ConsulDestinationNamespace: "non-default", @@ -719,7 +719,7 @@ consul-k8s-control-plane connect-init -pod-name=${POD_NAME} -pod-namespace=${POD pod.Annotations[annotationService] = "web" return pod }, - Handler{ + ConnectWebhook{ AuthMethod: "auth-method", EnableNamespaces: true, ConsulPartition: "non-default", @@ -818,7 +818,7 @@ func TestHandlerContainerInit_Multiport(t *testing.T) { cases := []struct { Name string Pod func(*corev1.Pod) *corev1.Pod - Handler Handler + Handler ConnectWebhook NumInitContainers int MultiPortInfos []multiPortInfo Cmd []string // Strings.Contains test @@ -828,7 +828,7 @@ func 
TestHandlerContainerInit_Multiport(t *testing.T) { func(pod *corev1.Pod) *corev1.Pod { return pod }, - Handler{ConsulAPITimeout: 5 * time.Second}, + ConnectWebhook{ConsulAPITimeout: 5 * time.Second}, 2, []multiPortInfo{ { @@ -876,7 +876,7 @@ consul-k8s-control-plane connect-init -pod-name=${POD_NAME} -pod-namespace=${POD func(pod *corev1.Pod) *corev1.Pod { return pod }, - Handler{ + ConnectWebhook{ AuthMethod: "auth-method", ConsulAPITimeout: 5 * time.Second, }, @@ -951,7 +951,7 @@ consul-k8s-control-plane connect-init -pod-name=${POD_NAME} -pod-namespace=${POD func TestHandlerContainerInit_authMethod(t *testing.T) { require := require.New(t) - h := Handler{ + h := ConnectWebhook{ AuthMethod: "release-name-consul-k8s-auth-method", ConsulAPITimeout: 5 * time.Second, } @@ -998,7 +998,7 @@ consul-k8s-control-plane connect-init -pod-name=${POD_NAME} -pod-namespace=${POD // and CA cert should be set as env variable. func TestHandlerContainerInit_WithTLS(t *testing.T) { require := require.New(t) - h := Handler{ + h := ConnectWebhook{ ConsulCACert: "consul-ca-cert", ConsulAPITimeout: 5 * time.Second, } @@ -1034,7 +1034,7 @@ export CONSUL_GRPC_ADDR="${HOST_IP}:8502"`) func TestHandlerContainerInit_Resources(t *testing.T) { require := require.New(t) - h := Handler{ + h := ConnectWebhook{ InitContainerResources: corev1.ResourceRequirements{ Requests: corev1.ResourceList{ corev1.ResourceCPU: resource.MustParse("10m"), @@ -1082,7 +1082,7 @@ func TestHandlerInitCopyContainer(t *testing.T) { for _, openShiftEnabled := range openShiftEnabledCases { t.Run(fmt.Sprintf("openshift enabled: %t", openShiftEnabled), func(t *testing.T) { - h := Handler{EnableOpenShift: openShiftEnabled, ConsulAPITimeout: 5 * time.Second} + h := ConnectWebhook{EnableOpenShift: openShiftEnabled, ConsulAPITimeout: 5 * time.Second} container := h.initCopyContainer() diff --git a/control-plane/connect-inject/container_volume.go b/control-plane/connect-inject/container_volume.go index e3a70676f1..c9e794b45b 100644 --- a/control-plane/connect-inject/container_volume.go +++ b/control-plane/connect-inject/container_volume.go @@ -10,7 +10,7 @@ const volumeName = "consul-connect-inject-data" // containerVolume returns the volume data to add to the pod. This volume // is used for shared data between containers. -func (h *Handler) containerVolume() corev1.Volume { +func (w *ConnectWebhook) containerVolume() corev1.Volume { return corev1.Volume{ Name: volumeName, VolumeSource: corev1.VolumeSource{ diff --git a/control-plane/connect-inject/endpoints_controller_test.go b/control-plane/connect-inject/endpoints_controller_test.go index aaac91547c..c1ded649e8 100644 --- a/control-plane/connect-inject/endpoints_controller_test.go +++ b/control-plane/connect-inject/endpoints_controller_test.go @@ -123,7 +123,7 @@ func TestProcessUpstreamsTLSandACLs(t *testing.T) { masterToken := "b78d37c7-0ca7-5f4d-99ee-6d9975ce4586" caFile, certFile, keyFile := test.GenerateServerCerts(t) - // Create test consul server with ACLs and TLS + // Create test consul server with ACLs and TLS. consul, err := testutil.NewTestServerConfigT(t, func(c *testutil.TestServerConfig) { c.ACL.Enabled = true c.ACL.DefaultPolicy = "deny" @@ -1018,7 +1018,7 @@ func TestReconcileCreateEndpoint_MultiportService(t *testing.T) { fakeClient := fake.NewClientBuilder().WithRuntimeObjects(k8sObjects...).Build() - // Create test consul server + // Create test consul server. 
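For orientation, these test hunks all rely on the same consul/sdk testutil pattern, with most of the wiring elided in this patch. A minimal sketch of the full setup; the deferred Stop, the WaitForLeader call, and the client construction are assumptions based on common testutil usage, not lines from this diff:

	// Spin up a throwaway Consul agent for the test and point an API client at it.
	consul, err := testutil.NewTestServerConfigT(t, func(c *testutil.TestServerConfig) {
		c.NodeName = nodeName // the node name the controller under test expects
	})
	require.NoError(t, err)
	defer consul.Stop()     // assumed: stop the agent when the test finishes
	consul.WaitForLeader(t) // assumed: block until the agent can serve requests

	consulClient, err := api.NewClient(&api.Config{Address: consul.HTTPAddr})
	require.NoError(t, err)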
consul, err := testutil.NewTestServerConfigT(t, func(c *testutil.TestServerConfig) { c.NodeName = nodeName }) @@ -1683,7 +1683,7 @@ func TestReconcileCreateEndpoint(t *testing.T) { fakeClient := fake.NewClientBuilder().WithRuntimeObjects(k8sObjects...).Build() - // Create test consul server + // Create test consul server. consul, err := testutil.NewTestServerConfigT(t, func(c *testutil.TestServerConfig) { c.NodeName = nodeName }) diff --git a/control-plane/connect-inject/envoy_sidecar.go b/control-plane/connect-inject/envoy_sidecar.go index 02d3869556..716b4d4ef5 100644 --- a/control-plane/connect-inject/envoy_sidecar.go +++ b/control-plane/connect-inject/envoy_sidecar.go @@ -10,14 +10,14 @@ import ( "k8s.io/apimachinery/pkg/api/resource" ) -func (h *Handler) envoySidecar(namespace corev1.Namespace, pod corev1.Pod, mpi multiPortInfo) (corev1.Container, error) { - resources, err := h.envoySidecarResources(pod) +func (w *ConnectWebhook) envoySidecar(namespace corev1.Namespace, pod corev1.Pod, mpi multiPortInfo) (corev1.Container, error) { + resources, err := w.envoySidecarResources(pod) if err != nil { return corev1.Container{}, err } multiPort := mpi.serviceName != "" - cmd, err := h.getContainerSidecarCommand(pod, mpi.serviceName, mpi.serviceIndex) + cmd, err := w.getContainerSidecarCommand(pod, mpi.serviceName, mpi.serviceIndex) if err != nil { return corev1.Container{}, err } @@ -29,7 +29,7 @@ func (h *Handler) envoySidecar(namespace corev1.Namespace, pod corev1.Pod, mpi m container := corev1.Container{ Name: containerName, - Image: h.ImageEnvoy, + Image: w.ImageEnvoy, Env: []corev1.EnvVar{ { Name: "HOST_IP", @@ -48,7 +48,7 @@ func (h *Handler) envoySidecar(namespace corev1.Namespace, pod corev1.Pod, mpi m Command: cmd, } - tproxyEnabled, err := transparentProxyEnabled(namespace, pod, h.EnableTransparentProxy) + tproxyEnabled, err := transparentProxyEnabled(namespace, pod, w.EnableTransparentProxy) if err != nil { return corev1.Container{}, err } @@ -57,7 +57,7 @@ func (h *Handler) envoySidecar(namespace corev1.Namespace, pod corev1.Pod, mpi m // skip setting the security context and let OpenShift set it for us. // When transparent proxy is enabled, then Envoy needs to run as our specific user // so that traffic redirection will work. - if tproxyEnabled || !h.EnableOpenShift { + if tproxyEnabled || !w.EnableOpenShift { if pod.Spec.SecurityContext != nil { // User container and Envoy container cannot have the same UID. if pod.Spec.SecurityContext.RunAsUser != nil && *pod.Spec.SecurityContext.RunAsUser == envoyUserAndGroupID { @@ -68,7 +68,7 @@ func (h *Handler) envoySidecar(namespace corev1.Namespace, pod corev1.Pod, mpi m // has only injected init containers so all containers defined in pod.Spec.Containers are from the user. for _, c := range pod.Spec.Containers { // User container and Envoy container cannot have the same UID. 
- if c.SecurityContext != nil && c.SecurityContext.RunAsUser != nil && *c.SecurityContext.RunAsUser == envoyUserAndGroupID && c.Image != h.ImageEnvoy { + if c.SecurityContext != nil && c.SecurityContext.RunAsUser != nil && *c.SecurityContext.RunAsUser == envoyUserAndGroupID && c.Image != w.ImageEnvoy { return corev1.Container{}, fmt.Errorf("container %q has runAsUser set to the same uid %q as envoy which is not allowed", c.Name, envoyUserAndGroupID) } } @@ -82,7 +82,7 @@ func (h *Handler) envoySidecar(namespace corev1.Namespace, pod corev1.Pod, mpi m return container, nil } -func (h *Handler) getContainerSidecarCommand(pod corev1.Pod, multiPortSvcName string, multiPortSvcIdx int) ([]string, error) { +func (w *ConnectWebhook) getContainerSidecarCommand(pod corev1.Pod, multiPortSvcName string, multiPortSvcIdx int) ([]string, error) { bootstrapFile := "/consul/connect-inject/envoy-bootstrap.yaml" if multiPortSvcName != "" { bootstrapFile = fmt.Sprintf("/consul/connect-inject/envoy-bootstrap-%s.yaml", multiPortSvcName) @@ -98,9 +98,8 @@ func (h *Handler) getContainerSidecarCommand(pod corev1.Pod, multiPortSvcName st extraArgs, annotationSet := pod.Annotations[annotationEnvoyExtraArgs] - if annotationSet || h.EnvoyExtraArgs != "" { - - extraArgsToUse := h.EnvoyExtraArgs + if annotationSet || w.EnvoyExtraArgs != "" { + extraArgsToUse := w.EnvoyExtraArgs // Prefer args set by pod annotation over the flag to the consul-k8s binary (h.EnvoyExtraArgs). if annotationSet { @@ -123,7 +122,7 @@ func (h *Handler) getContainerSidecarCommand(pod corev1.Pod, multiPortSvcName st return cmd, nil } -func (h *Handler) envoySidecarResources(pod corev1.Pod) (corev1.ResourceRequirements, error) { +func (w *ConnectWebhook) envoySidecarResources(pod corev1.Pod) (corev1.ResourceRequirements, error) { resources := corev1.ResourceRequirements{ Limits: corev1.ResourceList{}, Requests: corev1.ResourceList{}, @@ -149,8 +148,8 @@ func (h *Handler) envoySidecarResources(pod corev1.Pod) (corev1.ResourceRequirem return corev1.ResourceRequirements{}, fmt.Errorf("parsing annotation %s:%q: %s", annotationSidecarProxyCPULimit, anno, err) } resources.Limits[corev1.ResourceCPU] = cpuLimit - } else if h.DefaultProxyCPULimit != zeroQuantity { - resources.Limits[corev1.ResourceCPU] = h.DefaultProxyCPULimit + } else if w.DefaultProxyCPULimit != zeroQuantity { + resources.Limits[corev1.ResourceCPU] = w.DefaultProxyCPULimit } // CPU Request. @@ -160,8 +159,8 @@ func (h *Handler) envoySidecarResources(pod corev1.Pod) (corev1.ResourceRequirem return corev1.ResourceRequirements{}, fmt.Errorf("parsing annotation %s:%q: %s", annotationSidecarProxyCPURequest, anno, err) } resources.Requests[corev1.ResourceCPU] = cpuRequest - } else if h.DefaultProxyCPURequest != zeroQuantity { - resources.Requests[corev1.ResourceCPU] = h.DefaultProxyCPURequest + } else if w.DefaultProxyCPURequest != zeroQuantity { + resources.Requests[corev1.ResourceCPU] = w.DefaultProxyCPURequest } // Memory Limit. @@ -171,8 +170,8 @@ func (h *Handler) envoySidecarResources(pod corev1.Pod) (corev1.ResourceRequirem return corev1.ResourceRequirements{}, fmt.Errorf("parsing annotation %s:%q: %s", annotationSidecarProxyMemoryLimit, anno, err) } resources.Limits[corev1.ResourceMemory] = memoryLimit - } else if h.DefaultProxyMemoryLimit != zeroQuantity { - resources.Limits[corev1.ResourceMemory] = h.DefaultProxyMemoryLimit + } else if w.DefaultProxyMemoryLimit != zeroQuantity { + resources.Limits[corev1.ResourceMemory] = w.DefaultProxyMemoryLimit } // Memory Request. 
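Each of the four sidecar resource fields above resolves with the same precedence: an explicit pod annotation wins, then a non-zero handler default, otherwise the field is left unset. A condensed sketch of that rule; the helper name is invented here for illustration, since the real code inlines this logic per field:

	// resolveQuantity returns the Quantity to apply for one resource field,
	// or nil when the field should be left unset.
	func resolveQuantity(pod corev1.Pod, annotation string, def resource.Quantity) (*resource.Quantity, error) {
		if anno, ok := pod.Annotations[annotation]; ok {
			q, err := resource.ParseQuantity(anno)
			if err != nil {
				return nil, fmt.Errorf("parsing annotation %s:%q: %s", annotation, anno, err)
			}
			return &q, nil // the annotation always wins
		}
		var zero resource.Quantity
		if def != zero {
			return &def, nil // fall back to the handler-level default
		}
		return nil, nil // leave the resource unset
	}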
@@ -182,8 +181,8 @@ func (h *Handler) envoySidecarResources(pod corev1.Pod) (corev1.ResourceRequirem return corev1.ResourceRequirements{}, fmt.Errorf("parsing annotation %s:%q: %s", annotationSidecarProxyMemoryRequest, anno, err) } resources.Requests[corev1.ResourceMemory] = memoryRequest - } else if h.DefaultProxyMemoryRequest != zeroQuantity { - resources.Requests[corev1.ResourceMemory] = h.DefaultProxyMemoryRequest + } else if w.DefaultProxyMemoryRequest != zeroQuantity { + resources.Requests[corev1.ResourceMemory] = w.DefaultProxyMemoryRequest } return resources, nil diff --git a/control-plane/connect-inject/envoy_sidecar_test.go b/control-plane/connect-inject/envoy_sidecar_test.go index 56af91ab3e..d631b0f481 100644 --- a/control-plane/connect-inject/envoy_sidecar_test.go +++ b/control-plane/connect-inject/envoy_sidecar_test.go @@ -12,7 +12,7 @@ import ( func TestHandlerEnvoySidecar(t *testing.T) { require := require.New(t) - h := Handler{} + h := ConnectWebhook{} pod := corev1.Pod{ ObjectMeta: metav1.ObjectMeta{ Annotations: map[string]string{ @@ -45,7 +45,7 @@ func TestHandlerEnvoySidecar(t *testing.T) { func TestHandlerEnvoySidecar_Multiport(t *testing.T) { require := require.New(t) - h := Handler{} + h := ConnectWebhook{} pod := corev1.Pod{ ObjectMeta: metav1.ObjectMeta{ Annotations: map[string]string{ @@ -136,7 +136,7 @@ func TestHandlerEnvoySidecar_withSecurityContext(t *testing.T) { } for name, c := range cases { t.Run(name, func(t *testing.T) { - h := Handler{ + h := ConnectWebhook{ EnableTransparentProxy: c.tproxyEnabled, EnableOpenShift: c.openShiftEnabled, } @@ -166,7 +166,7 @@ func TestHandlerEnvoySidecar_withSecurityContext(t *testing.T) { // an error to the handler. func TestHandlerEnvoySidecar_FailsWithDuplicatePodSecurityContextUID(t *testing.T) { require := require.New(t) - h := Handler{} + h := ConnectWebhook{} pod := corev1.Pod{ Spec: corev1.PodSpec{ Containers: []corev1.Container{ @@ -190,7 +190,7 @@ func TestHandlerEnvoySidecar_FailsWithDuplicateContainerSecurityContextUID(t *te cases := []struct { name string pod corev1.Pod - handler Handler + handler ConnectWebhook expErr bool expErrMessage error }{ @@ -217,7 +217,7 @@ func TestHandlerEnvoySidecar_FailsWithDuplicateContainerSecurityContextUID(t *te }, }, }, - handler: Handler{}, + handler: ConnectWebhook{}, expErr: true, expErrMessage: fmt.Errorf("container app has runAsUser set to the same uid %q as envoy which is not allowed", envoyUserAndGroupID), }, @@ -244,7 +244,7 @@ func TestHandlerEnvoySidecar_FailsWithDuplicateContainerSecurityContextUID(t *te }, }, }, - handler: Handler{ + handler: ConnectWebhook{ ImageEnvoy: "envoy", }, expErr: false, @@ -341,7 +341,7 @@ func TestHandlerEnvoySidecar_EnvoyExtraArgs(t *testing.T) { for _, tc := range cases { t.Run(tc.name, func(t *testing.T) { - h := Handler{ + h := ConnectWebhook{ ImageConsul: "hashicorp/consul:latest", ImageEnvoy: "hashicorp/consul-k8s:latest", EnvoyExtraArgs: tc.envoyExtraArgs, @@ -362,13 +362,13 @@ func TestHandlerEnvoySidecar_Resources(t *testing.T) { zero := resource.MustParse("0") cases := map[string]struct { - handler Handler + handler ConnectWebhook annotations map[string]string expResources corev1.ResourceRequirements expErr string }{ "no defaults, no annotations": { - handler: Handler{}, + handler: ConnectWebhook{}, annotations: nil, expResources: corev1.ResourceRequirements{ Limits: corev1.ResourceList{}, @@ -376,7 +376,7 @@ func TestHandlerEnvoySidecar_Resources(t *testing.T) { }, }, "all defaults, no annotations": { - handler: Handler{ + 
handler: ConnectWebhook{ DefaultProxyCPURequest: cpu1, DefaultProxyCPULimit: cpu2, DefaultProxyMemoryRequest: mem1, @@ -395,7 +395,7 @@ func TestHandlerEnvoySidecar_Resources(t *testing.T) { }, }, "no defaults, all annotations": { - handler: Handler{}, + handler: ConnectWebhook{}, annotations: map[string]string{ annotationSidecarProxyCPURequest: "100m", annotationSidecarProxyMemoryRequest: "100Mi", @@ -414,7 +414,7 @@ func TestHandlerEnvoySidecar_Resources(t *testing.T) { }, }, "annotations override defaults": { - handler: Handler{ + handler: ConnectWebhook{ DefaultProxyCPURequest: zero, DefaultProxyCPULimit: zero, DefaultProxyMemoryRequest: zero, @@ -438,7 +438,7 @@ func TestHandlerEnvoySidecar_Resources(t *testing.T) { }, }, "defaults set to zero, no annotations": { - handler: Handler{ + handler: ConnectWebhook{ DefaultProxyCPURequest: zero, DefaultProxyCPULimit: zero, DefaultProxyMemoryRequest: zero, @@ -457,7 +457,7 @@ func TestHandlerEnvoySidecar_Resources(t *testing.T) { }, }, "annotations set to 0": { - handler: Handler{}, + handler: ConnectWebhook{}, annotations: map[string]string{ annotationSidecarProxyCPURequest: "0", annotationSidecarProxyMemoryRequest: "0", @@ -476,28 +476,28 @@ func TestHandlerEnvoySidecar_Resources(t *testing.T) { }, }, "invalid cpu request": { - handler: Handler{}, + handler: ConnectWebhook{}, annotations: map[string]string{ annotationSidecarProxyCPURequest: "invalid", }, expErr: "parsing annotation consul.hashicorp.com/sidecar-proxy-cpu-request:\"invalid\": quantities must match the regular expression", }, "invalid cpu limit": { - handler: Handler{}, + handler: ConnectWebhook{}, annotations: map[string]string{ annotationSidecarProxyCPULimit: "invalid", }, expErr: "parsing annotation consul.hashicorp.com/sidecar-proxy-cpu-limit:\"invalid\": quantities must match the regular expression", }, "invalid memory request": { - handler: Handler{}, + handler: ConnectWebhook{}, annotations: map[string]string{ annotationSidecarProxyMemoryRequest: "invalid", }, expErr: "parsing annotation consul.hashicorp.com/sidecar-proxy-memory-request:\"invalid\": quantities must match the regular expression", }, "invalid memory limit": { - handler: Handler{}, + handler: ConnectWebhook{}, annotations: map[string]string{ annotationSidecarProxyMemoryLimit: "invalid", }, diff --git a/control-plane/connect-inject/peering_acceptor_controller.go b/control-plane/connect-inject/peering_acceptor_controller.go index d656cbf5dd..5f3308e7ca 100644 --- a/control-plane/connect-inject/peering_acceptor_controller.go +++ b/control-plane/connect-inject/peering_acceptor_controller.go @@ -46,15 +46,14 @@ func (r *PeeringAcceptorController) Reconcile(ctx context.Context, req ctrl.Requ r.Log.Info("received request for PeeringAcceptor", "name", req.Name, "ns", req.Namespace) // Get the PeeringAcceptor resource. - peeringAcceptor := &consulv1alpha1.PeeringAcceptor{} - err := r.Client.Get(ctx, req.NamespacedName, peeringAcceptor) + acceptor := &consulv1alpha1.PeeringAcceptor{} + err := r.Client.Get(ctx, req.NamespacedName, acceptor) // If the PeeringAcceptor resource has been deleted (and we get an IsNotFound // error), we need to delete it in Consul. 
if k8serrors.IsNotFound(err) { r.Log.Info("PeeringAcceptor was deleted, deleting from Consul", "name", req.Name, "ns", req.Namespace) - err := r.deletePeering(ctx, req.Name) - if err != nil { + if err := r.deletePeering(ctx, req.Name); err != nil { return ctrl.Result{}, err } return ctrl.Result{}, nil @@ -63,14 +62,14 @@ func (r *PeeringAcceptorController) Reconcile(ctx context.Context, req ctrl.Requ return ctrl.Result{}, err } - statusSecretSet := peeringAcceptor.Status.SecretRef != nil + statusSecretSet := acceptor.SecretRef() != nil // existingStatusSecret will be nil if it doesn't exist, and have the contents of the secret if it does exist. var existingStatusSecret *corev1.Secret if statusSecretSet { - existingStatusSecret, err = r.getExistingSecret(ctx, peeringAcceptor.Status.SecretRef.Name, peeringAcceptor.Namespace) + existingStatusSecret, err = r.getExistingSecret(ctx, acceptor.SecretRef().Name, acceptor.Namespace) if err != nil { - _ = r.updateStatusError(ctx, peeringAcceptor, err) + r.updateStatusError(ctx, acceptor, err) return ctrl.Result{}, err } } @@ -78,7 +77,7 @@ func (r *PeeringAcceptorController) Reconcile(ctx context.Context, req ctrl.Requ var secretResourceVersion string // Read the peering from Consul. - peering, _, err := r.ConsulClient.Peerings().Read(ctx, peeringAcceptor.Name, nil) + peering, _, err := r.ConsulClient.Peerings().Read(ctx, acceptor.Name, nil) if err != nil { r.Log.Error(err, "failed to get Peering from Consul", "name", req.Name) return ctrl.Result{}, err @@ -87,32 +86,32 @@ func (r *PeeringAcceptorController) Reconcile(ctx context.Context, req ctrl.Requ // If the peering doesn't exist in Consul, generate a new token, and store it in the specified backend. Store the // current state in the status. if peering == nil { - r.Log.Info("peering doesn't exist in Consul", "name", peeringAcceptor.Name) + r.Log.Info("peering doesn't exist in Consul", "name", acceptor.Name) if statusSecretSet { if existingStatusSecret != nil { err := r.Client.Delete(ctx, existingStatusSecret) if err != nil { - _ = r.updateStatusError(ctx, peeringAcceptor, err) + r.updateStatusError(ctx, acceptor, err) return ctrl.Result{}, err } } } // Generate and store the peering token. var resp *api.PeeringGenerateTokenResponse - if resp, err = r.generateToken(ctx, peeringAcceptor.Name); err != nil { - _ = r.updateStatusError(ctx, peeringAcceptor, err) + if resp, err = r.generateToken(ctx, acceptor.Name); err != nil { + r.updateStatusError(ctx, acceptor, err) return ctrl.Result{}, err } - if peeringAcceptor.Spec.Peer.Secret.Backend == "kubernetes" { - secretResourceVersion, err = r.createK8sPeeringTokenSecretWithOwner(ctx, peeringAcceptor, resp) + if acceptor.Secret().Backend == "kubernetes" { + secretResourceVersion, err = r.createK8sPeeringTokenSecretWithOwner(ctx, acceptor, resp) if err != nil { - _ = r.updateStatusError(ctx, peeringAcceptor, err) + r.updateStatusError(ctx, acceptor, err) return ctrl.Result{}, err } } // Store the state in the status. 
- err := r.updateStatus(ctx, peeringAcceptor, secretResourceVersion) + err := r.updateStatus(ctx, acceptor, secretResourceVersion) return ctrl.Result{}, err } else if err != nil { r.Log.Error(err, "failed to get Peering from Consul", "name", req.Name) @@ -127,9 +126,9 @@ func (r *PeeringAcceptorController) Reconcile(ctx context.Context, req ctrl.Requ var shouldGenerate bool var nameChanged bool if statusSecretSet { - shouldGenerate, nameChanged, err = shouldGenerateToken(peeringAcceptor, existingStatusSecret) + shouldGenerate, nameChanged, err = shouldGenerateToken(acceptor, existingStatusSecret) if err != nil { - _ = r.updateStatusError(ctx, peeringAcceptor, err) + r.updateStatusError(ctx, acceptor, err) return ctrl.Result{}, err } } else { @@ -139,11 +138,11 @@ func (r *PeeringAcceptorController) Reconcile(ctx context.Context, req ctrl.Requ if shouldGenerate { // Generate and store the peering token. var resp *api.PeeringGenerateTokenResponse - if resp, err = r.generateToken(ctx, peeringAcceptor.Name); err != nil { + if resp, err = r.generateToken(ctx, acceptor.Name); err != nil { return ctrl.Result{}, err } - if peeringAcceptor.Spec.Peer.Secret.Backend == "kubernetes" { - secretResourceVersion, err = r.createK8sPeeringTokenSecretWithOwner(ctx, peeringAcceptor, resp) + if acceptor.Secret().Backend == "kubernetes" { + secretResourceVersion, err = r.createK8sPeeringTokenSecretWithOwner(ctx, acceptor, resp) if err != nil { return ctrl.Result{}, err } @@ -153,14 +152,14 @@ func (r *PeeringAcceptorController) Reconcile(ctx context.Context, req ctrl.Requ if existingStatusSecret != nil { err := r.Client.Delete(ctx, existingStatusSecret) if err != nil { - _ = r.updateStatusError(ctx, peeringAcceptor, err) + r.updateStatusError(ctx, acceptor, err) return ctrl.Result{}, err } } } // Store the state in the status. - err := r.updateStatus(ctx, peeringAcceptor, secretResourceVersion) + err := r.updateStatus(ctx, acceptor, secretResourceVersion) return ctrl.Result{}, err } @@ -170,26 +169,25 @@ func (r *PeeringAcceptorController) Reconcile(ctx context.Context, req ctrl.Requ // shouldGenerateToken returns whether a token should be generated, and whether the name of the secret has changed. It // compares the spec secret's name/key/backend and contents to the status secret's name/key/backend and contents. The // contents are compared by taking a SHA256 sum of the secret. -func shouldGenerateToken(peeringAcceptor *consulv1alpha1.PeeringAcceptor, existingStatusSecret *corev1.Secret) (shouldGenerate bool, nameChanged bool, err error) { - if peeringAcceptor.Status.SecretRef == nil { +func shouldGenerateToken(acceptor *consulv1alpha1.PeeringAcceptor, existingStatusSecret *corev1.Secret) (shouldGenerate bool, nameChanged bool, err error) { + if acceptor.SecretRef() == nil { return false, false, errors.New("shouldGenerateToken was called with an empty fields in the existing status") } // Compare the existing name, key, and backend. - if peeringAcceptor.Status.SecretRef.Name != peeringAcceptor.Spec.Peer.Secret.Name { + if acceptor.SecretRef().Name != acceptor.Secret().Name { return true, true, nil } - if peeringAcceptor.Status.SecretRef.Key != peeringAcceptor.Spec.Peer.Secret.Key { + if acceptor.SecretRef().Key != acceptor.Secret().Key { return true, false, nil } - // TODO(peering): remove this when validation webhook exists. 
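To keep the branches straight, the outcomes of shouldGenerateToken reduce to a small table. This is an editorial summary of the checks above and below, not lines from the patch:

	// spec.name    != status.name     -> shouldGenerate=true,  nameChanged=true
	// spec.key     != status.key      -> shouldGenerate=true,  nameChanged=false
	// spec.backend != status.backend  -> error: backend cannot be changed
	// secret ResourceVersion drifted  -> shouldGenerate=true,  nameChanged=false
	// everything matches              -> shouldGenerate=false, nameChanged=false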
- if peeringAcceptor.Status.SecretRef.Backend != peeringAcceptor.Spec.Peer.Secret.Backend { + if acceptor.SecretRef().Backend != acceptor.Secret().Backend { return false, false, errors.New("PeeringAcceptor backend cannot be changed") } // Compare the existing secret hash. // Get the secret specified by the status, make sure it matches the status' secret.latestHash. if existingStatusSecret != nil { - if existingStatusSecret.ResourceVersion != peeringAcceptor.Status.SecretRef.ResourceVersion { + if existingStatusSecret.ResourceVersion != acceptor.SecretRef().ResourceVersion { return true, false, nil } @@ -200,49 +198,41 @@ func shouldGenerateToken(peeringAcceptor *consulv1alpha1.PeeringAcceptor, existi } // updateStatus updates the peeringAcceptor's secret in the status. -func (r *PeeringAcceptorController) updateStatus(ctx context.Context, peeringAcceptor *consulv1alpha1.PeeringAcceptor, secretResourceVersion string) error { - peeringAcceptor.Status.SecretRef = &consulv1alpha1.SecretRefStatus{ - Name: peeringAcceptor.Spec.Peer.Secret.Name, - Key: peeringAcceptor.Spec.Peer.Secret.Key, - Backend: peeringAcceptor.Spec.Peer.Secret.Backend, +func (r *PeeringAcceptorController) updateStatus(ctx context.Context, acceptor *consulv1alpha1.PeeringAcceptor, secretResourceVersion string) error { + acceptor.Status.SecretRef = &consulv1alpha1.SecretRefStatus{ + Secret: *acceptor.Secret(), + ResourceVersion: secretResourceVersion, } - - peeringAcceptor.Status.SecretRef.ResourceVersion = secretResourceVersion - - peeringAcceptor.Status.LastReconcileTime = &metav1.Time{Time: time.Now()} - peeringAcceptor.Status.ReconcileError = &consulv1alpha1.ReconcileErrorStatus{ + acceptor.Status.LastReconcileTime = &metav1.Time{Time: time.Now()} + acceptor.Status.ReconcileError = &consulv1alpha1.ReconcileErrorStatus{ Error: pointerToBool(false), Message: pointerToString(""), } - err := r.Status().Update(ctx, peeringAcceptor) + err := r.Status().Update(ctx, acceptor) if err != nil { - r.Log.Error(err, "failed to update PeeringAcceptor status", "name", peeringAcceptor.Name, "namespace", peeringAcceptor.Namespace) + r.Log.Error(err, "failed to update PeeringAcceptor status", "name", acceptor.Name, "namespace", acceptor.Namespace) } return err } // updateStatusError updates the peeringAcceptor's ReconcileError in the status. -func (r *PeeringAcceptorController) updateStatusError(ctx context.Context, peeringAcceptor *consulv1alpha1.PeeringAcceptor, reconcileErr error) error { - peeringAcceptor.Status.ReconcileError = &consulv1alpha1.ReconcileErrorStatus{ +func (r *PeeringAcceptorController) updateStatusError(ctx context.Context, acceptor *consulv1alpha1.PeeringAcceptor, reconcileErr error) { + acceptor.Status.ReconcileError = &consulv1alpha1.ReconcileErrorStatus{ Error: pointerToBool(true), Message: pointerToString(reconcileErr.Error()), } - peeringAcceptor.Status.LastReconcileTime = &metav1.Time{Time: time.Now()} - err := r.Status().Update(ctx, peeringAcceptor) + acceptor.Status.LastReconcileTime = &metav1.Time{Time: time.Now()} + err := r.Status().Update(ctx, acceptor) if err != nil { - r.Log.Error(err, "failed to update PeeringAcceptor status", "name", peeringAcceptor.Name, "namespace", peeringAcceptor.Namespace) + r.Log.Error(err, "failed to update PeeringAcceptor status", "name", acceptor.Name, "namespace", acceptor.Namespace) } - return err } // getExistingSecret gets the K8s secret specified, and either returns the existing secret or nil if it doesn't exist. 
func (r *PeeringAcceptorController) getExistingSecret(ctx context.Context, name string, namespace string) (*corev1.Secret, error) { existingSecret := &corev1.Secret{} - namespacedName := types.NamespacedName{ - Name: name, - Namespace: namespace, - } + namespacedName := types.NamespacedName{Name: name, Namespace: namespace} err := r.Client.Get(ctx, namespacedName, existingSecret) if k8serrors.IsNotFound(err) { // The secret was deleted. @@ -257,14 +247,14 @@ func (r *PeeringAcceptorController) getExistingSecret(ctx context.Context, name // createK8sPeeringTokenSecretWithOwner creates a secret and uses the controller's K8s client to apply the secret. It // sets an owner reference to the PeeringAcceptor resource. It also checks if there's an existing secret with the same // name and makes sure to update the existing secret if so. -func (r *PeeringAcceptorController) createK8sPeeringTokenSecretWithOwner(ctx context.Context, peeringAcceptor *consulv1alpha1.PeeringAcceptor, resp *api.PeeringGenerateTokenResponse) (string, error) { - secretName := peeringAcceptor.Spec.Peer.Secret.Name - secretNamespace := peeringAcceptor.Namespace - secret := createSecret(secretName, secretNamespace, peeringAcceptor.Spec.Peer.Secret.Key, resp.PeeringToken) - if err := controllerutil.SetControllerReference(peeringAcceptor, secret, r.Scheme); err != nil { +func (r *PeeringAcceptorController) createK8sPeeringTokenSecretWithOwner(ctx context.Context, acceptor *consulv1alpha1.PeeringAcceptor, resp *api.PeeringGenerateTokenResponse) (string, error) { + secretName := acceptor.Secret().Name + secretNamespace := acceptor.Namespace + secret := createSecret(secretName, secretNamespace, acceptor.Secret().Key, resp.PeeringToken) + if err := controllerutil.SetControllerReference(acceptor, secret, r.Scheme); err != nil { return "", err } - existingSecret, err := r.getExistingSecret(ctx, peeringAcceptor.Spec.Peer.Secret.Name, peeringAcceptor.Namespace) + existingSecret, err := r.getExistingSecret(ctx, acceptor.Secret().Name, acceptor.Namespace) if err != nil { return "", err } @@ -327,6 +317,9 @@ func createSecret(name, namespace, key, value string) *corev1.Secret { StringData: map[string]string{ key: value, }, + Data: map[string][]byte{ + key: []byte(value), + }, } return secret } diff --git a/control-plane/connect-inject/peering_acceptor_controller_test.go b/control-plane/connect-inject/peering_acceptor_controller_test.go index 95d5eb3422..d3e51f0eee 100644 --- a/control-plane/connect-inject/peering_acceptor_controller_test.go +++ b/control-plane/connect-inject/peering_acceptor_controller_test.go @@ -38,7 +38,7 @@ func TestReconcileCreateUpdatePeeringAcceptor(t *testing.T) { { name: "New PeeringAcceptor creates a peering in Consul and generates a token", k8sObjects: func() []runtime.Object { - peeringAcceptor := &v1alpha1.PeeringAcceptor{ + acceptor := &v1alpha1.PeeringAcceptor{ ObjectMeta: metav1.ObjectMeta{ Name: "acceptor-created", Namespace: "default", @@ -53,13 +53,15 @@ func TestReconcileCreateUpdatePeeringAcceptor(t *testing.T) { }, }, } - return []runtime.Object{peeringAcceptor} + return []runtime.Object{acceptor} }, expectedStatus: &v1alpha1.PeeringAcceptorStatus{ SecretRef: &v1alpha1.SecretRefStatus{ - Name: "acceptor-created-secret", - Key: "data", - Backend: "kubernetes", + Secret: v1alpha1.Secret{ + Name: "acceptor-created-secret", + Key: "data", + Backend: "kubernetes", + }, }, }, expectedConsulPeerings: []*api.Peering{ @@ -83,7 +85,7 @@ func TestReconcileCreateUpdatePeeringAcceptor(t *testing.T) { { name: "When 
the secret already exists (not created by controller), it is updated with the contents of the new peering token and an owner reference is added", k8sObjects: func() []runtime.Object { - peeringAcceptor := &v1alpha1.PeeringAcceptor{ + acceptor := &v1alpha1.PeeringAcceptor{ ObjectMeta: metav1.ObjectMeta{ Name: "acceptor-created", Namespace: "default", @@ -99,13 +101,15 @@ func TestReconcileCreateUpdatePeeringAcceptor(t *testing.T) { }, } secret := createSecret("acceptor-created-secret", "default", "some-old-key", "some-old-data") - return []runtime.Object{peeringAcceptor, secret} + return []runtime.Object{acceptor, secret} }, expectedStatus: &v1alpha1.PeeringAcceptorStatus{ SecretRef: &v1alpha1.SecretRefStatus{ - Name: "acceptor-created-secret", - Key: "data", - Backend: "kubernetes", + Secret: v1alpha1.Secret{ + Name: "acceptor-created-secret", + Key: "data", + Backend: "kubernetes", + }, }, }, expectedConsulPeerings: []*api.Peering{ @@ -129,7 +133,7 @@ func TestReconcileCreateUpdatePeeringAcceptor(t *testing.T) { { name: "PeeringAcceptor status secret exists and has different contents", k8sObjects: func() []runtime.Object { - peeringAcceptor := &v1alpha1.PeeringAcceptor{ + acceptor := &v1alpha1.PeeringAcceptor{ ObjectMeta: metav1.ObjectMeta{ Name: "acceptor-created", Namespace: "default", @@ -145,9 +149,11 @@ func TestReconcileCreateUpdatePeeringAcceptor(t *testing.T) { }, Status: v1alpha1.PeeringAcceptorStatus{ SecretRef: &v1alpha1.SecretRefStatus{ - Name: "acceptor-created-secret", - Key: "some-old-key", - Backend: "kubernetes", + Secret: v1alpha1.Secret{ + Name: "acceptor-created-secret", + Key: "some-old-key", + Backend: "kubernetes", + }, ResourceVersion: "some-old-sha", }, }, @@ -163,13 +169,15 @@ func TestReconcileCreateUpdatePeeringAcceptor(t *testing.T) { BlockOwnerDeletion: pointerToBool(true), }, } - return []runtime.Object{peeringAcceptor, secret} + return []runtime.Object{acceptor, secret} }, expectedStatus: &v1alpha1.PeeringAcceptorStatus{ SecretRef: &v1alpha1.SecretRefStatus{ - Name: "acceptor-created-secret", - Key: "data", - Backend: "kubernetes", + Secret: v1alpha1.Secret{ + Name: "acceptor-created-secret", + Key: "data", + Backend: "kubernetes", + }, }, }, expectedConsulPeerings: []*api.Peering{ @@ -194,7 +202,7 @@ func TestReconcileCreateUpdatePeeringAcceptor(t *testing.T) { { name: "PeeringAcceptor status secret exists and there's no peering in Consul", k8sObjects: func() []runtime.Object { - peeringAcceptor := &v1alpha1.PeeringAcceptor{ + acceptor := &v1alpha1.PeeringAcceptor{ ObjectMeta: metav1.ObjectMeta{ Name: "acceptor-created", Namespace: "default", @@ -210,21 +218,25 @@ func TestReconcileCreateUpdatePeeringAcceptor(t *testing.T) { }, Status: v1alpha1.PeeringAcceptorStatus{ SecretRef: &v1alpha1.SecretRefStatus{ - Name: "some-old-secret", - Key: "some-old-key", - Backend: "kubernetes", + Secret: v1alpha1.Secret{ + Name: "some-old-secret", + Key: "some-old-key", + Backend: "kubernetes", + }, ResourceVersion: "some-old-sha", }, }, } secret := createSecret("some-old-secret", "default", "some-old-key", "some-old-data") - return []runtime.Object{peeringAcceptor, secret} + return []runtime.Object{acceptor, secret} }, expectedStatus: &v1alpha1.PeeringAcceptorStatus{ SecretRef: &v1alpha1.SecretRefStatus{ - Name: "acceptor-created-secret", - Key: "data", - Backend: "kubernetes", + Secret: v1alpha1.Secret{ + Name: "acceptor-created-secret", + Key: "data", + Backend: "kubernetes", + }, }, }, expectedConsulPeerings: []*api.Peering{ @@ -252,7 +264,7 @@ func 
TestReconcileCreateUpdatePeeringAcceptor(t *testing.T) { { name: "PeeringAcceptor status secret name is changed when there is a peering in Consul", k8sObjects: func() []runtime.Object { - peeringAcceptor := &v1alpha1.PeeringAcceptor{ + acceptor := &v1alpha1.PeeringAcceptor{ ObjectMeta: metav1.ObjectMeta{ Name: "acceptor-created", Namespace: "default", @@ -268,21 +280,25 @@ func TestReconcileCreateUpdatePeeringAcceptor(t *testing.T) { }, Status: v1alpha1.PeeringAcceptorStatus{ SecretRef: &v1alpha1.SecretRefStatus{ - Name: "some-old-secret", - Key: "some-old-key", - Backend: "kubernetes", + Secret: v1alpha1.Secret{ + Name: "some-old-secret", + Key: "some-old-key", + Backend: "kubernetes", + }, ResourceVersion: "some-old-sha", }, }, } secret := createSecret("some-old-secret", "default", "some-old-key", "some-old-data") - return []runtime.Object{peeringAcceptor, secret} + return []runtime.Object{acceptor, secret} }, expectedStatus: &v1alpha1.PeeringAcceptorStatus{ SecretRef: &v1alpha1.SecretRefStatus{ - Name: "acceptor-created-secret", - Key: "data", - Backend: "kubernetes", + Secret: v1alpha1.Secret{ + Name: "acceptor-created-secret", + Key: "data", + Backend: "kubernetes", + }, }, }, expectedConsulPeerings: []*api.Peering{ @@ -320,7 +336,7 @@ func TestReconcileCreateUpdatePeeringAcceptor(t *testing.T) { s.AddKnownTypes(v1alpha1.GroupVersion, &v1alpha1.PeeringAcceptor{}, &v1alpha1.PeeringAcceptorList{}) fakeClient := fake.NewClientBuilder().WithScheme(s).WithRuntimeObjects(k8sObjects...).Build() - // Create test consul server + // Create test consul server. consul, err := testutil.NewTestServerConfigT(t, func(c *testutil.TestServerConfig) { c.NodeName = nodeName }) @@ -341,7 +357,7 @@ func TestReconcileCreateUpdatePeeringAcceptor(t *testing.T) { } // Create the peering acceptor controller - pac := &PeeringAcceptorController{ + controller := &PeeringAcceptorController{ Client: fakeClient, Log: logrtest.TestLogger{T: t}, ConsulClient: consulClient, @@ -352,7 +368,7 @@ func TestReconcileCreateUpdatePeeringAcceptor(t *testing.T) { Namespace: "default", } - resp, err := pac.Reconcile(context.Background(), ctrl.Request{ + resp, err := controller.Reconcile(context.Background(), ctrl.Request{ NamespacedName: namespacedName, }) if tt.expErr != "" { @@ -400,13 +416,13 @@ func TestReconcileCreateUpdatePeeringAcceptor(t *testing.T) { require.Equal(t, true, *createdSecret.OwnerReferences[0].Controller) // Get the reconciled PeeringAcceptor and make assertions on the status - peeringAcceptorReconciled := &v1alpha1.PeeringAcceptor{} - err = fakeClient.Get(context.Background(), namespacedName, peeringAcceptorReconciled) + acceptor := &v1alpha1.PeeringAcceptor{} + err = fakeClient.Get(context.Background(), namespacedName, acceptor) require.NoError(t, err) if tt.expectedStatus != nil { - require.Equal(t, tt.expectedStatus.SecretRef.Name, peeringAcceptorReconciled.Status.SecretRef.Name) - require.Equal(t, tt.expectedStatus.SecretRef.Key, peeringAcceptorReconciled.Status.SecretRef.Key) - require.Equal(t, tt.expectedStatus.SecretRef.Backend, peeringAcceptorReconciled.Status.SecretRef.Backend) + require.Equal(t, tt.expectedStatus.SecretRef.Name, acceptor.SecretRef().Name) + require.Equal(t, tt.expectedStatus.SecretRef.Key, acceptor.SecretRef().Key) + require.Equal(t, tt.expectedStatus.SecretRef.Backend, acceptor.SecretRef().Backend) } // Check that old secret was deleted. 
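The body of the deletion check guarded below is elided in this chunk. Its typical shape, assuming the fake-client Get-then-IsNotFound pattern used elsewhere in these tests (hypothetical, for orientation only):

	// Confirm the superseded secret is gone from the fake client.
	oldSecret := &corev1.Secret{}
	err = fakeClient.Get(context.Background(), *tt.expectDeletedK8sSecret, oldSecret)
	require.True(t, k8serrors.IsNotFound(err))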
if tt.expectDeletedK8sSecret != nil { @@ -470,7 +486,7 @@ func TestReconcileDeletePeeringAcceptor(t *testing.T) { require.NoError(t, err) // Create the peering acceptor controller. - pac := &PeeringAcceptorController{ + controller := &PeeringAcceptorController{ Client: fakeClient, Log: logrtest.TestLogger{T: t}, ConsulClient: consulClient, @@ -482,7 +498,7 @@ func TestReconcileDeletePeeringAcceptor(t *testing.T) { } // Reconcile a resource that is not in K8s, but is still in Consul. - resp, err := pac.Reconcile(context.Background(), ctrl.Request{ + resp, err := controller.Reconcile(context.Background(), ctrl.Request{ NamespacedName: namespacedName, }) if tt.expErr != "" { @@ -527,9 +543,11 @@ func TestShouldGenerateToken(t *testing.T) { }, Status: v1alpha1.PeeringAcceptorStatus{ SecretRef: &v1alpha1.SecretRefStatus{ - Name: "acceptor-secret", - Key: "data", - Backend: "kubernetes", + Secret: v1alpha1.Secret{ + Name: "acceptor-secret", + Key: "data", + Backend: "kubernetes", + }, ResourceVersion: "1", }, }, @@ -561,9 +579,11 @@ func TestShouldGenerateToken(t *testing.T) { }, Status: v1alpha1.PeeringAcceptorStatus{ SecretRef: &v1alpha1.SecretRefStatus{ - Name: "acceptor-secret", - Key: "data-old", - Backend: "kubernetes", + Secret: v1alpha1.Secret{ + Name: "acceptor-secret", + Key: "data-old", + Backend: "kubernetes", + }, ResourceVersion: "1", }, }, @@ -595,9 +615,11 @@ func TestShouldGenerateToken(t *testing.T) { }, Status: v1alpha1.PeeringAcceptorStatus{ SecretRef: &v1alpha1.SecretRefStatus{ - Name: "acceptor-secret-old", - Key: "data", - Backend: "kubernetes", + Secret: v1alpha1.Secret{ + Name: "acceptor-secret-old", + Key: "data", + Backend: "kubernetes", + }, ResourceVersion: "1", }, }, @@ -629,9 +651,11 @@ func TestShouldGenerateToken(t *testing.T) { }, Status: v1alpha1.PeeringAcceptorStatus{ SecretRef: &v1alpha1.SecretRefStatus{ - Name: "acceptor-secret", - Key: "data", - Backend: "kubernetes", + Secret: v1alpha1.Secret{ + Name: "acceptor-secret", + Key: "data", + Backend: "kubernetes", + }, ResourceVersion: "1", }, }, @@ -664,9 +688,11 @@ func TestShouldGenerateToken(t *testing.T) { }, Status: v1alpha1.PeeringAcceptorStatus{ SecretRef: &v1alpha1.SecretRefStatus{ - Name: "acceptor-secret", - Key: "data", - Backend: "kubernetes", + Secret: v1alpha1.Secret{ + Name: "acceptor-secret", + Key: "data", + Backend: "kubernetes", + }, ResourceVersion: "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", }, }, @@ -696,7 +722,7 @@ func TestShouldGenerateToken(t *testing.T) { } } -func TestUpdateStatus(t *testing.T) { +func TestAcceptorUpdateStatus(t *testing.T) { cases := []struct { name string peeringAcceptor *v1alpha1.PeeringAcceptor @@ -723,9 +749,11 @@ func TestUpdateStatus(t *testing.T) { resourceVersion: "1234", expStatus: v1alpha1.PeeringAcceptorStatus{ SecretRef: &v1alpha1.SecretRefStatus{ - Name: "acceptor-secret", - Key: "data", - Backend: "kubernetes", + Secret: v1alpha1.Secret{ + Name: "acceptor-secret", + Key: "data", + Backend: "kubernetes", + }, ResourceVersion: "1234", }, ReconcileError: &v1alpha1.ReconcileErrorStatus{ @@ -752,9 +780,11 @@ func TestUpdateStatus(t *testing.T) { }, Status: v1alpha1.PeeringAcceptorStatus{ SecretRef: &v1alpha1.SecretRefStatus{ - Name: "old-name", - Key: "old-key", - Backend: "kubernetes", + Secret: v1alpha1.Secret{ + Name: "old-name", + Key: "old-key", + Backend: "kubernetes", + }, ResourceVersion: "old-resource-version", }, }, @@ -762,9 +792,11 @@ func TestUpdateStatus(t *testing.T) { resourceVersion: "1234", expStatus: 
v1alpha1.PeeringAcceptorStatus{ SecretRef: &v1alpha1.SecretRefStatus{ - Name: "acceptor-secret", - Key: "data", - Backend: "kubernetes", + Secret: v1alpha1.Secret{ + Name: "acceptor-secret", + Key: "data", + Backend: "kubernetes", + }, ResourceVersion: "1234", }, ReconcileError: &v1alpha1.ReconcileErrorStatus{ @@ -796,33 +828,33 @@ func TestUpdateStatus(t *testing.T) { err := pac.updateStatus(context.Background(), tt.peeringAcceptor, tt.resourceVersion) require.NoError(t, err) - peeringAcceptor := &v1alpha1.PeeringAcceptor{} - peeringAcceptorName := types.NamespacedName{ + acceptor := &v1alpha1.PeeringAcceptor{} + acceptorName := types.NamespacedName{ Name: "acceptor", Namespace: "default", } - err = fakeClient.Get(context.Background(), peeringAcceptorName, peeringAcceptor) + err = fakeClient.Get(context.Background(), acceptorName, acceptor) require.NoError(t, err) - require.Equal(t, tt.expStatus.SecretRef.Name, peeringAcceptor.Status.SecretRef.Name) - require.Equal(t, tt.expStatus.SecretRef.Key, peeringAcceptor.Status.SecretRef.Key) - require.Equal(t, tt.expStatus.SecretRef.Backend, peeringAcceptor.Status.SecretRef.Backend) - require.Equal(t, tt.expStatus.SecretRef.ResourceVersion, peeringAcceptor.Status.SecretRef.ResourceVersion) - require.Equal(t, *tt.expStatus.ReconcileError.Error, *peeringAcceptor.Status.ReconcileError.Error) + require.Equal(t, tt.expStatus.SecretRef.Name, acceptor.SecretRef().Name) + require.Equal(t, tt.expStatus.SecretRef.Key, acceptor.SecretRef().Key) + require.Equal(t, tt.expStatus.SecretRef.Backend, acceptor.SecretRef().Backend) + require.Equal(t, tt.expStatus.SecretRef.ResourceVersion, acceptor.SecretRef().ResourceVersion) + require.Equal(t, *tt.expStatus.ReconcileError.Error, *acceptor.Status.ReconcileError.Error) }) } } -func TestUpdateStatusError(t *testing.T) { +func TestAcceptorUpdateStatusError(t *testing.T) { cases := []struct { - name string - peeringAcceptor *v1alpha1.PeeringAcceptor - reconcileErr error - expStatus v1alpha1.PeeringAcceptorStatus + name string + acceptor *v1alpha1.PeeringAcceptor + reconcileErr error + expStatus v1alpha1.PeeringAcceptorStatus }{ { name: "updates status when there's no existing status", - peeringAcceptor: &v1alpha1.PeeringAcceptor{ + acceptor: &v1alpha1.PeeringAcceptor{ ObjectMeta: metav1.ObjectMeta{ Name: "acceptor", Namespace: "default", @@ -847,7 +879,7 @@ func TestUpdateStatusError(t *testing.T) { }, { name: "updates status when there is an existing status", - peeringAcceptor: &v1alpha1.PeeringAcceptor{ + acceptor: &v1alpha1.PeeringAcceptor{ ObjectMeta: metav1.ObjectMeta{ Name: "acceptor", Namespace: "default", @@ -883,30 +915,29 @@ func TestUpdateStatusError(t *testing.T) { ns := corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "default"}} // Create fake k8s client. k8sObjects := []runtime.Object{&ns} - k8sObjects = append(k8sObjects, tt.peeringAcceptor) + k8sObjects = append(k8sObjects, tt.acceptor) // Add peering types to the scheme. s := scheme.Scheme s.AddKnownTypes(v1alpha1.GroupVersion, &v1alpha1.PeeringAcceptor{}, &v1alpha1.PeeringAcceptorList{}) fakeClient := fake.NewClientBuilder().WithScheme(s).WithRuntimeObjects(k8sObjects...).Build() // Create the peering acceptor controller. 
- pac := &PeeringAcceptorController{ + controller := &PeeringAcceptorController{ Client: fakeClient, Log: logrtest.TestLogger{T: t}, Scheme: s, } - err := pac.updateStatusError(context.Background(), tt.peeringAcceptor, tt.reconcileErr) - require.NoError(t, err) + controller.updateStatusError(context.Background(), tt.acceptor, tt.reconcileErr) - peeringAcceptor := &v1alpha1.PeeringAcceptor{} - peeringAcceptorName := types.NamespacedName{ + acceptor := &v1alpha1.PeeringAcceptor{} + acceptorName := types.NamespacedName{ Name: "acceptor", Namespace: "default", } - err = fakeClient.Get(context.Background(), peeringAcceptorName, peeringAcceptor) + err := fakeClient.Get(context.Background(), acceptorName, acceptor) require.NoError(t, err) - require.Equal(t, *tt.expStatus.ReconcileError.Error, *peeringAcceptor.Status.ReconcileError.Error) + require.Equal(t, *tt.expStatus.ReconcileError.Error, *acceptor.Status.ReconcileError.Error) }) } diff --git a/control-plane/connect-inject/peering_dialer_controller.go b/control-plane/connect-inject/peering_dialer_controller.go index af01807035..a93aa3b47b 100644 --- a/control-plane/connect-inject/peering_dialer_controller.go +++ b/control-plane/connect-inject/peering_dialer_controller.go @@ -37,15 +37,14 @@ func (r *PeeringDialerController) Reconcile(ctx context.Context, req ctrl.Reques r.Log.Info("received request for PeeringDialer:", "name", req.Name, "ns", req.Namespace) // Get the PeeringDialer resource. - peeringDialer := &consulv1alpha1.PeeringDialer{} - err := r.Client.Get(ctx, req.NamespacedName, peeringDialer) + dialer := &consulv1alpha1.PeeringDialer{} + err := r.Client.Get(ctx, req.NamespacedName, dialer) // If the PeeringDialer resource has been deleted (and we get an IsNotFound // error), we need to delete it in Consul. if k8serrors.IsNotFound(err) { r.Log.Info("PeeringDialer was deleted, deleting from Consul", "name", req.Name, "ns", req.Namespace) - err := r.deletePeering(ctx, req.Name) - if err != nil { + if err := r.deletePeering(ctx, req.Name); err != nil { return ctrl.Result{}, err } return ctrl.Result{}, nil @@ -55,112 +54,105 @@ func (r *PeeringDialerController) Reconcile(ctx context.Context, req ctrl.Reques } // Get the status secret and the spec secret. - // Cases need to handle statusSecretSet, existingStatusSecret, specSecretSet, existingSpecSecret. - // no specSecretSet --> error bc spec needs to be set. - // no existingSpecSecret --> error bc waiting for spec secret to exist. - // no statusSecretSet, yes specSecretSet, no existingStatusSecret, yes existingSpecSecret --> initiate peering. - // yes statusSecretSet, yes specSecretSet, no existingStatusSecret, yes existingSpecSecret --> initiate peering. - // yes statusSecretSet, yes specSecretSet, yes existingStatusSecret, yes existingSpecSecret --> compare contents, if + // Cases need to handle secretRefSet, statusSecret, secretSet, specSecret. + // no secretSet --> error bc spec needs to be set. + // no specSecret --> error bc waiting for spec secret to exist. + // no secretRefSet, yes secretSet, no statusSecret, yes specSecret --> initiate peering. + // yes secretRefSet, yes secretSet, no statusSecret, yes specSecret --> initiate peering. + // yes secretRefSet, yes secretSet, yes statusSecret, yes specSecret --> compare contents, if // different initiate peering. - // Get the status secret and the spec secret. - statusSecretSet := false - if peeringDialer.Status.SecretRef != nil { - statusSecretSet = true - } // TODO(peering): remove this once CRD validation exists. 
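The decision table in the comment above collapses into a small case analysis. A sketch; the function and its result strings are invented for illustration, while the actual reconcile inlines these branches:

	// dialerAction mirrors the dialer reconcile decision table.
	func dialerAction(secretSet, specSecretExists, secretRefSet, statusSecretExists bool) string {
		switch {
		case !secretSet:
			return "error: PeeringDialer spec.peer.secret was not set"
		case !specSecretExists:
			return "error: PeeringDialer spec.peer.secret does not exist"
		case !secretRefSet || !statusSecretExists:
			return "initiate peering and record the spec secret in the status"
		default:
			return "re-initiate only if the Consul peering is gone or the spec and status secrets differ"
		}
	}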
- specSecretSet := false - if peeringDialer.Spec.Peer != nil { - if peeringDialer.Spec.Peer.Secret != nil { - specSecretSet = true - } + secretSet := false + if dialer.Secret() != nil { + secretSet = true } - if !specSecretSet { + if !secretSet { err = errors.New("PeeringDialer spec.peer.secret was not set") - _ = r.updateStatusError(ctx, peeringDialer, err) + r.updateStatusError(ctx, dialer, err) return ctrl.Result{}, err } - // existingStatusSecret will be nil if the secret specified by the status doesn't exist. - var existingStatusSecret *corev1.Secret - if statusSecretSet { - _, existingStatusSecret, err = r.getExistingSecret(ctx, peeringDialer.Status.SecretRef.Name, peeringDialer.Namespace) - if err != nil { - _ = r.updateStatusError(ctx, peeringDialer, err) - return ctrl.Result{}, err - } - } - - // existingSpecSecret will be nil if the secret specified by the spec doesn't exist. - var existingSpecSecret *corev1.Secret - if specSecretSet { - _, existingSpecSecret, err = r.getExistingSecret(ctx, peeringDialer.Spec.Peer.Secret.Name, peeringDialer.Namespace) - if err != nil { - _ = r.updateStatusError(ctx, peeringDialer, err) - return ctrl.Result{}, err - } + // specSecret will be nil if the secret specified by the spec doesn't exist. + var specSecret *corev1.Secret + specSecret, err = r.getSecret(ctx, dialer.Secret().Name, dialer.Namespace) + if err != nil { + r.updateStatusError(ctx, dialer, err) + return ctrl.Result{}, err } - // If spec secret doesn't exist, error because we can only initiate peering if we have a token to initiate with. - if existingSpecSecret == nil { + // If specSecret doesn't exist, error because we can only initiate peering if we have a token to initiate with. + if specSecret == nil { err = errors.New("PeeringDialer spec.peer.secret does not exist") - _ = r.updateStatusError(ctx, peeringDialer, err) + r.updateStatusError(ctx, dialer, err) return ctrl.Result{}, err } - // Read the peering from Consul. - // TODO(peering): do we need to pass in partition? - r.Log.Info("reading peering from Consul", "name", peeringDialer.Name) - peering, _, err := r.ConsulClient.Peerings().Read(ctx, peeringDialer.Name, nil) - if err != nil { - r.Log.Error(err, "failed to get Peering from Consul", "name", req.Name) - return ctrl.Result{}, err + // Check if the status has a secretRef. + secretRefSet := false + if dialer.SecretRef() != nil { + secretRefSet = true + } + + // statusSecret will be nil if the secret specified by the status doesn't exist. + var statusSecret *corev1.Secret + if secretRefSet { + statusSecret, err = r.getSecret(ctx, dialer.SecretRef().Name, dialer.Namespace) + if err != nil { + r.updateStatusError(ctx, dialer, err) + return ctrl.Result{}, err + } } - peeringExists := peering != nil - // TODO(peering): Verify that the existing peering in Consul is an dialer peer. If it is an acceptor peer, an error should be thrown. // At this point, we know the spec secret exists. If the status secret doesn't // exist, then we want to initiate peering and update the status with the secret for the token being used. - if existingStatusSecret == nil { + if statusSecret == nil { // Whether the peering exists in Consul or not we want to initiate the peering so the status can reflect the // correct secret specified in the spec. 
- r.Log.Info("status.secret doesn't exist or wasn't set, establishing peering with the existing spec.peer.secret", "secret-name", peeringDialer.Spec.Peer.Secret.Name, "secret-namespace", peeringDialer.Namespace) - peeringToken := existingSpecSecret.Data[peeringDialer.Spec.Peer.Secret.Key] - _, err := r.initiatePeering(ctx, peeringDialer.Name, string(peeringToken)) - if err != nil { - _ = r.updateStatusError(ctx, peeringDialer, err) + r.Log.Info("the secret in status.secretRef doesn't exist or wasn't set, establishing peering with the existing spec.peer.secret", "secret-name", dialer.Secret().Name, "secret-namespace", dialer.Namespace) + peeringToken := specSecret.Data[dialer.Secret().Key] + if err := r.initiatePeering(ctx, dialer.Name, string(peeringToken)); err != nil { + r.updateStatusError(ctx, dialer, err) return ctrl.Result{}, err } else { - err := r.updateStatus(ctx, peeringDialer, existingSpecSecret.ResourceVersion) + err := r.updateStatus(ctx, dialer, specSecret.ResourceVersion) return ctrl.Result{}, err } } else { // At this point, the status secret does exist. // If the peering in Consul does not exist, initiate peering. - if !peeringExists { - r.Log.Info("status.secret exists, but the peering doesn't exist in Consul; establishing peering with the existing spec.peer.secret", "secret-name", peeringDialer.Spec.Peer.Secret.Name, "secret-namespace", peeringDialer.Namespace) - peeringToken := existingSpecSecret.Data[peeringDialer.Spec.Peer.Secret.Key] - _, err := r.initiatePeering(ctx, peeringDialer.Name, string(peeringToken)) - if err != nil { - _ = r.updateStatusError(ctx, peeringDialer, err) + + // Read the peering from Consul. + r.Log.Info("reading peering from Consul", "name", dialer.Name) + peering, _, err := r.ConsulClient.Peerings().Read(ctx, dialer.Name, nil) + if err != nil { + r.Log.Error(err, "failed to get Peering from Consul", "name", req.Name) + return ctrl.Result{}, err + } + // TODO(peering): Verify that the existing peering in Consul is an dialer peer. If it is an acceptor peer, an error should be thrown. + + if peering == nil { + r.Log.Info("status.secret exists, but the peering doesn't exist in Consul; establishing peering with the existing spec.peer.secret", "secret-name", dialer.Secret().Name, "secret-namespace", dialer.Namespace) + peeringToken := specSecret.Data[dialer.Secret().Key] + if err := r.initiatePeering(ctx, dialer.Name, string(peeringToken)); err != nil { + r.updateStatusError(ctx, dialer, err) return ctrl.Result{}, err } else { - err := r.updateStatus(ctx, peeringDialer, existingSpecSecret.ResourceVersion) + err := r.updateStatus(ctx, dialer, specSecret.ResourceVersion) return ctrl.Result{}, err } } // Or, if the peering in Consul does exist, compare it to the contents of the spec's secret. If there's any // differences, initiate peering. 
- if r.specStatusSecretsDifferent(peeringDialer, existingSpecSecret) {
- r.Log.Info("status.secret exists and is different from spec.peer.secret; establishing peering with the existing spec.peer.secret", "secret-name", peeringDialer.Spec.Peer.Secret.Name, "secret-namespace", peeringDialer.Namespace)
- peeringToken := existingSpecSecret.Data[peeringDialer.Spec.Peer.Secret.Key]
- _, err := r.initiatePeering(ctx, peeringDialer.Name, string(peeringToken))
- if err != nil {
- _ = r.updateStatusError(ctx, peeringDialer, err)
+ if r.specStatusSecretsDifferent(dialer, specSecret) {
+ r.Log.Info("the secret in status.secretRef exists and is different from spec.peer.secret; establishing peering with the existing spec.peer.secret", "secret-name", dialer.Secret().Name, "secret-namespace", dialer.Namespace)
+ peeringToken := specSecret.Data[dialer.Secret().Key]
+ if err := r.initiatePeering(ctx, dialer.Name, string(peeringToken)); err != nil {
+ r.updateStatusError(ctx, dialer, err)
 return ctrl.Result{}, err
 } else {
- err := r.updateStatus(ctx, peeringDialer, existingSpecSecret.ResourceVersion)
+ err := r.updateStatus(ctx, dialer, specSecret.ResourceVersion)
 return ctrl.Result{}, err
 }
 }
@@ -169,66 +161,60 @@ func (r *PeeringDialerController) Reconcile(ctx context.Context, req ctrl.Reques
 return ctrl.Result{}, nil
 }
 
-func (r *PeeringDialerController) specStatusSecretsDifferent(peeringDialer *consulv1alpha1.PeeringDialer, existingSpecSecret *corev1.Secret) bool {
- if peeringDialer.Status.SecretRef.Name != peeringDialer.Spec.Peer.Secret.Name {
+func (r *PeeringDialerController) specStatusSecretsDifferent(dialer *consulv1alpha1.PeeringDialer, existingSpecSecret *corev1.Secret) bool {
+ if dialer.SecretRef().Name != dialer.Secret().Name {
 return true
 }
- if peeringDialer.Status.SecretRef.Key != peeringDialer.Spec.Peer.Secret.Key {
+ if dialer.SecretRef().Key != dialer.Secret().Key {
 return true
 }
- if peeringDialer.Status.SecretRef.Backend != peeringDialer.Spec.Peer.Secret.Backend {
+ if dialer.SecretRef().Backend != dialer.Secret().Backend {
 return true
 }
- existingSpecSecretResourceVersion := existingSpecSecret.ResourceVersion
- return existingSpecSecretResourceVersion != peeringDialer.Status.SecretRef.ResourceVersion
+ return dialer.SecretRef().ResourceVersion != existingSpecSecret.ResourceVersion
 }
 
-func (r *PeeringDialerController) updateStatus(ctx context.Context, peeringDialer *consulv1alpha1.PeeringDialer, resourceVersion string) error {
- peeringDialer.Status.SecretRef = &consulv1alpha1.SecretRefStatus{
- Name: peeringDialer.Spec.Peer.Secret.Name,
- Key: peeringDialer.Spec.Peer.Secret.Key,
- Backend: peeringDialer.Spec.Peer.Secret.Backend,
+func (r *PeeringDialerController) updateStatus(ctx context.Context, dialer *consulv1alpha1.PeeringDialer, resourceVersion string) error {
+ dialer.Status.SecretRef = &consulv1alpha1.SecretRefStatus{
+ Secret: *dialer.Spec.Peer.Secret,
+ ResourceVersion: resourceVersion,
 }
-
- peeringDialer.Status.SecretRef.ResourceVersion = resourceVersion
-
- peeringDialer.Status.LastReconcileTime = &metav1.Time{Time: time.Now()}
- err := r.Status().Update(ctx, peeringDialer)
+ dialer.Status.LastReconcileTime = &metav1.Time{Time: time.Now()}
+ // Clear any error recorded by a previous reconcile now that this one has succeeded.
+ dialer.Status.ReconcileError = &consulv1alpha1.ReconcileErrorStatus{
+ Error: pointerToBool(false),
+ Message: pointerToString(""),
+ }
+ err := r.Status().Update(ctx, dialer)
 if err != nil {
- r.Log.Error(err, "failed to update PeeringDialer status", "name", peeringDialer.Name, "namespace", peeringDialer.Namespace)
+ r.Log.Error(err, "failed to update PeeringDialer status", "name", dialer.Name, "namespace", dialer.Namespace)
 }
 return err
 }
 
-func (r *PeeringDialerController) updateStatusError(ctx context.Context, peeringDialer *consulv1alpha1.PeeringDialer, reconcileErr error) error {
- peeringDialer.Status.ReconcileError = &consulv1alpha1.ReconcileErrorStatus{
+func (r *PeeringDialerController) updateStatusError(ctx context.Context, dialer *consulv1alpha1.PeeringDialer, reconcileErr error) {
+ dialer.Status.ReconcileError = &consulv1alpha1.ReconcileErrorStatus{
 Error: pointerToBool(true),
 Message: pointerToString(reconcileErr.Error()),
 }
-
- peeringDialer.Status.LastReconcileTime = &metav1.Time{Time: time.Now()}
- err := r.Status().Update(ctx, peeringDialer)
+ dialer.Status.LastReconcileTime = &metav1.Time{Time: time.Now()}
+ err := r.Status().Update(ctx, dialer)
 if err != nil {
- r.Log.Error(err, "failed to update PeeringDialer status", "name", peeringDialer.Name, "namespace", peeringDialer.Namespace)
+ r.Log.Error(err, "failed to update PeeringDialer status", "name", dialer.Name, "namespace", dialer.Namespace)
 }
- return err
 }
 
-func (r *PeeringDialerController) getExistingSecret(ctx context.Context, name string, namespace string) (bool, *corev1.Secret, error) {
- existingSecret := &corev1.Secret{}
- namespacedName := types.NamespacedName{
- Name: name,
- Namespace: namespace,
- }
- err := r.Client.Get(ctx, namespacedName, existingSecret)
+func (r *PeeringDialerController) getSecret(ctx context.Context, name string, namespace string) (*corev1.Secret, error) {
+ secret := &corev1.Secret{}
+ namespacedName := types.NamespacedName{Name: name, Namespace: namespace}
+ err := r.Client.Get(ctx, namespacedName, secret)
 if k8serrors.IsNotFound(err) {
- // The secret was deleted.
- return false, nil, nil
+ // The secret does not exist.
+ return nil, nil
 } else if err != nil {
 r.Log.Error(err, "couldn't get secret", "name", name, "namespace", namespace)
- return false, nil, err
+ return nil, err
 }
- return true, existingSecret, nil
+ return secret, nil
 }
 
 // SetupWithManager sets up the controller with the Manager.
@@ -239,17 +225,17 @@ func (r *PeeringDialerController) SetupWithManager(mgr ctrl.Manager) error {
 }
 
-// initiatePeering is a helper function that calls the Consul api to generate a token for the peer.
-func (r *PeeringDialerController) initiatePeering(ctx context.Context, peerName string, peeringToken string) (*api.PeeringInitiateResponse, error) {
+// initiatePeering is a helper function that calls the Consul api to establish a peering using the given token.
+func (r *PeeringDialerController) initiatePeering(ctx context.Context, peerName string, peeringToken string) error {
 req := api.PeeringInitiateRequest{
 PeerName: peerName,
 PeeringToken: peeringToken,
 }
- resp, _, err := r.ConsulClient.Peerings().Initiate(ctx, req, nil)
+ _, _, err := r.ConsulClient.Peerings().Initiate(ctx, req, nil)
 if err != nil {
- r.Log.Error(err, "failed to initiate peering", "err", err)
- return nil, err
+ r.Log.Error(err, "failed to initiate peering")
+ return err
 }
- return resp, nil
+ return nil
 }
 
 // deletePeering is a helper function that calls the Consul api to delete a peering.
diff --git a/control-plane/connect-inject/peering_dialer_controller_test.go b/control-plane/connect-inject/peering_dialer_controller_test.go
index f9c5e52361..0a74638e3e 100644
--- a/control-plane/connect-inject/peering_dialer_controller_test.go
+++ b/control-plane/connect-inject/peering_dialer_controller_test.go
@@ -2,6 +2,11 @@ package connectinject
 
 import (
 "context"
+ "encoding/base64"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "strings"
 "testing"
 
 logrtest "github.com/go-logr/logr/testing"
@@ -18,6 +23,465 @@ import (
 "sigs.k8s.io/controller-runtime/pkg/client/fake"
)
 
+// TestReconcileCreateUpdatePeeringDialer tests the create and update flows of the PeeringDialer
+// controller: missing spec secrets, a nil status secret, and a stale or out-of-date peering.
+func TestReconcileCreateUpdatePeeringDialer(t *testing.T) {
+ t.Parallel()
+ nodeName := "test-node"
+ node2Name := "test-node2"
+ cases := map[string]struct {
+ peeringName string
+ k8sObjects func() []runtime.Object
+ expectedConsulPeerings *api.Peering
+ peeringSecret func(token string) *corev1.Secret
+ expErr string
+ expectedStatus *v1alpha1.PeeringDialerStatus
+ peeringExists bool
+ }{
+ "Errors when Secret is not set on the spec": {
+ k8sObjects: func() []runtime.Object {
+ dialer := &v1alpha1.PeeringDialer{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "peering",
+ Namespace: "default",
+ },
+ Spec: v1alpha1.PeeringDialerSpec{
+ Peer: &v1alpha1.Peer{
+ Secret: nil,
+ },
+ },
+ }
+ return []runtime.Object{dialer}
+ },
+ expErr: "PeeringDialer spec.peer.secret was not set",
+ peeringSecret: func(_ string) *corev1.Secret { return nil },
+ },
+ "Errors when Secret set on the spec does not exist in the cluster": {
+ k8sObjects: func() []runtime.Object {
+ dialer := &v1alpha1.PeeringDialer{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "peering",
+ Namespace: "default",
+ },
+ Spec: v1alpha1.PeeringDialerSpec{
+ Peer: &v1alpha1.Peer{
+ Secret: &v1alpha1.Secret{
+ Name: "dialer",
+ Key: "token",
+ Backend: "kubernetes",
+ },
+ },
+ },
+ }
+ return []runtime.Object{dialer}
+ },
+ expErr: "PeeringDialer spec.peer.secret does not exist",
+ peeringSecret: func(_ string) *corev1.Secret { return nil },
+ },
+ "Initiates peering when status secret is nil": {
+ peeringName: "peering",
+ k8sObjects: func() []runtime.Object {
+ dialer := &v1alpha1.PeeringDialer{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "peering",
+ Namespace: "default",
+ },
+ Spec: v1alpha1.PeeringDialerSpec{
+ Peer: &v1alpha1.Peer{
+ Secret: &v1alpha1.Secret{
+ Name: "dialer-token",
+ Key: "token",
+ Backend: "kubernetes",
+ },
+ },
+ },
+ }
+ return []runtime.Object{dialer}
+ },
+ expectedConsulPeerings: &api.Peering{
+ Name: "peering",
+ State: api.PeeringStateActive,
+ },
+ peeringSecret: func(token string) *corev1.Secret {
+ return createSecret("dialer-token", "default", "token", token)
+ },
+ expectedStatus: &v1alpha1.PeeringDialerStatus{
+ SecretRef: &v1alpha1.SecretRefStatus{
+ Secret: v1alpha1.Secret{
+ Name: "dialer-token",
+ Key: "token",
+ Backend: "kubernetes",
+ },
+ },
+ },
+ },
+ "Initiates peering when status secret is set but peering is not found in Consul": {
+ peeringName: "peering",
+ k8sObjects: func() []runtime.Object {
+ dialer := &v1alpha1.PeeringDialer{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "peering",
+ Namespace: "default",
+ },
+ Spec: v1alpha1.PeeringDialerSpec{
+ Peer: &v1alpha1.Peer{
+ Secret: &v1alpha1.Secret{
+ Name: "dialer-token",
+ Key: "token",
+ Backend: "kubernetes",
+ },
+ },
+ },
+ Status: v1alpha1.PeeringDialerStatus{
+ SecretRef: &v1alpha1.SecretRefStatus{
+ Secret: v1alpha1.Secret{
+ Name: "dialer-token",
+ Key: "token",
+ Backend: "kubernetes",
+ },
+ ResourceVersion: "test-version",
+ },
+ },
+ }
+ return []runtime.Object{dialer}
+ },
+ expectedConsulPeerings: &api.Peering{
+ Name: "peering",
+ State: api.PeeringStateActive,
+ },
+ peeringSecret: func(token string) *corev1.Secret {
+ return createSecret("dialer-token", "default", "token", token)
+ },
+ expectedStatus: &v1alpha1.PeeringDialerStatus{
+ SecretRef: &v1alpha1.SecretRefStatus{
+ Secret: v1alpha1.Secret{
+ Name: "dialer-token",
+ Key: "token",
+ Backend: "kubernetes",
+ },
+ },
+ },
+ },
+ "Initiates peering when status secret is set, peering is found, but out of date": {
+ peeringName: "peering",
+ k8sObjects: func() []runtime.Object {
+ dialer := &v1alpha1.PeeringDialer{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "peering",
+ Namespace: "default",
+ },
+ Spec: v1alpha1.PeeringDialerSpec{
+ Peer: &v1alpha1.Peer{
+ Secret: &v1alpha1.Secret{
+ Name: "dialer-token",
+ Key: "token",
+ Backend: "kubernetes",
+ },
+ },
+ },
+ Status: v1alpha1.PeeringDialerStatus{
+ SecretRef: &v1alpha1.SecretRefStatus{
+ Secret: v1alpha1.Secret{
+ Name: "dialer-token-old",
+ Key: "token",
+ Backend: "kubernetes",
+ },
+ ResourceVersion: "test-version",
+ },
+ },
+ }
+ return []runtime.Object{dialer}
+ },
+ expectedConsulPeerings: &api.Peering{
+ Name: "peering",
+ State: api.PeeringStateActive,
+ },
+ peeringSecret: func(token string) *corev1.Secret {
+ return createSecret("dialer-token", "default", "token", token)
+ },
+ expectedStatus: &v1alpha1.PeeringDialerStatus{
+ SecretRef: &v1alpha1.SecretRefStatus{
+ Secret: v1alpha1.Secret{
+ Name: "dialer-token",
+ Key: "token",
+ Backend: "kubernetes",
+ },
+ },
+ },
+ peeringExists: true,
+ },
+ }
+ for name, tt := range cases {
+ t.Run(name, func(t *testing.T) {
+ // Create a test Consul server that acts as the acceptor.
+ acceptorPeerServer, err := testutil.NewTestServerConfigT(t, func(c *testutil.TestServerConfig) {
+ c.NodeName = nodeName
+ })
+ require.NoError(t, err)
+ defer acceptorPeerServer.Stop()
+ acceptorPeerServer.WaitForServiceIntentions(t)
+
+ cfg := &api.Config{
+ Address: acceptorPeerServer.HTTPAddr,
+ }
+ acceptorClient, err := api.NewClient(cfg)
+ require.NoError(t, err)
+
+ // Add the default namespace.
+ ns := corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "default"}}
+ // Create the fake k8s client.
+ k8sObjects := append(tt.k8sObjects(), &ns)
+
+ // The token generated by the acceptor side does not have the IP of the Consul server
+ // set on it, so rewrite the token to include the server address before handing it to the dialer.
+ var encodedPeeringToken string
+ if tt.peeringName != "" {
+ var token struct {
+ CA string
+ ServerAddresses []string
+ ServerName string
+ PeerID string
+ }
+ // Create the initial token.
+ baseToken, _, err := acceptorClient.Peerings().GenerateToken(context.Background(), api.PeeringGenerateTokenRequest{PeerName: tt.peeringName}, nil)
+ require.NoError(t, err)
+ // Decode the token to extract the ServerName and PeerID. CA is always null.
+ decodeBytes, err := base64.StdEncoding.DecodeString(baseToken.PeeringToken)
+ require.NoError(t, err)
+ err = json.Unmarshal(decodeBytes, &token)
+ require.NoError(t, err)
+ // Get the IP of the Consul server.
+ addr := strings.Split(acceptorPeerServer.HTTPAddr, ":")[0]
+ // Generate the expected token for Peering Initiate.
+ tokenString := fmt.Sprintf(`{"CA":null,"ServerAddresses":["%s:8300"],"ServerName":"%s","PeerID":"%s"}`, addr, token.ServerName, token.PeerID)
+ // Create the peering initiate secret in Kubernetes.
+ encodedPeeringToken = base64.StdEncoding.EncodeToString([]byte(tokenString))
+ secret := tt.peeringSecret(encodedPeeringToken)
+ secret.SetResourceVersion("latest-version")
+ k8sObjects = append(k8sObjects, secret)
+ }
+
+ // Create a second test Consul server that acts as the dialer.
+ dialerPeerServer, err := testutil.NewTestServerConfigT(t, func(c *testutil.TestServerConfig) {
+ c.NodeName = node2Name
+ })
+ require.NoError(t, err)
+ defer dialerPeerServer.Stop()
+ dialerPeerServer.WaitForServiceIntentions(t)
+
+ cfg = &api.Config{
+ Address: dialerPeerServer.HTTPAddr,
+ }
+ dialerClient, err := api.NewClient(cfg)
+ require.NoError(t, err)
+
+ if tt.peeringExists {
+ _, _, err := dialerClient.Peerings().Initiate(context.Background(), api.PeeringInitiateRequest{PeerName: tt.peeringName, PeeringToken: encodedPeeringToken}, nil)
+ require.NoError(t, err)
+ k8sObjects = append(k8sObjects, createSecret("dialer-token-old", "default", "token", "old-token"))
+ }
+
+ s := scheme.Scheme
+ s.AddKnownTypes(v1alpha1.GroupVersion, &v1alpha1.PeeringDialer{}, &v1alpha1.PeeringDialerList{})
+ fakeClient := fake.NewClientBuilder().WithScheme(s).WithRuntimeObjects(k8sObjects...).Build()
+
+ // Create the peering dialer controller.
+ controller := &PeeringDialerController{
+ Client: fakeClient,
+ Log: logrtest.TestLogger{T: t},
+ ConsulClient: dialerClient,
+ Scheme: s,
+ }
+ namespacedName := types.NamespacedName{
+ Name: "peering",
+ Namespace: "default",
+ }
+
+ resp, err := controller.Reconcile(context.Background(), ctrl.Request{
+ NamespacedName: namespacedName,
+ })
+ if tt.expErr != "" {
+ require.EqualError(t, err, tt.expErr)
+ } else {
+ require.NoError(t, err)
+ require.False(t, resp.Requeue)
+
+ // After reconciliation, Consul should have the peering.
+ peering, _, err := dialerClient.Peerings().Read(context.Background(), "peering", nil)
+ require.NoError(t, err)
+ require.Equal(t, tt.expectedConsulPeerings.Name, peering.Name)
+ // TODO(peering): update this assertion once peering states are supported.
+ //require.Equal(t, api.PeeringStateActive, peering.State)
+ require.NotEmpty(t, peering.ID)
+
+ // Get the reconciled PeeringDialer and make assertions on its status.
+ dialer := &v1alpha1.PeeringDialer{}
+ err = fakeClient.Get(context.Background(), namespacedName, dialer)
+ require.NoError(t, err)
+ if tt.expectedStatus != nil {
+ require.Equal(t, tt.expectedStatus.SecretRef.Name, dialer.SecretRef().Name)
+ require.Equal(t, tt.expectedStatus.SecretRef.Key, dialer.SecretRef().Key)
+ require.Equal(t, tt.expectedStatus.SecretRef.Backend, dialer.SecretRef().Backend)
+ // The status must record the resource version of the spec secret, not the stale "test-version".
+ require.Equal(t, "latest-version", dialer.SecretRef().ResourceVersion)
+ }
+ }
+ })
+ }
+}
+
+// TestSpecStatusSecretsDifferent tests that the correct result is returned when comparing
+// the secret in the status against the secret in the spec and the stored Kubernetes secret.
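+// Cases cover a mismatched name, key, and backend between spec and status, plus a mismatched
+// resource version between the status and the stored secret.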
+func TestSpecStatusSecretsDifferent(t *testing.T) { + t.Parallel() + cases := map[string]struct { + dialer *v1alpha1.PeeringDialer + secret *corev1.Secret + isDifferent bool + }{ + "different secret name in spec and status": { + dialer: &v1alpha1.PeeringDialer{ + Spec: v1alpha1.PeeringDialerSpec{ + Peer: &v1alpha1.Peer{ + Secret: &v1alpha1.Secret{ + Name: "foo", + Key: "token", + Backend: "kubernetes", + }, + }, + }, + Status: v1alpha1.PeeringDialerStatus{ + SecretRef: &v1alpha1.SecretRefStatus{ + Secret: v1alpha1.Secret{ + Name: "bar", + Key: "token", + Backend: "kubernetes", + }, + }, + }, + }, + secret: nil, + isDifferent: true, + }, + "different secret key in spec and status": { + dialer: &v1alpha1.PeeringDialer{ + Spec: v1alpha1.PeeringDialerSpec{ + Peer: &v1alpha1.Peer{ + Secret: &v1alpha1.Secret{ + Name: "foo", + Key: "token", + Backend: "kubernetes", + }, + }, + }, + Status: v1alpha1.PeeringDialerStatus{ + SecretRef: &v1alpha1.SecretRefStatus{ + Secret: v1alpha1.Secret{ + Name: "foo", + Key: "key", + Backend: "kubernetes", + }, + }, + }, + }, + secret: nil, + isDifferent: true, + }, + "different secret backend in spec and status": { + dialer: &v1alpha1.PeeringDialer{ + Spec: v1alpha1.PeeringDialerSpec{ + Peer: &v1alpha1.Peer{ + Secret: &v1alpha1.Secret{ + Name: "foo", + Key: "token", + Backend: "kubernetes", + }, + }, + }, + Status: v1alpha1.PeeringDialerStatus{ + SecretRef: &v1alpha1.SecretRefStatus{ + Secret: v1alpha1.Secret{ + Name: "foo", + Key: "token", + Backend: "vault", + }, + }, + }, + }, + secret: nil, + isDifferent: true, + }, + "different secret ref in status and saved secret": { + dialer: &v1alpha1.PeeringDialer{ + Spec: v1alpha1.PeeringDialerSpec{ + Peer: &v1alpha1.Peer{ + Secret: &v1alpha1.Secret{ + Name: "foo", + Key: "token", + Backend: "kubernetes", + }, + }, + }, + Status: v1alpha1.PeeringDialerStatus{ + SecretRef: &v1alpha1.SecretRefStatus{ + Secret: v1alpha1.Secret{ + Name: "foo", + Key: "token", + Backend: "kubernetes", + }, + ResourceVersion: "version1", + }, + }, + }, + secret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + ResourceVersion: "version2", + }, + }, + isDifferent: true, + }, + "same secret ref in status and saved secret": { + dialer: &v1alpha1.PeeringDialer{ + Spec: v1alpha1.PeeringDialerSpec{ + Peer: &v1alpha1.Peer{ + Secret: &v1alpha1.Secret{ + Name: "foo", + Key: "token", + Backend: "kubernetes", + }, + }, + }, + Status: v1alpha1.PeeringDialerStatus{ + SecretRef: &v1alpha1.SecretRefStatus{ + Secret: v1alpha1.Secret{ + Name: "foo", + Key: "token", + Backend: "kubernetes", + }, + ResourceVersion: "version1", + }, + }, + }, + secret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + ResourceVersion: "version1", + }, + }, + isDifferent: false, + }, + } + + for name, tt := range cases { + t.Run(name, func(t *testing.T) { + controller := PeeringDialerController{} + isDifferent := controller.specStatusSecretsDifferent(tt.dialer, tt.secret) + require.Equal(t, tt.isDifferent, isDifferent) + }) + } +} + // TestReconcileDeletePeeringDialer reconciles a PeeringDialer resource that is no longer in Kubernetes, but still // exists in Consul. func TestReconcileDeletePeeringDialer(t *testing.T) { @@ -66,7 +530,7 @@ func TestReconcileDeletePeeringDialer(t *testing.T) { _, _, err = consulClient.Peerings().GenerateToken(context.Background(), api.PeeringGenerateTokenRequest{PeerName: tt.initialConsulPeerNames[0]}, nil) require.NoError(t, err) - // Create the peering acceptor controller. + // Create the peering dialer controller. 
pdc := &PeeringDialerController{ Client: fakeClient, Log: logrtest.TestLogger{T: t}, @@ -96,3 +560,223 @@ func TestReconcileDeletePeeringDialer(t *testing.T) { }) } } + +func TestDialerUpdateStatus(t *testing.T) { + cases := []struct { + name string + peeringDialer *v1alpha1.PeeringDialer + resourceVersion string + expStatus v1alpha1.PeeringDialerStatus + }{ + { + name: "updates status when there's no existing status", + peeringDialer: &v1alpha1.PeeringDialer{ + ObjectMeta: metav1.ObjectMeta{ + Name: "dialer", + Namespace: "default", + }, + Spec: v1alpha1.PeeringDialerSpec{ + Peer: &v1alpha1.Peer{ + Secret: &v1alpha1.Secret{ + Name: "dialer-secret", + Key: "data", + Backend: "kubernetes", + }, + }, + }, + }, + resourceVersion: "1234", + expStatus: v1alpha1.PeeringDialerStatus{ + SecretRef: &v1alpha1.SecretRefStatus{ + Secret: v1alpha1.Secret{ + Name: "dialer-secret", + Key: "data", + Backend: "kubernetes", + }, + ResourceVersion: "1234", + }, + ReconcileError: &v1alpha1.ReconcileErrorStatus{ + Error: pointerToBool(false), + Message: pointerToString(""), + }, + }, + }, + { + name: "updates status when there is an existing status", + peeringDialer: &v1alpha1.PeeringDialer{ + ObjectMeta: metav1.ObjectMeta{ + Name: "dialer", + Namespace: "default", + }, + Spec: v1alpha1.PeeringDialerSpec{ + Peer: &v1alpha1.Peer{ + Secret: &v1alpha1.Secret{ + Name: "dialer-secret", + Key: "data", + Backend: "kubernetes", + }, + }, + }, + Status: v1alpha1.PeeringDialerStatus{ + SecretRef: &v1alpha1.SecretRefStatus{ + Secret: v1alpha1.Secret{ + Name: "old-name", + Key: "old-key", + Backend: "kubernetes", + }, + ResourceVersion: "old-resource-version", + }, + }, + }, + resourceVersion: "1234", + expStatus: v1alpha1.PeeringDialerStatus{ + SecretRef: &v1alpha1.SecretRefStatus{ + Secret: v1alpha1.Secret{ + Name: "dialer-secret", + Key: "data", + Backend: "kubernetes", + }, + ResourceVersion: "1234", + }, + ReconcileError: &v1alpha1.ReconcileErrorStatus{ + Error: pointerToBool(false), + Message: pointerToString(""), + }, + }, + }, + } + for _, tt := range cases { + t.Run(tt.name, func(t *testing.T) { + // Add the default namespace. + ns := corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "default"}} + // Create fake k8s client. + k8sObjects := []runtime.Object{&ns} + k8sObjects = append(k8sObjects, tt.peeringDialer) + + // Add peering types to the scheme. + s := scheme.Scheme + s.AddKnownTypes(v1alpha1.GroupVersion, &v1alpha1.PeeringDialer{}, &v1alpha1.PeeringDialerList{}) + fakeClient := fake.NewClientBuilder().WithScheme(s).WithRuntimeObjects(k8sObjects...).Build() + // Create the peering dialer controller. 
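+ // updateStatus only writes status to the Kubernetes API, so no ConsulClient is set on the controller.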
+ controller := &PeeringDialerController{ + Client: fakeClient, + Log: logrtest.TestLogger{T: t}, + Scheme: s, + } + + err := controller.updateStatus(context.Background(), tt.peeringDialer, tt.resourceVersion) + require.NoError(t, err) + + dialer := &v1alpha1.PeeringDialer{} + dialerName := types.NamespacedName{ + Name: "dialer", + Namespace: "default", + } + err = fakeClient.Get(context.Background(), dialerName, dialer) + require.NoError(t, err) + require.Equal(t, tt.expStatus.SecretRef.Name, dialer.SecretRef().Name) + require.Equal(t, tt.expStatus.SecretRef.Key, dialer.SecretRef().Key) + require.Equal(t, tt.expStatus.SecretRef.Backend, dialer.SecretRef().Backend) + require.Equal(t, tt.expStatus.SecretRef.ResourceVersion, dialer.SecretRef().ResourceVersion) + require.Equal(t, *tt.expStatus.ReconcileError.Error, *dialer.Status.ReconcileError.Error) + }) + } +} + +func TestDialerUpdateStatusError(t *testing.T) { + cases := []struct { + name string + dialer *v1alpha1.PeeringDialer + reconcileErr error + expStatus v1alpha1.PeeringDialerStatus + }{ + { + name: "updates status when there's no existing status", + dialer: &v1alpha1.PeeringDialer{ + ObjectMeta: metav1.ObjectMeta{ + Name: "dialer", + Namespace: "default", + }, + Spec: v1alpha1.PeeringDialerSpec{ + Peer: &v1alpha1.Peer{ + Secret: &v1alpha1.Secret{ + Name: "dialer-secret", + Key: "data", + Backend: "kubernetes", + }, + }, + }, + }, + reconcileErr: errors.New("this is an error"), + expStatus: v1alpha1.PeeringDialerStatus{ + ReconcileError: &v1alpha1.ReconcileErrorStatus{ + Error: pointerToBool(true), + Message: pointerToString("this is an error"), + }, + }, + }, + { + name: "updates status when there is an existing status", + dialer: &v1alpha1.PeeringDialer{ + ObjectMeta: metav1.ObjectMeta{ + Name: "dialer", + Namespace: "default", + }, + Spec: v1alpha1.PeeringDialerSpec{ + Peer: &v1alpha1.Peer{ + Secret: &v1alpha1.Secret{ + Name: "dialer-secret", + Key: "data", + Backend: "kubernetes", + }, + }, + }, + Status: v1alpha1.PeeringDialerStatus{ + ReconcileError: &v1alpha1.ReconcileErrorStatus{ + Error: pointerToBool(false), + Message: pointerToString(""), + }, + }, + }, + reconcileErr: errors.New("this is an error"), + expStatus: v1alpha1.PeeringDialerStatus{ + ReconcileError: &v1alpha1.ReconcileErrorStatus{ + Error: pointerToBool(true), + Message: pointerToString("this is an error"), + }, + }, + }, + } + for _, tt := range cases { + t.Run(tt.name, func(t *testing.T) { + // Add the default namespace. + ns := corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "default"}} + // Create fake k8s client. + k8sObjects := []runtime.Object{&ns} + k8sObjects = append(k8sObjects, tt.dialer) + + // Add peering types to the scheme. + s := scheme.Scheme + s.AddKnownTypes(v1alpha1.GroupVersion, &v1alpha1.PeeringDialer{}, &v1alpha1.PeeringDialerList{}) + fakeClient := fake.NewClientBuilder().WithScheme(s).WithRuntimeObjects(k8sObjects...).Build() + // Create the peering dialer controller. 
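+ // Likewise, updateStatusError only writes status to Kubernetes, so no ConsulClient is needed here.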
+ controller := &PeeringDialerController{
+ Client: fakeClient,
+ Log: logrtest.TestLogger{T: t},
+ Scheme: s,
+ }
+
+ controller.updateStatusError(context.Background(), tt.dialer, tt.reconcileErr)
+
+ dialer := &v1alpha1.PeeringDialer{}
+ dialerName := types.NamespacedName{
+ Name: "dialer",
+ Namespace: "default",
+ }
+ err := fakeClient.Get(context.Background(), dialerName, dialer)
+ require.NoError(t, err)
+ require.Equal(t, *tt.expStatus.ReconcileError.Error, *dialer.Status.ReconcileError.Error)
+ require.Equal(t, *tt.expStatus.ReconcileError.Message, *dialer.Status.ReconcileError.Message)
+ })
+ }
+}
diff --git a/control-plane/subcommand/inject-connect/command.go b/control-plane/subcommand/inject-connect/command.go
index cfd11e4e95..9b880f16ab 100644
--- a/control-plane/subcommand/inject-connect/command.go
+++ b/control-plane/subcommand/inject-connect/command.go
@@ -455,7 +455,7 @@ func (c *Command) Run(args []string) int {
 mgr.GetWebhookServer().CertDir = c.flagCertDir
 
 mgr.GetWebhookServer().Register("/mutate",
- &webhook.Admission{Handler: &connectinject.Handler{
+ &webhook.Admission{Handler: &connectinject.ConnectWebhook{
 Clientset: c.clientset,
 ConsulClient: c.consulClient,
 ImageConsul: c.flagConsulImage,