diff --git a/.changelog/3813.txt b/.changelog/3813.txt new file mode 100644 index 0000000000..59ef045467 --- /dev/null +++ b/.changelog/3813.txt @@ -0,0 +1,3 @@ +```release-note:improvement +control-plane: Remove anyuid Security Context Constraints (SCC) requirement in OpenShift. +``` diff --git a/acceptance/framework/consul/helm_cluster.go b/acceptance/framework/consul/helm_cluster.go index 52cf7683c1..6dd1df4f67 100644 --- a/acceptance/framework/consul/helm_cluster.go +++ b/acceptance/framework/consul/helm_cluster.go @@ -165,7 +165,12 @@ func (h *HelmCluster) Create(t *testing.T) { require.NoError(r, err) }) - k8s.WaitForAllPodsToBeReady(t, h.kubernetesClient, h.helmOptions.KubectlOptions.Namespace, fmt.Sprintf("release=%s", h.releaseName)) + k8s.WaitForAllPodsToBeReady( + t, + h.kubernetesClient, + h.helmOptions.KubectlOptions.Namespace, + fmt.Sprintf("release=%s", h.releaseName), + ) } func (h *HelmCluster) Destroy(t *testing.T) { @@ -187,7 +192,11 @@ func (h *HelmCluster) Destroy(t *testing.T) { require.NoError(t, err) // Forcibly delete all gateway classes and remove their finalizers. - _ = h.runtimeClient.DeleteAllOf(context.Background(), &gwv1beta1.GatewayClass{}, client.HasLabels{"release=" + h.releaseName}) + _ = h.runtimeClient.DeleteAllOf( + context.Background(), + &gwv1beta1.GatewayClass{}, + client.HasLabels{"release=" + h.releaseName}, + ) var gatewayClassList gwv1beta1.GatewayClassList if h.runtimeClient.List(context.Background(), &gatewayClassList, &client.ListOptions{ @@ -200,7 +209,11 @@ func (h *HelmCluster) Destroy(t *testing.T) { } // Forcibly delete all gateway class configs and remove their finalizers. - _ = h.runtimeClient.DeleteAllOf(context.Background(), &v1alpha1.GatewayClassConfig{}, client.HasLabels{"release=" + h.releaseName}) + _ = h.runtimeClient.DeleteAllOf( + context.Background(), + &v1alpha1.GatewayClassConfig{}, + client.HasLabels{"release=" + h.releaseName}, + ) var gatewayClassConfigList v1alpha1.GatewayClassConfigList if h.runtimeClient.List(context.Background(), &gatewayClassConfigList, &client.ListOptions{ @@ -220,16 +233,19 @@ func (h *HelmCluster) Destroy(t *testing.T) { // Retry because sometimes certain resources (like PVC) take time to delete // in cloud providers. retry.RunWith(&retry.Counter{Wait: 2 * time.Second, Count: 600}, t, func(r *retry.R) { - // Force delete any pods that have h.releaseName in their name because sometimes // graceful termination takes a long time and since this is an uninstall // we don't care that they're stopped gracefully. - pods, err := h.kubernetesClient.CoreV1().Pods(h.helmOptions.KubectlOptions.Namespace).List(context.Background(), metav1.ListOptions{LabelSelector: "release=" + h.releaseName}) + pods, err := h.kubernetesClient.CoreV1(). + Pods(h.helmOptions.KubectlOptions.Namespace). + List(context.Background(), metav1.ListOptions{LabelSelector: "release=" + h.releaseName}) require.NoError(r, err) for _, pod := range pods.Items { if strings.Contains(pod.Name, h.releaseName) { var gracePeriod int64 = 0 - err := h.kubernetesClient.CoreV1().Pods(h.helmOptions.KubectlOptions.Namespace).Delete(context.Background(), pod.Name, metav1.DeleteOptions{GracePeriodSeconds: &gracePeriod}) + err := h.kubernetesClient.CoreV1(). + Pods(h.helmOptions.KubectlOptions.Namespace). 
+ Delete(context.Background(), pod.Name, metav1.DeleteOptions{GracePeriodSeconds: &gracePeriod}) if !errors.IsNotFound(err) { require.NoError(r, err) } @@ -237,11 +253,15 @@ func (h *HelmCluster) Destroy(t *testing.T) { } // Delete any deployments that have h.releaseName in their name. - deployments, err := h.kubernetesClient.AppsV1().Deployments(h.helmOptions.KubectlOptions.Namespace).List(context.Background(), metav1.ListOptions{LabelSelector: "release=" + h.releaseName}) + deployments, err := h.kubernetesClient.AppsV1(). + Deployments(h.helmOptions.KubectlOptions.Namespace). + List(context.Background(), metav1.ListOptions{LabelSelector: "release=" + h.releaseName}) require.NoError(r, err) for _, deployment := range deployments.Items { if strings.Contains(deployment.Name, h.releaseName) { - err := h.kubernetesClient.AppsV1().Deployments(h.helmOptions.KubectlOptions.Namespace).Delete(context.Background(), deployment.Name, metav1.DeleteOptions{}) + err := h.kubernetesClient.AppsV1(). + Deployments(h.helmOptions.KubectlOptions.Namespace). + Delete(context.Background(), deployment.Name, metav1.DeleteOptions{}) if !errors.IsNotFound(err) { require.NoError(r, err) } @@ -249,11 +269,15 @@ func (h *HelmCluster) Destroy(t *testing.T) { } // Delete any replicasets that have h.releaseName in their name. - replicasets, err := h.kubernetesClient.AppsV1().ReplicaSets(h.helmOptions.KubectlOptions.Namespace).List(context.Background(), metav1.ListOptions{LabelSelector: "release=" + h.releaseName}) + replicasets, err := h.kubernetesClient.AppsV1(). + ReplicaSets(h.helmOptions.KubectlOptions.Namespace). + List(context.Background(), metav1.ListOptions{LabelSelector: "release=" + h.releaseName}) require.NoError(r, err) for _, replicaset := range replicasets.Items { if strings.Contains(replicaset.Name, h.releaseName) { - err := h.kubernetesClient.AppsV1().ReplicaSets(h.helmOptions.KubectlOptions.Namespace).Delete(context.Background(), replicaset.Name, metav1.DeleteOptions{}) + err := h.kubernetesClient.AppsV1(). + ReplicaSets(h.helmOptions.KubectlOptions.Namespace). + Delete(context.Background(), replicaset.Name, metav1.DeleteOptions{}) if !errors.IsNotFound(err) { require.NoError(r, err) } @@ -261,11 +285,15 @@ func (h *HelmCluster) Destroy(t *testing.T) { } // Delete any statefulsets that have h.releaseName in their name. - statefulsets, err := h.kubernetesClient.AppsV1().StatefulSets(h.helmOptions.KubectlOptions.Namespace).List(context.Background(), metav1.ListOptions{LabelSelector: "release=" + h.releaseName}) + statefulsets, err := h.kubernetesClient.AppsV1(). + StatefulSets(h.helmOptions.KubectlOptions.Namespace). + List(context.Background(), metav1.ListOptions{LabelSelector: "release=" + h.releaseName}) require.NoError(r, err) for _, statefulset := range statefulsets.Items { if strings.Contains(statefulset.Name, h.releaseName) { - err := h.kubernetesClient.AppsV1().StatefulSets(h.helmOptions.KubectlOptions.Namespace).Delete(context.Background(), statefulset.Name, metav1.DeleteOptions{}) + err := h.kubernetesClient.AppsV1(). + StatefulSets(h.helmOptions.KubectlOptions.Namespace). + Delete(context.Background(), statefulset.Name, metav1.DeleteOptions{}) if !errors.IsNotFound(err) { require.NoError(r, err) } @@ -273,11 +301,15 @@ func (h *HelmCluster) Destroy(t *testing.T) { } // Delete any daemonsets that have h.releaseName in their name. 
- daemonsets, err := h.kubernetesClient.AppsV1().DaemonSets(h.helmOptions.KubectlOptions.Namespace).List(context.Background(), metav1.ListOptions{LabelSelector: "release=" + h.releaseName}) + daemonsets, err := h.kubernetesClient.AppsV1(). + DaemonSets(h.helmOptions.KubectlOptions.Namespace). + List(context.Background(), metav1.ListOptions{LabelSelector: "release=" + h.releaseName}) require.NoError(r, err) for _, daemonset := range daemonsets.Items { if strings.Contains(daemonset.Name, h.releaseName) { - err := h.kubernetesClient.AppsV1().DaemonSets(h.helmOptions.KubectlOptions.Namespace).Delete(context.Background(), daemonset.Name, metav1.DeleteOptions{}) + err := h.kubernetesClient.AppsV1(). + DaemonSets(h.helmOptions.KubectlOptions.Namespace). + Delete(context.Background(), daemonset.Name, metav1.DeleteOptions{}) if !errors.IsNotFound(err) { require.NoError(r, err) } @@ -285,11 +317,15 @@ func (h *HelmCluster) Destroy(t *testing.T) { } // Delete any services that have h.releaseName in their name. - services, err := h.kubernetesClient.CoreV1().Services(h.helmOptions.KubectlOptions.Namespace).List(context.Background(), metav1.ListOptions{LabelSelector: "release=" + h.releaseName}) + services, err := h.kubernetesClient.CoreV1(). + Services(h.helmOptions.KubectlOptions.Namespace). + List(context.Background(), metav1.ListOptions{LabelSelector: "release=" + h.releaseName}) require.NoError(r, err) for _, service := range services.Items { if strings.Contains(service.Name, h.releaseName) { - err := h.kubernetesClient.CoreV1().Services(h.helmOptions.KubectlOptions.Namespace).Delete(context.Background(), service.Name, metav1.DeleteOptions{}) + err := h.kubernetesClient.CoreV1(). + Services(h.helmOptions.KubectlOptions.Namespace). + Delete(context.Background(), service.Name, metav1.DeleteOptions{}) if !errors.IsNotFound(err) { require.NoError(r, err) } @@ -297,15 +333,21 @@ func (h *HelmCluster) Destroy(t *testing.T) { } // Delete PVCs. - err = h.kubernetesClient.CoreV1().PersistentVolumeClaims(h.helmOptions.KubectlOptions.Namespace).DeleteCollection(context.Background(), metav1.DeleteOptions{}, metav1.ListOptions{LabelSelector: "release=" + h.releaseName}) + err = h.kubernetesClient.CoreV1(). + PersistentVolumeClaims(h.helmOptions.KubectlOptions.Namespace). + DeleteCollection(context.Background(), metav1.DeleteOptions{}, metav1.ListOptions{LabelSelector: "release=" + h.releaseName}) require.NoError(r, err) // Delete any serviceaccounts that have h.releaseName in their name. - sas, err := h.kubernetesClient.CoreV1().ServiceAccounts(h.helmOptions.KubectlOptions.Namespace).List(context.Background(), metav1.ListOptions{LabelSelector: "release=" + h.releaseName}) + sas, err := h.kubernetesClient.CoreV1(). + ServiceAccounts(h.helmOptions.KubectlOptions.Namespace). + List(context.Background(), metav1.ListOptions{LabelSelector: "release=" + h.releaseName}) require.NoError(r, err) for _, sa := range sas.Items { if strings.Contains(sa.Name, h.releaseName) { - err := h.kubernetesClient.CoreV1().ServiceAccounts(h.helmOptions.KubectlOptions.Namespace).Delete(context.Background(), sa.Name, metav1.DeleteOptions{}) + err := h.kubernetesClient.CoreV1(). + ServiceAccounts(h.helmOptions.KubectlOptions.Namespace). + Delete(context.Background(), sa.Name, metav1.DeleteOptions{}) if !errors.IsNotFound(err) { require.NoError(r, err) } @@ -313,11 +355,15 @@ func (h *HelmCluster) Destroy(t *testing.T) { } // Delete any roles that have h.releaseName in their name. 
- roles, err := h.kubernetesClient.RbacV1().Roles(h.helmOptions.KubectlOptions.Namespace).List(context.Background(), metav1.ListOptions{LabelSelector: "release=" + h.releaseName}) + roles, err := h.kubernetesClient.RbacV1(). + Roles(h.helmOptions.KubectlOptions.Namespace). + List(context.Background(), metav1.ListOptions{LabelSelector: "release=" + h.releaseName}) require.NoError(r, err) for _, role := range roles.Items { if strings.Contains(role.Name, h.releaseName) { - err := h.kubernetesClient.RbacV1().Roles(h.helmOptions.KubectlOptions.Namespace).Delete(context.Background(), role.Name, metav1.DeleteOptions{}) + err := h.kubernetesClient.RbacV1(). + Roles(h.helmOptions.KubectlOptions.Namespace). + Delete(context.Background(), role.Name, metav1.DeleteOptions{}) if !errors.IsNotFound(err) { require.NoError(r, err) } @@ -325,11 +371,15 @@ func (h *HelmCluster) Destroy(t *testing.T) { } // Delete any rolebindings that have h.releaseName in their name. - roleBindings, err := h.kubernetesClient.RbacV1().RoleBindings(h.helmOptions.KubectlOptions.Namespace).List(context.Background(), metav1.ListOptions{LabelSelector: "release=" + h.releaseName}) + roleBindings, err := h.kubernetesClient.RbacV1(). + RoleBindings(h.helmOptions.KubectlOptions.Namespace). + List(context.Background(), metav1.ListOptions{LabelSelector: "release=" + h.releaseName}) require.NoError(r, err) for _, roleBinding := range roleBindings.Items { if strings.Contains(roleBinding.Name, h.releaseName) { - err := h.kubernetesClient.RbacV1().RoleBindings(h.helmOptions.KubectlOptions.Namespace).Delete(context.Background(), roleBinding.Name, metav1.DeleteOptions{}) + err := h.kubernetesClient.RbacV1(). + RoleBindings(h.helmOptions.KubectlOptions.Namespace). + Delete(context.Background(), roleBinding.Name, metav1.DeleteOptions{}) if !errors.IsNotFound(err) { require.NoError(r, err) } @@ -337,11 +387,15 @@ func (h *HelmCluster) Destroy(t *testing.T) { } // Delete any secrets that have h.releaseName in their name. - secrets, err := h.kubernetesClient.CoreV1().Secrets(h.helmOptions.KubectlOptions.Namespace).List(context.Background(), metav1.ListOptions{}) + secrets, err := h.kubernetesClient.CoreV1(). + Secrets(h.helmOptions.KubectlOptions.Namespace). + List(context.Background(), metav1.ListOptions{}) require.NoError(r, err) for _, secret := range secrets.Items { if strings.Contains(secret.Name, h.releaseName) { - err := h.kubernetesClient.CoreV1().Secrets(h.helmOptions.KubectlOptions.Namespace).Delete(context.Background(), secret.Name, metav1.DeleteOptions{}) + err := h.kubernetesClient.CoreV1(). + Secrets(h.helmOptions.KubectlOptions.Namespace). + Delete(context.Background(), secret.Name, metav1.DeleteOptions{}) if !errors.IsNotFound(err) { require.NoError(r, err) } @@ -349,11 +403,15 @@ func (h *HelmCluster) Destroy(t *testing.T) { } // Delete any jobs that have h.releaseName in their name. - jobs, err := h.kubernetesClient.BatchV1().Jobs(h.helmOptions.KubectlOptions.Namespace).List(context.Background(), metav1.ListOptions{LabelSelector: "release=" + h.releaseName}) + jobs, err := h.kubernetesClient.BatchV1(). + Jobs(h.helmOptions.KubectlOptions.Namespace). + List(context.Background(), metav1.ListOptions{LabelSelector: "release=" + h.releaseName}) require.NoError(r, err) for _, job := range jobs.Items { if strings.Contains(job.Name, h.releaseName) { - err := h.kubernetesClient.BatchV1().Jobs(h.helmOptions.KubectlOptions.Namespace).Delete(context.Background(), job.Name, metav1.DeleteOptions{}) + err := h.kubernetesClient.BatchV1(). 
+				Jobs(h.helmOptions.KubectlOptions.Namespace).
+				Delete(context.Background(), job.Name, metav1.DeleteOptions{})
 			if !errors.IsNotFound(err) {
 				require.NoError(r, err)
 			}
@@ -361,7 +419,9 @@ func (h *HelmCluster) Destroy(t *testing.T) {
 		}
 
 		// Verify that all deployments have been deleted.
-		deployments, err = h.kubernetesClient.AppsV1().Deployments(h.helmOptions.KubectlOptions.Namespace).List(context.Background(), metav1.ListOptions{LabelSelector: "release=" + h.releaseName})
+		deployments, err = h.kubernetesClient.AppsV1().
+			Deployments(h.helmOptions.KubectlOptions.Namespace).
+			List(context.Background(), metav1.ListOptions{LabelSelector: "release=" + h.releaseName})
 		require.NoError(r, err)
 		for _, deployment := range deployments.Items {
 			if strings.Contains(deployment.Name, h.releaseName) {
@@ -370,7 +430,9 @@ func (h *HelmCluster) Destroy(t *testing.T) {
 		}
 
 		// Verify that all replicasets have been deleted.
-		replicasets, err = h.kubernetesClient.AppsV1().ReplicaSets(h.helmOptions.KubectlOptions.Namespace).List(context.Background(), metav1.ListOptions{LabelSelector: "release=" + h.releaseName})
+		replicasets, err = h.kubernetesClient.AppsV1().
+			ReplicaSets(h.helmOptions.KubectlOptions.Namespace).
+			List(context.Background(), metav1.ListOptions{LabelSelector: "release=" + h.releaseName})
 		require.NoError(r, err)
 		for _, replicaset := range replicasets.Items {
 			if strings.Contains(replicaset.Name, h.releaseName) {
@@ -379,7 +441,9 @@ func (h *HelmCluster) Destroy(t *testing.T) {
 		}
 
 		// Verify that all statefulsets have been deleted.
-		statefulsets, err = h.kubernetesClient.AppsV1().StatefulSets(h.helmOptions.KubectlOptions.Namespace).List(context.Background(), metav1.ListOptions{LabelSelector: "release=" + h.releaseName})
+		statefulsets, err = h.kubernetesClient.AppsV1().
+			StatefulSets(h.helmOptions.KubectlOptions.Namespace).
+			List(context.Background(), metav1.ListOptions{LabelSelector: "release=" + h.releaseName})
 		require.NoError(r, err)
 		for _, statefulset := range statefulsets.Items {
 			if strings.Contains(statefulset.Name, h.releaseName) {
@@ -388,7 +452,9 @@ func (h *HelmCluster) Destroy(t *testing.T) {
 		}
 
 		// Verify that all daemonsets have been deleted.
-		daemonsets, err = h.kubernetesClient.AppsV1().DaemonSets(h.helmOptions.KubectlOptions.Namespace).List(context.Background(), metav1.ListOptions{LabelSelector: "release=" + h.releaseName})
+		daemonsets, err = h.kubernetesClient.AppsV1().
+			DaemonSets(h.helmOptions.KubectlOptions.Namespace).
+			List(context.Background(), metav1.ListOptions{LabelSelector: "release=" + h.releaseName})
 		require.NoError(r, err)
 		for _, daemonset := range daemonsets.Items {
 			if strings.Contains(daemonset.Name, h.releaseName) {
@@ -397,7 +463,9 @@ func (h *HelmCluster) Destroy(t *testing.T) {
 		}
 
 		// Verify that all services have been deleted.
-		services, err = h.kubernetesClient.CoreV1().Services(h.helmOptions.KubectlOptions.Namespace).List(context.Background(), metav1.ListOptions{LabelSelector: "release=" + h.releaseName})
+		services, err = h.kubernetesClient.CoreV1().
+			Services(h.helmOptions.KubectlOptions.Namespace).
+			List(context.Background(), metav1.ListOptions{LabelSelector: "release=" + h.releaseName})
 		require.NoError(r, err)
 		for _, service := range services.Items {
 			if strings.Contains(service.Name, h.releaseName) {
@@ -406,7 +474,9 @@ func (h *HelmCluster) Destroy(t *testing.T) {
 		}
 
 		// Verify all Consul Pods are deleted.
- pods, err = h.kubernetesClient.CoreV1().Pods(h.helmOptions.KubectlOptions.Namespace).List(context.Background(), metav1.ListOptions{LabelSelector: "release=" + h.releaseName}) + pods, err = h.kubernetesClient.CoreV1(). + Pods(h.helmOptions.KubectlOptions.Namespace). + List(context.Background(), metav1.ListOptions{LabelSelector: "release=" + h.releaseName}) require.NoError(r, err) for _, pod := range pods.Items { if strings.Contains(pod.Name, h.releaseName) { @@ -415,12 +485,16 @@ func (h *HelmCluster) Destroy(t *testing.T) { } // Verify all Consul PVCs are deleted. - pvcs, err := h.kubernetesClient.CoreV1().PersistentVolumeClaims(h.helmOptions.KubectlOptions.Namespace).List(context.Background(), metav1.ListOptions{LabelSelector: "release=" + h.releaseName}) + pvcs, err := h.kubernetesClient.CoreV1(). + PersistentVolumeClaims(h.helmOptions.KubectlOptions.Namespace). + List(context.Background(), metav1.ListOptions{LabelSelector: "release=" + h.releaseName}) require.NoError(r, err) require.Len(r, pvcs.Items, 0) // Verify all Consul Service Accounts are deleted. - sas, err = h.kubernetesClient.CoreV1().ServiceAccounts(h.helmOptions.KubectlOptions.Namespace).List(context.Background(), metav1.ListOptions{LabelSelector: "release=" + h.releaseName}) + sas, err = h.kubernetesClient.CoreV1(). + ServiceAccounts(h.helmOptions.KubectlOptions.Namespace). + List(context.Background(), metav1.ListOptions{LabelSelector: "release=" + h.releaseName}) require.NoError(r, err) for _, sa := range sas.Items { if strings.Contains(sa.Name, h.releaseName) { @@ -429,7 +503,9 @@ func (h *HelmCluster) Destroy(t *testing.T) { } // Verify all Consul Roles are deleted. - roles, err = h.kubernetesClient.RbacV1().Roles(h.helmOptions.KubectlOptions.Namespace).List(context.Background(), metav1.ListOptions{LabelSelector: "release=" + h.releaseName}) + roles, err = h.kubernetesClient.RbacV1(). + Roles(h.helmOptions.KubectlOptions.Namespace). + List(context.Background(), metav1.ListOptions{LabelSelector: "release=" + h.releaseName}) require.NoError(r, err) for _, role := range roles.Items { if strings.Contains(role.Name, h.releaseName) { @@ -438,7 +514,9 @@ func (h *HelmCluster) Destroy(t *testing.T) { } // Verify all Consul Role Bindings are deleted. - roleBindings, err = h.kubernetesClient.RbacV1().RoleBindings(h.helmOptions.KubectlOptions.Namespace).List(context.Background(), metav1.ListOptions{LabelSelector: "release=" + h.releaseName}) + roleBindings, err = h.kubernetesClient.RbacV1(). + RoleBindings(h.helmOptions.KubectlOptions.Namespace). + List(context.Background(), metav1.ListOptions{LabelSelector: "release=" + h.releaseName}) require.NoError(r, err) for _, roleBinding := range roleBindings.Items { if strings.Contains(roleBinding.Name, h.releaseName) { @@ -447,7 +525,9 @@ func (h *HelmCluster) Destroy(t *testing.T) { } // Verify all Consul Secrets are deleted. - secrets, err = h.kubernetesClient.CoreV1().Secrets(h.helmOptions.KubectlOptions.Namespace).List(context.Background(), metav1.ListOptions{}) + secrets, err = h.kubernetesClient.CoreV1(). + Secrets(h.helmOptions.KubectlOptions.Namespace). + List(context.Background(), metav1.ListOptions{}) require.NoError(r, err) for _, secret := range secrets.Items { if strings.Contains(secret.Name, h.releaseName) { @@ -456,7 +536,9 @@ func (h *HelmCluster) Destroy(t *testing.T) { } // Verify all Consul Jobs are deleted. 
- jobs, err = h.kubernetesClient.BatchV1().Jobs(h.helmOptions.KubectlOptions.Namespace).List(context.Background(), metav1.ListOptions{LabelSelector: "release=" + h.releaseName}) + jobs, err = h.kubernetesClient.BatchV1(). + Jobs(h.helmOptions.KubectlOptions.Namespace). + List(context.Background(), metav1.ListOptions{LabelSelector: "release=" + h.releaseName}) require.NoError(r, err) for _, job := range jobs.Items { if strings.Contains(job.Name, h.releaseName) { @@ -475,7 +557,12 @@ func (h *HelmCluster) Upgrade(t *testing.T, helmValues map[string]string) { chartName = config.HelmChartPath } helm.Upgrade(t, h.helmOptions, chartName, h.releaseName) - k8s.WaitForAllPodsToBeReady(t, h.kubernetesClient, h.helmOptions.KubectlOptions.Namespace, fmt.Sprintf("release=%s", h.releaseName)) + k8s.WaitForAllPodsToBeReady( + t, + h.kubernetesClient, + h.helmOptions.KubectlOptions.Namespace, + fmt.Sprintf("release=%s", h.releaseName), + ) } // CreatePortForwardTunnel returns the local address:port of a tunnel to the consul server pod in the given release. @@ -508,7 +595,11 @@ func (h *HelmCluster) ResourceClient(t *testing.T, secure bool, release ...strin return resourceClient } -func (h *HelmCluster) SetupConsulClient(t *testing.T, secure bool, release ...string) (client *api.Client, configAddress string) { +func (h *HelmCluster) SetupConsulClient( + t *testing.T, + secure bool, + release ...string, +) (client *api.Client, configAddress string) { t.Helper() releaseName := h.releaseName @@ -550,7 +641,6 @@ func (h *HelmCluster) SetupConsulClient(t *testing.T, secure bool, release ...st require.NoError(r, err) } }) - } } @@ -654,7 +744,9 @@ func configurePodSecurityPolicies(t *testing.T, client kubernetes.Interface, cfg }, } - _, err = client.RbacV1().RoleBindings(namespace).Create(context.Background(), pspRoleBinding, metav1.CreateOptions{}) + _, err = client.RbacV1(). + RoleBindings(namespace). + Create(context.Background(), pspRoleBinding, metav1.CreateOptions{}) require.NoError(t, err) } else { require.NoError(t, err) @@ -698,51 +790,51 @@ func configureNamespace(t *testing.T, client kubernetes.Interface, cfg *config.T return } - require.Failf(t, "Failed to create or update namespace", "Namespace=%s, CreateError=%s, UpdateError=%s", namespace, createErr, updateErr) + require.Failf( + t, + "Failed to create or update namespace", + "Namespace=%s, CreateError=%s, UpdateError=%s", + namespace, + createErr, + updateErr, + ) } // configureSCCs creates RoleBindings that bind the default service account to cluster roles -// allowing access to the anyuid and privileged Security Context Constraints on OpenShift. +// allowing access to the privileged Security Context Constraints on OpenShift. func configureSCCs(t *testing.T, client kubernetes.Interface, cfg *config.TestConfig, namespace string) { - const anyuidClusterRole = "system:openshift:scc:anyuid" const privilegedClusterRole = "system:openshift:scc:privileged" - anyuidRoleBinding := "anyuid-test" privilegedRoleBinding := "privileged-test" // A role binding to allow default service account in the installation namespace access to the SCCs. - { - for clusterRoleName, roleBindingName := range map[string]string{anyuidClusterRole: anyuidRoleBinding, privilegedClusterRole: privilegedRoleBinding} { - // Check if this cluster role binding already exists. 
- _, err := client.RbacV1().RoleBindings(namespace).Get(context.Background(), roleBindingName, metav1.GetOptions{}) - - if errors.IsNotFound(err) { - roleBinding := &rbacv1.RoleBinding{ - ObjectMeta: metav1.ObjectMeta{ - Name: roleBindingName, - }, - Subjects: []rbacv1.Subject{ - { - Kind: rbacv1.ServiceAccountKind, - Name: "default", - Namespace: namespace, - }, - }, - RoleRef: rbacv1.RoleRef{ - Kind: "ClusterRole", - Name: clusterRoleName, - }, - } - - _, err = client.RbacV1().RoleBindings(namespace).Create(context.Background(), roleBinding, metav1.CreateOptions{}) - require.NoError(t, err) - } else { - require.NoError(t, err) - } + // Check if this cluster role binding already exists. + _, err := client.RbacV1().RoleBindings(namespace).Get(context.Background(), privilegedRoleBinding, metav1.GetOptions{}) + + if errors.IsNotFound(err) { + roleBinding := &rbacv1.RoleBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: privilegedRoleBinding, + }, + Subjects: []rbacv1.Subject{ + { + Kind: rbacv1.ServiceAccountKind, + Name: "default", + Namespace: namespace, + }, + }, + RoleRef: rbacv1.RoleRef{ + Kind: "ClusterRole", + Name: privilegedClusterRole, + }, } + + _, err = client.RbacV1().RoleBindings(namespace).Create(context.Background(), roleBinding, metav1.CreateOptions{}) + require.NoError(t, err) + } else { + require.NoError(t, err) } helpers.Cleanup(t, cfg.NoCleanupOnFailure, cfg.NoCleanup, func() { - _ = client.RbacV1().RoleBindings(namespace).Delete(context.Background(), anyuidRoleBinding, metav1.DeleteOptions{}) _ = client.RbacV1().RoleBindings(namespace).Delete(context.Background(), privilegedRoleBinding, metav1.DeleteOptions{}) }) } @@ -767,8 +859,12 @@ func defaultValues() map[string]string { return values } -func CreateK8sSecret(t *testing.T, client kubernetes.Interface, cfg *config.TestConfig, namespace, secretName, secretKey, secret string) { - +func CreateK8sSecret( + t *testing.T, + client kubernetes.Interface, + cfg *config.TestConfig, + namespace, secretName, secretKey, secret string, +) { retry.RunWith(&retry.Counter{Wait: 2 * time.Second, Count: 15}, t, func(r *retry.R) { _, err := client.CoreV1().Secrets(namespace).Get(context.Background(), secretName, metav1.GetOptions{}) if errors.IsNotFound(err) { diff --git a/acceptance/tests/fixtures/bases/multiport-app/anyuid-scc-rolebinding.yaml b/acceptance/tests/fixtures/bases/multiport-app/anyuid-scc-rolebinding.yaml deleted file mode 100644 index 5c2e0dcfa2..0000000000 --- a/acceptance/tests/fixtures/bases/multiport-app/anyuid-scc-rolebinding.yaml +++ /dev/null @@ -1,26 +0,0 @@ -# Copyright (c) HashiCorp, Inc. 
-# SPDX-License-Identifier: MPL-2.0 - -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - name: multiport-openshift-anyuid -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: system:openshift:scc:anyuid -subjects: - - kind: ServiceAccount - name: multiport ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - name: multiport-admin-openshift-anyuid -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: system:openshift:scc:anyuid -subjects: - - kind: ServiceAccount - name: multiport-admin diff --git a/acceptance/tests/fixtures/bases/multiport-app/kustomization.yaml b/acceptance/tests/fixtures/bases/multiport-app/kustomization.yaml index fb792d63a7..ecd2015a34 100644 --- a/acceptance/tests/fixtures/bases/multiport-app/kustomization.yaml +++ b/acceptance/tests/fixtures/bases/multiport-app/kustomization.yaml @@ -7,5 +7,4 @@ resources: - secret.yaml - serviceaccount.yaml - psp-rolebinding.yaml - - anyuid-scc-rolebinding.yaml - - privileged-scc-rolebinding.yaml \ No newline at end of file + - privileged-scc-rolebinding.yaml diff --git a/acceptance/tests/fixtures/bases/static-client/anyuid-scc-rolebinding.yaml b/acceptance/tests/fixtures/bases/static-client/anyuid-scc-rolebinding.yaml deleted file mode 100644 index b80bc5c562..0000000000 --- a/acceptance/tests/fixtures/bases/static-client/anyuid-scc-rolebinding.yaml +++ /dev/null @@ -1,14 +0,0 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - name: static-client-openshift-anyuid -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: system:openshift:scc:anyuid -subjects: - - kind: ServiceAccount - name: static-client \ No newline at end of file diff --git a/acceptance/tests/fixtures/bases/static-client/kustomization.yaml b/acceptance/tests/fixtures/bases/static-client/kustomization.yaml index 9aa0009dc4..929d64ac24 100644 --- a/acceptance/tests/fixtures/bases/static-client/kustomization.yaml +++ b/acceptance/tests/fixtures/bases/static-client/kustomization.yaml @@ -6,5 +6,4 @@ resources: - service.yaml - serviceaccount.yaml - psp-rolebinding.yaml - - anyuid-scc-rolebinding.yaml - - privileged-scc-rolebinding.yaml \ No newline at end of file + - privileged-scc-rolebinding.yaml diff --git a/acceptance/tests/fixtures/bases/static-server-https/anyuid-scc-rolebinding.yaml b/acceptance/tests/fixtures/bases/static-server-https/anyuid-scc-rolebinding.yaml deleted file mode 100644 index 2be7cf13db..0000000000 --- a/acceptance/tests/fixtures/bases/static-server-https/anyuid-scc-rolebinding.yaml +++ /dev/null @@ -1,14 +0,0 @@ -# Copyright (c) HashiCorp, Inc. 
-# SPDX-License-Identifier: MPL-2.0 - -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - name: static-server-openshift-anyuid -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: system:openshift:scc:anyuid -subjects: - - kind: ServiceAccount - name: static-server \ No newline at end of file diff --git a/acceptance/tests/fixtures/bases/static-server-https/kustomization.yaml b/acceptance/tests/fixtures/bases/static-server-https/kustomization.yaml index da166af201..6d7daa8f88 100644 --- a/acceptance/tests/fixtures/bases/static-server-https/kustomization.yaml +++ b/acceptance/tests/fixtures/bases/static-server-https/kustomization.yaml @@ -7,5 +7,4 @@ resources: - service.yaml - serviceaccount.yaml - psp-rolebinding.yaml - - anyuid-scc-rolebinding.yaml - - privileged-scc-rolebinding.yaml \ No newline at end of file + - privileged-scc-rolebinding.yaml diff --git a/acceptance/tests/fixtures/bases/static-server-tcp/anyuid-scc-rolebinding.yaml b/acceptance/tests/fixtures/bases/static-server-tcp/anyuid-scc-rolebinding.yaml deleted file mode 100644 index eb86dc8bae..0000000000 --- a/acceptance/tests/fixtures/bases/static-server-tcp/anyuid-scc-rolebinding.yaml +++ /dev/null @@ -1,14 +0,0 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - name: static-server-tcp-openshift-anyuid -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: system:openshift:scc:anyuid -subjects: - - kind: ServiceAccount - name: static-server-tcp \ No newline at end of file diff --git a/acceptance/tests/fixtures/bases/static-server-tcp/kustomization.yaml b/acceptance/tests/fixtures/bases/static-server-tcp/kustomization.yaml index 2180aa94e1..946e8d6b68 100644 --- a/acceptance/tests/fixtures/bases/static-server-tcp/kustomization.yaml +++ b/acceptance/tests/fixtures/bases/static-server-tcp/kustomization.yaml @@ -7,5 +7,4 @@ resources: - serviceaccount.yaml - servicedefaults.yaml - psp-rolebinding.yaml - - anyuid-scc-rolebinding.yaml - - privileged-scc-rolebinding.yaml \ No newline at end of file + - privileged-scc-rolebinding.yaml diff --git a/acceptance/tests/fixtures/bases/static-server/anyuid-scc-rolebinding.yaml b/acceptance/tests/fixtures/bases/static-server/anyuid-scc-rolebinding.yaml deleted file mode 100644 index 2be7cf13db..0000000000 --- a/acceptance/tests/fixtures/bases/static-server/anyuid-scc-rolebinding.yaml +++ /dev/null @@ -1,14 +0,0 @@ -# Copyright (c) HashiCorp, Inc. 
-# SPDX-License-Identifier: MPL-2.0 - -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - name: static-server-openshift-anyuid -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: system:openshift:scc:anyuid -subjects: - - kind: ServiceAccount - name: static-server \ No newline at end of file diff --git a/acceptance/tests/fixtures/bases/static-server/kustomization.yaml b/acceptance/tests/fixtures/bases/static-server/kustomization.yaml index 9aa0009dc4..929d64ac24 100644 --- a/acceptance/tests/fixtures/bases/static-server/kustomization.yaml +++ b/acceptance/tests/fixtures/bases/static-server/kustomization.yaml @@ -6,5 +6,4 @@ resources: - service.yaml - serviceaccount.yaml - psp-rolebinding.yaml - - anyuid-scc-rolebinding.yaml - - privileged-scc-rolebinding.yaml \ No newline at end of file + - privileged-scc-rolebinding.yaml diff --git a/control-plane/connect-inject/common/openshift.go b/control-plane/connect-inject/common/openshift.go new file mode 100644 index 0000000000..e8e2f555e8 --- /dev/null +++ b/control-plane/connect-inject/common/openshift.go @@ -0,0 +1,130 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// Function copied from: +// https://github.com/openshift/apiserver-library-go/blob/release-4.17/pkg/securitycontextconstraints/sccmatching/matcher.go +// Apache 2.0 license: https://github.com/openshift/apiserver-library-go/blob/release-4.17/LICENSE + +// A namespace in OpenShift has the following annotations: +// Annotations: openshift.io/sa.scc.mcs: s0:c27,c4 +// openshift.io/sa.scc.uid-range: 1000710000/10000 +// openshift.io/sa.scc.supplemental-groups: 1000710000/10000 +// +// Note: Even though the annotation is named 'range', it is not a range but the ID you should use. All pods in a +// namespace should use the same UID/GID. (1000710000/1000710000 above) + +package common + +import ( + "fmt" + "strconv" + "strings" + + "github.com/hashicorp/consul-k8s/control-plane/connect-inject/constants" + corev1 "k8s.io/api/core/v1" +) + +// GetOpenShiftUID gets the user id from the OpenShift annotation 'openshift.io/sa.scc.uid-range'. +func GetOpenShiftUID(ns *corev1.Namespace) (int64, error) { + annotation, ok := ns.Annotations[constants.AnnotationOpenShiftUIDRange] + if !ok { + return 0, fmt.Errorf("unable to find annotation %s", constants.AnnotationOpenShiftUIDRange) + } + if len(annotation) == 0 { + return 0, fmt.Errorf("found annotation %s but it was empty", constants.AnnotationOpenShiftUIDRange) + } + + uid, err := parseOpenShiftUID(annotation) + if err != nil { + return 0, err + } + + return uid, nil +} + +// parseOpenShiftUID parses the UID "range" from the annotation string. The annotation can either have a '/' or '-' +// as a separator. '-' is the old style of UID from when it used to be an actual range. +// Example annotation value: "1000700000/100000". 
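+//
+// For illustration only (hypothetical values), the parser below behaves as:
+//
+//	parseOpenShiftUID("1000700000/100000") // 1000700000, nil
+//	parseOpenShiftUID("1000710000-9999")   // 1000710000, nil (old dash-separated style)
+//	parseOpenShiftUID("1000700000")        // 0, error: no '/' or '-' separator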
+func parseOpenShiftUID(val string) (int64, error) {
+	var uid int64
+	var err error
+	if strings.Contains(val, "/") {
+		str := strings.Split(val, "/")
+		uid, err = strconv.ParseInt(str[0], 10, 64)
+		if err != nil {
+			return 0, err
+		}
+	}
+	if strings.Contains(val, "-") {
+		str := strings.Split(val, "-")
+		uid, err = strconv.ParseInt(str[0], 10, 64)
+		if err != nil {
+			return 0, err
+		}
+	}
+
+	if !strings.Contains(val, "/") && !strings.Contains(val, "-") {
+		return 0, fmt.Errorf(
+			"annotation %s contains an invalid format for value %s",
+			constants.AnnotationOpenShiftUIDRange,
+			val,
+		)
+	}
+
+	return uid, nil
+}
+
+// GetOpenShiftGroup gets the group ID from the OpenShift annotation 'openshift.io/sa.scc.supplemental-groups'.
+// It falls back to the UID annotation if the group annotation does not exist; the values should
+// be the same.
+func GetOpenShiftGroup(ns *corev1.Namespace) (int64, error) {
+	annotation, ok := ns.Annotations[constants.AnnotationOpenShiftGroups]
+	if !ok {
+		// fall back to UID annotation
+		annotation, ok = ns.Annotations[constants.AnnotationOpenShiftUIDRange]
+		if !ok {
+			return 0, fmt.Errorf(
+				"unable to find annotation %s or %s",
+				constants.AnnotationOpenShiftGroups,
+				constants.AnnotationOpenShiftUIDRange,
+			)
+		}
+	}
+	if len(annotation) == 0 {
+		return 0, fmt.Errorf("found annotation %s but it was empty", constants.AnnotationOpenShiftGroups)
+	}
+
+	group, err := parseOpenShiftGroup(annotation)
+	if err != nil {
+		return 0, err
+	}
+
+	return group, nil
+}
+
+// parseOpenShiftGroup parses the group from the annotation string. The annotation can either have a '/' or ','
+// as a separator. ',' is the old style of group from when it used to be an actual range.
+func parseOpenShiftGroup(val string) (int64, error) {
+	var group int64
+	var err error
+	if strings.Contains(val, "/") {
+		str := strings.Split(val, "/")
+		group, err = strconv.ParseInt(str[0], 10, 64)
+		if err != nil {
+			return 0, err
+		}
+	}
+	if strings.Contains(val, ",") {
+		str := strings.Split(val, ",")
+		group, err = strconv.ParseInt(str[0], 10, 64)
+		if err != nil {
+			return 0, err
+		}
+	}
+
+	if !strings.Contains(val, "/") && !strings.Contains(val, ",") {
+		return 0, fmt.Errorf("annotation %s contains an invalid format for value %s", constants.AnnotationOpenShiftGroups, val)
+	}
+
+	return group, nil
+}
diff --git a/control-plane/connect-inject/common/openshift_test.go b/control-plane/connect-inject/common/openshift_test.go
new file mode 100644
index 0000000000..e4a5178c7a
--- /dev/null
+++ b/control-plane/connect-inject/common/openshift_test.go
@@ -0,0 +1,236 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0 +package common + +import ( + "fmt" + "testing" + + "github.com/hashicorp/consul-k8s/control-plane/connect-inject/constants" + "github.com/stretchr/testify/require" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +func TestOpenShiftUID(t *testing.T) { + cases := []struct { + Name string + Namespace func() *corev1.Namespace + Expected int64 + Err string + }{ + { + Name: "Valid uid annotation with slash", + Namespace: func() *corev1.Namespace { + ns := &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: "default", + Namespace: "default", + Annotations: map[string]string{ + constants.AnnotationOpenShiftUIDRange: "1000700000/100000", + }, + }, + } + return ns + }, + Expected: 1000700000, + Err: "", + }, + { + Name: "Valid uid annotation with dash", + Namespace: func() *corev1.Namespace { + ns := &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: "default", + Namespace: "default", + Annotations: map[string]string{ + constants.AnnotationOpenShiftUIDRange: "1234-1000", + }, + }, + } + return ns + }, + Expected: 1234, + Err: "", + }, + { + Name: "Invalid uid annotation missing slash or dash", + Namespace: func() *corev1.Namespace { + ns := &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: "default", + Namespace: "default", + Annotations: map[string]string{ + // annotation should have a slash '/' or dash '-' + constants.AnnotationOpenShiftUIDRange: "5678", + }, + }, + } + return ns + }, + Expected: 0, + Err: fmt.Sprintf( + "annotation %s contains an invalid format for value %s", + constants.AnnotationOpenShiftUIDRange, + "5678", + ), + }, + { + Name: "Missing uid annotation", + Namespace: func() *corev1.Namespace { + ns := &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: "default", + Namespace: "default", + }, + } + return ns + }, + Expected: 0, + Err: fmt.Sprintf("unable to find annotation %s", constants.AnnotationOpenShiftUIDRange), + }, + { + Name: "Empty", + Namespace: func() *corev1.Namespace { + ns := &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: "default", + Namespace: "default", + Annotations: map[string]string{ + constants.AnnotationOpenShiftUIDRange: "", + }, + }, + } + return ns + }, + Expected: 0, + Err: "found annotation openshift.io/sa.scc.uid-range but it was empty", + }, + } + for _, tt := range cases { + t.Run(tt.Name, func(t *testing.T) { + require := require.New(t) + actual, err := GetOpenShiftUID(tt.Namespace()) + if tt.Err == "" { + require.NoError(err) + require.Equal(tt.Expected, actual) + } else { + require.EqualError(err, tt.Err) + } + }) + } +} + +func TestOpenShiftGroup(t *testing.T) { + cases := []struct { + Name string + Namespace func() *corev1.Namespace + Expected int64 + Err string + }{ + { + Name: "Valid group annotation with slash", + Namespace: func() *corev1.Namespace { + ns := &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: "default", + Namespace: "default", + Annotations: map[string]string{ + constants.AnnotationOpenShiftGroups: "123456789/1000", + }, + }, + } + return ns + }, + Expected: 123456789, + Err: "", + }, + { + Name: "Valid group annotation with comma", + Namespace: func() *corev1.Namespace { + ns := &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: "default", + Namespace: "default", + Annotations: map[string]string{ + constants.AnnotationOpenShiftGroups: "1234,1000", + }, + }, + } + return ns + }, + Expected: 1234, + Err: "", + }, + { + Name: "Invalid group annotation missing slash or comma", + Namespace: 
func() *corev1.Namespace {
+				ns := &corev1.Namespace{
+					ObjectMeta: metav1.ObjectMeta{
+						Name:      "default",
+						Namespace: "default",
+						Annotations: map[string]string{
+							// annotation should have a slash '/' or comma ','
+							constants.AnnotationOpenShiftGroups: "5678",
+						},
+					},
+				}
+				return ns
+			},
+			Expected: 0,
+			Err: fmt.Sprintf(
+				"annotation %s contains an invalid format for value %s",
+				constants.AnnotationOpenShiftGroups,
+				"5678",
+			),
+		},
+		{
+			Name: "Missing group annotation, fall back to UID annotation",
+			Namespace: func() *corev1.Namespace {
+				ns := &corev1.Namespace{
+					ObjectMeta: metav1.ObjectMeta{
+						Name:      "default",
+						Namespace: "default",
+						Annotations: map[string]string{
+							// no group annotation; the UID range annotation is used as the fallback
+							constants.AnnotationOpenShiftUIDRange: "9012/1000",
+						},
+					},
+				}
+				return ns
+			},
+			Expected: 9012,
+			Err:      "",
+		},
+		{
+			Name: "Missing both group and fallback uid annotation",
+			Namespace: func() *corev1.Namespace {
+				ns := &corev1.Namespace{
+					ObjectMeta: metav1.ObjectMeta{
+						Name:      "default",
+						Namespace: "default",
+					},
+				}
+				return ns
+			},
+			Expected: 0,
+			Err: fmt.Sprintf(
+				"unable to find annotation %s or %s",
+				constants.AnnotationOpenShiftGroups,
+				constants.AnnotationOpenShiftUIDRange,
+			),
+		},
+	}
+	for _, tt := range cases {
+		t.Run(tt.Name, func(t *testing.T) {
+			require := require.New(t)
+			actual, err := GetOpenShiftGroup(tt.Namespace())
+			if tt.Err == "" {
+				require.NoError(err)
+				require.Equal(tt.Expected, actual)
+			} else {
+				require.EqualError(err, tt.Err)
+			}
+		})
+	}
+}
diff --git a/control-plane/connect-inject/constants/annotations_and_labels.go b/control-plane/connect-inject/constants/annotations_and_labels.go
index 82d2e01ce0..ccaeecabe5 100644
--- a/control-plane/connect-inject/constants/annotations_and_labels.go
+++ b/control-plane/connect-inject/constants/annotations_and_labels.go
@@ -227,3 +227,9 @@ const (
 	AnnotationPrometheusPath = "prometheus.io/path"
 	AnnotationPrometheusPort = "prometheus.io/port"
 )
+
+// Annotations used by OpenShift.
+const (
+	AnnotationOpenShiftGroups   = "openshift.io/sa.scc.supplemental-groups"
+	AnnotationOpenShiftUIDRange = "openshift.io/sa.scc.uid-range"
+)
diff --git a/control-plane/connect-inject/webhook/consul_dataplane_sidecar.go b/control-plane/connect-inject/webhook/consul_dataplane_sidecar.go
index 0fec29f1ef..7e5427cd1c 100644
--- a/control-plane/connect-inject/webhook/consul_dataplane_sidecar.go
+++ b/control-plane/connect-inject/webhook/consul_dataplane_sidecar.go
@@ -23,7 +23,11 @@ const (
 	consulDataplaneDNSBindPort = 8600
 )
 
-func (w *MeshWebhook) consulDataplaneSidecar(namespace corev1.Namespace, pod corev1.Pod, mpi multiPortInfo) (corev1.Container, error) {
+func (w *MeshWebhook) consulDataplaneSidecar(
+	namespace corev1.Namespace,
+	pod corev1.Pod,
+	mpi multiPortInfo,
+) (corev1.Container, error) {
 	resources, err := w.sidecarResources(pod)
 	if err != nil {
 		return corev1.Container{}, err
@@ -204,33 +208,67 @@ func (w *MeshWebhook) consulDataplaneSidecar(namespace corev1.Namespace, pod cor
 	// When transparent proxy is enabled, then consul-dataplane needs to run as our specific user
 	// so that traffic redirection will work.
 	if tproxyEnabled || !w.EnableOpenShift {
-		if pod.Spec.SecurityContext != nil {
-			// User container and consul-dataplane container cannot have the same UID.
- if pod.Spec.SecurityContext.RunAsUser != nil && *pod.Spec.SecurityContext.RunAsUser == sidecarUserAndGroupID { - return corev1.Container{}, fmt.Errorf("pod's security context cannot have the same UID as consul-dataplane: %v", sidecarUserAndGroupID) + // In non-OpenShift environments we set the User and group ID for the sidecar to our values. + if !w.EnableOpenShift { + if pod.Spec.SecurityContext != nil { + // User container and consul-dataplane container cannot have the same UID. + if pod.Spec.SecurityContext.RunAsUser != nil && *pod.Spec.SecurityContext.RunAsUser == sidecarUserAndGroupID { + return corev1.Container{}, fmt.Errorf( + "pod's security context cannot have the same UID as consul-dataplane: %v", + sidecarUserAndGroupID, + ) + } } - } - // Ensure that none of the user's containers have the same UID as consul-dataplane. At this point in injection the meshWebhook - // has only injected init containers so all containers defined in pod.Spec.Containers are from the user. - for _, c := range pod.Spec.Containers { - // User container and consul-dataplane container cannot have the same UID. - if c.SecurityContext != nil && c.SecurityContext.RunAsUser != nil && *c.SecurityContext.RunAsUser == sidecarUserAndGroupID && c.Image != w.ImageConsulDataplane { - return corev1.Container{}, fmt.Errorf("container %q has runAsUser set to the same UID \"%d\" as consul-dataplane which is not allowed", c.Name, sidecarUserAndGroupID) + // Ensure that none of the user's containers have the same UID as consul-dataplane. At this point in injection the meshWebhook + // has only injected init containers so all containers defined in pod.Spec.Containers are from the user. + for _, c := range pod.Spec.Containers { + // User container and consul-dataplane container cannot have the same UID. + if c.SecurityContext != nil && c.SecurityContext.RunAsUser != nil && + *c.SecurityContext.RunAsUser == sidecarUserAndGroupID && + c.Image != w.ImageConsulDataplane { + return corev1.Container{}, fmt.Errorf( + "container %q has runAsUser set to the same UID \"%d\" as consul-dataplane which is not allowed", + c.Name, + sidecarUserAndGroupID, + ) + } + } + container.SecurityContext = &corev1.SecurityContext{ + RunAsUser: pointer.Int64(sidecarUserAndGroupID), + RunAsGroup: pointer.Int64(sidecarUserAndGroupID), + RunAsNonRoot: pointer.Bool(true), + AllowPrivilegeEscalation: pointer.Bool(false), + ReadOnlyRootFilesystem: pointer.Bool(true), + } + } else { + // Transparent proxy is set in OpenShift. There is an annotation on the namespace that tells us what + // the user and group ids should be for the sidecar. 
+ uid, err := common.GetOpenShiftUID(&namespace) + if err != nil { + return corev1.Container{}, err + } + group, err := common.GetOpenShiftGroup(&namespace) + if err != nil { + return corev1.Container{}, err + } + container.SecurityContext = &corev1.SecurityContext{ + RunAsUser: pointer.Int64(uid), + RunAsGroup: pointer.Int64(group), + RunAsNonRoot: pointer.Bool(true), + AllowPrivilegeEscalation: pointer.Bool(false), + ReadOnlyRootFilesystem: pointer.Bool(true), } - } - container.SecurityContext = &corev1.SecurityContext{ - RunAsUser: pointer.Int64(sidecarUserAndGroupID), - RunAsGroup: pointer.Int64(sidecarUserAndGroupID), - RunAsNonRoot: pointer.Bool(true), - AllowPrivilegeEscalation: pointer.Bool(false), - ReadOnlyRootFilesystem: pointer.Bool(true), } } - return container, nil } -func (w *MeshWebhook) getContainerSidecarArgs(namespace corev1.Namespace, mpi multiPortInfo, bearerTokenFile string, pod corev1.Pod) ([]string, error) { +func (w *MeshWebhook) getContainerSidecarArgs( + namespace corev1.Namespace, + mpi multiPortInfo, + bearerTokenFile string, + pod corev1.Pod, +) ([]string, error) { proxyIDFileName := "/consul/connect-inject/proxyid" if mpi.serviceName != "" { proxyIDFileName = fmt.Sprintf("/consul/connect-inject/proxyid-%s", mpi.serviceName) @@ -365,7 +403,14 @@ func (w *MeshWebhook) getContainerSidecarArgs(namespace corev1.Namespace, mpi mu } if serviceMetricsPath != "" && serviceMetricsPort != "" { - args = append(args, "-telemetry-prom-service-metrics-url="+fmt.Sprintf("http://127.0.0.1:%s%s", serviceMetricsPort, serviceMetricsPath)) + args = append( + args, + "-telemetry-prom-service-metrics-url="+fmt.Sprintf( + "http://127.0.0.1:%s%s", + serviceMetricsPort, + serviceMetricsPath, + ), + ) } // Pull the TLS config from the relevant annotations. @@ -392,13 +437,23 @@ func (w *MeshWebhook) getContainerSidecarArgs(namespace corev1.Namespace, mpi mu // Validate required Prometheus TLS config is present if set. 
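+	// For illustration (hypothetical annotation values): a CA file or CA path alone fails because the
+	// cert file is missing; a cert file without a CA file or CA path fails; a cert file without a key
+	// file fails; CA, cert, and key supplied together pass validation and are appended to the args.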
if prometheusCAFile != "" || prometheusCAPath != "" || prometheusCertFile != "" || prometheusKeyFile != "" { if prometheusCAFile == "" && prometheusCAPath == "" { - return nil, fmt.Errorf("must set one of %q or %q when providing prometheus TLS config", constants.AnnotationPrometheusCAFile, constants.AnnotationPrometheusCAPath) + return nil, fmt.Errorf( + "must set one of %q or %q when providing prometheus TLS config", + constants.AnnotationPrometheusCAFile, + constants.AnnotationPrometheusCAPath, + ) } if prometheusCertFile == "" { - return nil, fmt.Errorf("must set %q when providing prometheus TLS config", constants.AnnotationPrometheusCertFile) + return nil, fmt.Errorf( + "must set %q when providing prometheus TLS config", + constants.AnnotationPrometheusCertFile, + ) } if prometheusKeyFile == "" { - return nil, fmt.Errorf("must set %q when providing prometheus TLS config", constants.AnnotationPrometheusKeyFile) + return nil, fmt.Errorf( + "must set %q when providing prometheus TLS config", + constants.AnnotationPrometheusKeyFile, + ) } // TLS config has been validated, add them to the consul-dataplane cmd args args = append(args, "-telemetry-prom-ca-certs-file="+prometheusCAFile, @@ -478,7 +533,12 @@ func (w *MeshWebhook) sidecarResources(pod corev1.Pod) (corev1.ResourceRequireme if anno, ok := pod.Annotations[constants.AnnotationSidecarProxyCPULimit]; ok { cpuLimit, err := resource.ParseQuantity(anno) if err != nil { - return corev1.ResourceRequirements{}, fmt.Errorf("parsing annotation %s:%q: %s", constants.AnnotationSidecarProxyCPULimit, anno, err) + return corev1.ResourceRequirements{}, fmt.Errorf( + "parsing annotation %s:%q: %s", + constants.AnnotationSidecarProxyCPULimit, + anno, + err, + ) } resources.Limits[corev1.ResourceCPU] = cpuLimit } else if w.DefaultProxyCPULimit != zeroQuantity { @@ -489,7 +549,12 @@ func (w *MeshWebhook) sidecarResources(pod corev1.Pod) (corev1.ResourceRequireme if anno, ok := pod.Annotations[constants.AnnotationSidecarProxyCPURequest]; ok { cpuRequest, err := resource.ParseQuantity(anno) if err != nil { - return corev1.ResourceRequirements{}, fmt.Errorf("parsing annotation %s:%q: %s", constants.AnnotationSidecarProxyCPURequest, anno, err) + return corev1.ResourceRequirements{}, fmt.Errorf( + "parsing annotation %s:%q: %s", + constants.AnnotationSidecarProxyCPURequest, + anno, + err, + ) } resources.Requests[corev1.ResourceCPU] = cpuRequest } else if w.DefaultProxyCPURequest != zeroQuantity { @@ -500,7 +565,12 @@ func (w *MeshWebhook) sidecarResources(pod corev1.Pod) (corev1.ResourceRequireme if anno, ok := pod.Annotations[constants.AnnotationSidecarProxyMemoryLimit]; ok { memoryLimit, err := resource.ParseQuantity(anno) if err != nil { - return corev1.ResourceRequirements{}, fmt.Errorf("parsing annotation %s:%q: %s", constants.AnnotationSidecarProxyMemoryLimit, anno, err) + return corev1.ResourceRequirements{}, fmt.Errorf( + "parsing annotation %s:%q: %s", + constants.AnnotationSidecarProxyMemoryLimit, + anno, + err, + ) } resources.Limits[corev1.ResourceMemory] = memoryLimit } else if w.DefaultProxyMemoryLimit != zeroQuantity { @@ -511,7 +581,12 @@ func (w *MeshWebhook) sidecarResources(pod corev1.Pod) (corev1.ResourceRequireme if anno, ok := pod.Annotations[constants.AnnotationSidecarProxyMemoryRequest]; ok { memoryRequest, err := resource.ParseQuantity(anno) if err != nil { - return corev1.ResourceRequirements{}, fmt.Errorf("parsing annotation %s:%q: %s", constants.AnnotationSidecarProxyMemoryRequest, anno, err) + return 
corev1.ResourceRequirements{}, fmt.Errorf( + "parsing annotation %s:%q: %s", + constants.AnnotationSidecarProxyMemoryRequest, + anno, + err, + ) } resources.Requests[corev1.ResourceMemory] = memoryRequest } else if w.DefaultProxyMemoryRequest != zeroQuantity { diff --git a/control-plane/connect-inject/webhook/consul_dataplane_sidecar_test.go b/control-plane/connect-inject/webhook/consul_dataplane_sidecar_test.go index 936e51a559..69673dd794 100644 --- a/control-plane/connect-inject/webhook/consul_dataplane_sidecar_test.go +++ b/control-plane/connect-inject/webhook/consul_dataplane_sidecar_test.go @@ -302,7 +302,6 @@ func TestHandlerConsulDataplaneSidecar_Concurrency(t *testing.T) { // Test that we pass the dns proxy flag to dataplane correctly. func TestHandlerConsulDataplaneSidecar_DNSProxy(t *testing.T) { - // We only want the flag passed when DNS and tproxy are both enabled. DNS/tproxy can // both be enabled/disabled with annotations/labels on the pod and namespace and then globally // through the helm chart. To test this we use an outer loop with the possible DNS settings and then @@ -363,7 +362,6 @@ func TestHandlerConsulDataplaneSidecar_DNSProxy(t *testing.T) { for i, dnsCase := range dnsCases { for j, tproxyCase := range tproxyCases { t.Run(fmt.Sprintf("dns=%d,tproxy=%d", i, j), func(t *testing.T) { - // Test setup. h := MeshWebhook{ ConsulConfig: &consul.Config{HTTPPort: 8500, GRPCPort: 8502}, @@ -828,8 +826,8 @@ func TestHandlerConsulDataplaneSidecar_withSecurityContext(t *testing.T) { tproxyEnabled: true, openShiftEnabled: true, expSecurityContext: &corev1.SecurityContext{ - RunAsUser: pointer.Int64(sidecarUserAndGroupID), - RunAsGroup: pointer.Int64(sidecarUserAndGroupID), + RunAsUser: pointer.Int64(1000700000), + RunAsGroup: pointer.Int64(1000700000), RunAsNonRoot: pointer.Bool(true), ReadOnlyRootFilesystem: pointer.Bool(true), AllowPrivilegeEscalation: pointer.Bool(false), @@ -837,6 +835,19 @@ func TestHandlerConsulDataplaneSidecar_withSecurityContext(t *testing.T) { }, } for name, c := range cases { + ns := corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: k8sNamespace, + Namespace: k8sNamespace, + Annotations: map[string]string{}, + Labels: map[string]string{}, + }, + } + + if c.openShiftEnabled { + ns.Annotations[constants.AnnotationOpenShiftUIDRange] = "1000700000/100000" + ns.Annotations[constants.AnnotationOpenShiftGroups] = "1000700000/100000" + } t.Run(name, func(t *testing.T) { w := MeshWebhook{ EnableTransparentProxy: c.tproxyEnabled, @@ -845,6 +856,7 @@ func TestHandlerConsulDataplaneSidecar_withSecurityContext(t *testing.T) { } pod := corev1.Pod{ ObjectMeta: metav1.ObjectMeta{ + Namespace: ns.Name, Annotations: map[string]string{ constants.AnnotationService: "foo", }, @@ -858,7 +870,7 @@ func TestHandlerConsulDataplaneSidecar_withSecurityContext(t *testing.T) { }, }, } - ec, err := w.consulDataplaneSidecar(testNS, pod, multiPortInfo{}) + ec, err := w.consulDataplaneSidecar(ns, pod, multiPortInfo{}) require.NoError(t, err) require.Equal(t, c.expSecurityContext, ec.SecurityContext) }) @@ -885,7 +897,10 @@ func TestHandlerConsulDataplaneSidecar_FailsWithDuplicatePodSecurityContextUID(t }, } _, err := w.consulDataplaneSidecar(testNS, pod, multiPortInfo{}) - require.EqualError(err, fmt.Sprintf("pod's security context cannot have the same UID as consul-dataplane: %v", sidecarUserAndGroupID)) + require.EqualError( + err, + fmt.Sprintf("pod's security context cannot have the same UID as consul-dataplane: %v", sidecarUserAndGroupID), + ) } // Test that if the user 
specifies a container with security context with the same uid as `sidecarUserAndGroupID` that we
@@ -922,9 +937,12 @@ func TestHandlerConsulDataplaneSidecar_FailsWithDuplicateContainerSecurityContex
 					},
 				},
 			},
-			webhook:       MeshWebhook{},
-			expErr:        true,
-			expErrMessage: fmt.Sprintf("container \"app\" has runAsUser set to the same UID \"%d\" as consul-dataplane which is not allowed", sidecarUserAndGroupID),
+			webhook: MeshWebhook{},
+			expErr:  true,
+			expErrMessage: fmt.Sprintf(
+				"container \"app\" has runAsUser set to the same UID \"%d\" as consul-dataplane which is not allowed",
+				sidecarUserAndGroupID,
+			),
 		},
 		{
 			name: "doesn't fail with envoy image",
@@ -1348,7 +1366,11 @@ func TestHandlerConsulDataplaneSidecar_Metrics(t *testing.T) {
 				},
 			},
 			expCmdArgs: "",
-			expErr:     fmt.Sprintf("must set one of %q or %q when providing prometheus TLS config", constants.AnnotationPrometheusCAFile, constants.AnnotationPrometheusCAPath),
+			expErr: fmt.Sprintf(
+				"must set one of %q or %q when providing prometheus TLS config",
+				constants.AnnotationPrometheusCAFile,
+				constants.AnnotationPrometheusCAPath,
+			),
 		},
 		{
 			name: "merge metrics with TLS enabled, missing cert gives an error",
diff --git a/control-plane/connect-inject/webhook/container_init.go b/control-plane/connect-inject/webhook/container_init.go
index effee89150..48d893f6d6 100644
--- a/control-plane/connect-inject/webhook/container_init.go
+++ b/control-plane/connect-inject/webhook/container_init.go
@@ -255,16 +255,41 @@ func (w *MeshWebhook) containerInit(namespace corev1.Namespace, pod corev1.Pod,
 			},
 		}
 	} else {
-		container.SecurityContext = &corev1.SecurityContext{
-			RunAsUser:    pointer.Int64(initContainersUserAndGroupID),
-			RunAsGroup:   pointer.Int64(initContainersUserAndGroupID),
-			RunAsNonRoot: pointer.Bool(true),
-			Privileged:   pointer.Bool(privileged),
-			Capabilities: &corev1.Capabilities{
-				Drop: []corev1.Capability{"ALL"},
-			},
-			ReadOnlyRootFilesystem:   pointer.Bool(true),
-			AllowPrivilegeEscalation: pointer.Bool(false),
+		if !w.EnableOpenShift {
+			container.SecurityContext = &corev1.SecurityContext{
+				RunAsUser:    pointer.Int64(initContainersUserAndGroupID),
+				RunAsGroup:   pointer.Int64(initContainersUserAndGroupID),
+				RunAsNonRoot: pointer.Bool(true),
+				Privileged:   pointer.Bool(privileged),
+				Capabilities: &corev1.Capabilities{
+					Drop: []corev1.Capability{"ALL"},
+				},
+				ReadOnlyRootFilesystem:   pointer.Bool(true),
+				AllowPrivilegeEscalation: pointer.Bool(false),
+			}
+		} else {
+			// Transparent proxy + CNI is set in OpenShift. There is an annotation on the namespace that tells us what
+			// the user and group IDs should be for the init container.
+			uid, err := common.GetOpenShiftUID(&namespace)
+			if err != nil {
+				return corev1.Container{}, err
+			}
+			group, err := common.GetOpenShiftGroup(&namespace)
+			if err != nil {
+				return corev1.Container{}, err
+			}
+			container.SecurityContext = &corev1.SecurityContext{
+				RunAsUser:    pointer.Int64(uid),
+				RunAsGroup:   pointer.Int64(group),
+				RunAsNonRoot: pointer.Bool(true),
+				Privileged:   pointer.Bool(false),
+				Capabilities: &corev1.Capabilities{
+					Drop: []corev1.Capability{"ALL"},
+				},
+				ReadOnlyRootFilesystem:   pointer.Bool(true),
+				AllowPrivilegeEscalation: pointer.Bool(false),
+			}
+		}
 		}
 	}
@@ -275,7 +300,12 @@ func (w *MeshWebhook) containerInit(namespace corev1.Namespace, pod corev1.Pod,
 // consulDNSEnabled returns true if Consul DNS should be enabled for this pod.
@@ -275,7 +300,12 @@ func (w *MeshWebhook) containerInit(namespace corev1.Namespace, pod corev1.Pod,
 // consulDNSEnabled returns true if Consul DNS should be enabled for this pod.
 // It returns an error when the annotation value cannot be parsed by strconv.ParseBool or if we are unable
 // to read the pod's namespace label when it exists.
-func consulDNSEnabled(namespace corev1.Namespace, pod corev1.Pod, globalDNSEnabled bool, globalTProxyEnabled bool) (bool, error) {
+func consulDNSEnabled(
+	namespace corev1.Namespace,
+	pod corev1.Pod,
+	globalDNSEnabled bool,
+	globalTProxyEnabled bool,
+) (bool, error) {
 	// DNS is only possible when tproxy is also enabled because it relies
 	// on traffic being redirected.
 	tproxy, err := common.TransparentProxyEnabled(namespace, pod, globalTProxyEnabled)
diff --git a/control-plane/connect-inject/webhook/container_init_test.go b/control-plane/connect-inject/webhook/container_init_test.go
index 8feac95b84..5896c0c0eb 100644
--- a/control-plane/connect-inject/webhook/container_init_test.go
+++ b/control-plane/connect-inject/webhook/container_init_test.go
@@ -293,7 +293,7 @@ func TestHandlerContainerInit_transparentProxy(t *testing.T) {
 			}
 
 			var expectedSecurityContext *corev1.SecurityContext
-			if c.cniEnabled {
+			if c.cniEnabled && !c.openShiftEnabled {
 				expectedSecurityContext = &corev1.SecurityContext{
 					RunAsUser:    pointer.Int64(initContainersUserAndGroupID),
 					RunAsGroup:   pointer.Int64(initContainersUserAndGroupID),
@@ -315,8 +315,34 @@ func TestHandlerContainerInit_transparentProxy(t *testing.T) {
 						Add: []corev1.Capability{netAdminCapability},
 					},
 				}
+			} else if c.cniEnabled && c.openShiftEnabled {
+				// When cni + openShift
+				expectedSecurityContext = &corev1.SecurityContext{
+					RunAsUser:    pointer.Int64(1000700000),
+					RunAsGroup:   pointer.Int64(1000700000),
+					RunAsNonRoot: pointer.Bool(true),
+					Privileged:   pointer.Bool(privileged),
+					Capabilities: &corev1.Capabilities{
+						Drop: []corev1.Capability{"ALL"},
+					},
+					ReadOnlyRootFilesystem:   pointer.Bool(true),
+					AllowPrivilegeEscalation: pointer.Bool(false),
+				}
 			}
-			ns := testNS
+			ns := corev1.Namespace{
+				ObjectMeta: metav1.ObjectMeta{
+					Name:        k8sNamespace,
+					Namespace:   k8sNamespace,
+					Annotations: map[string]string{},
+					Labels:      map[string]string{},
+				},
+			}
+
+			if c.openShiftEnabled {
+				ns.Annotations[constants.AnnotationOpenShiftUIDRange] = "1000700000/100000"
+				ns.Annotations[constants.AnnotationOpenShiftGroups] = "1000700000/100000"
+			}
+			ns.Labels = c.namespaceLabel
 			container, err := w.containerInit(ns, *pod, multiPortInfo{})
 			require.NoError(t, err)
 
@@ -785,7 +811,8 @@ func TestHandlerContainerInit_Multiport(t *testing.T) {
 					serviceName: "web-admin",
 				},
 			},
-			[]string{`/bin/sh -ec consul-k8s-control-plane connect-init -pod-name=${POD_NAME} -pod-namespace=${POD_NAMESPACE} \
+			[]string{
+				`/bin/sh -ec consul-k8s-control-plane connect-init -pod-name=${POD_NAME} -pod-namespace=${POD_NAMESPACE} \
   -log-level=info \
   -log-json=false \
   -multiport=true \
@@ -823,7 +850,8 @@ func TestHandlerContainerInit_Multiport(t *testing.T) {
 					serviceName: "web-admin",
 				},
 			},
-			[]string{`/bin/sh -ec consul-k8s-control-plane connect-init -pod-name=${POD_NAME} -pod-namespace=${POD_NAMESPACE} \
+			[]string{
+				`/bin/sh -ec consul-k8s-control-plane connect-init -pod-name=${POD_NAME} -pod-namespace=${POD_NAMESPACE} \
   -log-level=info \
   -log-json=false \
   -service-account-name="web" \
@@ -922,7 +950,6 @@ func TestHandlerContainerInit_WithTLSAndCustomPorts(t *testing.T) {
 				}
 			}
 		}
-
 		})
 	}
 }
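The `consulDNSEnabled` reflow above is cosmetic, but its doc comment captures the actual contract: pod- or namespace-level settings override the Helm-level default, values are parsed with `strconv.ParseBool`, and DNS only ends up enabled when transparent proxy resolves to enabled too. A rough sketch of that resolution order under those assumptions (the `example.com/...` annotation keys are placeholders, not the real `constants` keys):

```go
package main

import (
	"fmt"
	"strconv"
)

// resolveFlag returns the pod-level annotation override if present, otherwise
// the global (Helm-level) default. consulDNSEnabled composes two such lookups:
// DNS is on only when both DNS and tproxy resolve to true.
func resolveFlag(annotations map[string]string, key string, global bool) (bool, error) {
	if raw, ok := annotations[key]; ok {
		return strconv.ParseBool(raw) // the ParseBool error mentioned in the doc comment
	}
	return global, nil
}

func main() {
	anno := map[string]string{"example.com/consul-dns": "true"}
	dns, _ := resolveFlag(anno, "example.com/consul-dns", false)
	tproxy, _ := resolveFlag(anno, "example.com/transparent-proxy", false) // no override, global=false
	fmt.Println(dns && tproxy)                                             // false: DNS stays off without tproxy
}
```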
diff --git a/control-plane/connect-inject/webhook/redirect_traffic.go b/control-plane/connect-inject/webhook/redirect_traffic.go
index f928df4afd..e6de09f448 100644
--- a/control-plane/connect-inject/webhook/redirect_traffic.go
+++ b/control-plane/connect-inject/webhook/redirect_traffic.go
@@ -19,7 +19,7 @@ import (
 // iptables.Config:
 //
 // ConsulDNSIP: an environment variable named RESOURCE_PREFIX_DNS_SERVICE_HOST where RESOURCE_PREFIX is the consul.fullname in helm.
-// ProxyUserID: a constant set in Annotations
+// ProxyUserID: a constant set in Annotations or read from namespace when using OpenShift
 // ProxyInboundPort: the service port or bind port
 // ProxyOutboundPort: default transparent proxy outbound port or transparent proxy outbound listener port
 // ExcludeInboundPorts: prometheus, envoy stats, expose paths, checks and excluded pod annotations
@@ -27,8 +27,18 @@ import (
 // ExcludeOutboundCIDRs: pod annotations
 // ExcludeUIDs: pod annotations
 func (w *MeshWebhook) iptablesConfigJSON(pod corev1.Pod, ns corev1.Namespace) (string, error) {
-	cfg := iptables.Config{
-		ProxyUserID: strconv.Itoa(sidecarUserAndGroupID),
+	cfg := iptables.Config{}
+
+	if !w.EnableOpenShift {
+		cfg.ProxyUserID = strconv.Itoa(sidecarUserAndGroupID)
+	} else {
+		// When using OpenShift, the uid and group are saved as an annotation on the namespace
+		uid, err := common.GetOpenShiftUID(&ns)
+		if err != nil {
+			return "", err
+		}
+		cfg.ProxyUserID = strconv.FormatInt(uid, 10)
+	}
 
 	// Set the proxy's inbound port.
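On the `iptablesConfigJSON` change above: `ProxyUserID` is what the generated iptables rules use to exempt the proxy's own traffic from redirection, so on OpenShift it has to be the namespace-assigned UID rather than the fixed constant. A stripped-down illustration, where `Config` stands in for the real `iptables.Config` and `5995` is only an illustrative value for `sidecarUserAndGroupID`:

```go
package main

import (
	"encoding/json"
	"fmt"
	"strconv"
)

// Config is a stand-in for the iptables configuration struct, keeping only
// the one field this hunk touches.
type Config struct {
	ProxyUserID string `json:"ProxyUserID,omitempty"`
}

const sidecarUserAndGroupID = 5995 // illustrative; the real constant lives in the webhook package

// proxyUserID mirrors the branch above: fixed UID outside OpenShift,
// namespace-derived UID (e.g. the first ID of the SCC range) inside it.
func proxyUserID(enableOpenShift bool, nsUID int64) string {
	if !enableOpenShift {
		return strconv.Itoa(sidecarUserAndGroupID)
	}
	return strconv.FormatInt(nsUID, 10)
}

func main() {
	b, _ := json.Marshal(Config{ProxyUserID: proxyUserID(true, 1000700000)})
	fmt.Println(string(b)) // {"ProxyUserID":"1000700000"}
}
```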
diff --git a/control-plane/go.mod b/control-plane/go.mod
index c4a9063ae9..98e970090f 100644
--- a/control-plane/go.mod
+++ b/control-plane/go.mod
@@ -161,4 +161,4 @@ require (
 	sigs.k8s.io/yaml v1.3.0 // indirect
 )
 
-go 1.20
+go 1.21
diff --git a/control-plane/go.sum b/control-plane/go.sum
index 33b82dd0a3..b9664c2a7b 100644
--- a/control-plane/go.sum
+++ b/control-plane/go.sum
@@ -67,6 +67,7 @@ github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03
 github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
 github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ=
 github.com/abdullin/seq v0.0.0-20160510034733-d5467c17e7af h1:DBNMBMuMiWYu0b+8KMJuWmfCkcxl09JwdlqwDZZ6U14=
+github.com/abdullin/seq v0.0.0-20160510034733-d5467c17e7af/go.mod h1:5Jv4cbFiHJMsVxt52+i0Ha45fjshj6wxYr1r19tB9bw=
 github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
 github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
 github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
@@ -130,6 +131,7 @@ github.com/digitalocean/godo v1.7.5/go.mod h1:h6faOIcZ8lWIwNQ+DN7b3CgX4Kwby5T+nb
 github.com/dimchansky/utfbom v1.1.0 h1:FcM3g+nofKgUteL8dm/UpdRXNC9KmADgTpLKsu0TRo4=
 github.com/dimchansky/utfbom v1.1.0/go.mod h1:rO41eb7gLfo8SF1jd9F8HplJm1Fewwi4mQvIirEdv+8=
 github.com/dnaeon/go-vcr v1.0.1 h1:r8L/HqC0Hje5AXMu1ooW8oyQyOFv4GxqpL0nRP7SLLY=
+github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E=
 github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE=
 github.com/emicklei/go-restful/v3 v3.10.1 h1:rc42Y5YTp7Am7CS630D7JmhRjq4UlEUuEKfrDac4bSQ=
 github.com/emicklei/go-restful/v3 v3.10.1/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
@@ -184,6 +186,7 @@ github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+
 github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
 github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
 github.com/go-test/deep v1.0.2 h1:onZX1rnHT3Wv6cqNgYyFOOlgVKJrksuCMCRvJStbMYw=
+github.com/go-test/deep v1.0.2/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA=
 github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
 github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
 github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
@@ -221,6 +224,7 @@ github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6
 github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
 github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
 github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4=
+github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA=
 github.com/google/gnostic v0.5.7-v3refs h1:FhTMOKj2VhjpouxvWJAV1TL304uMlb9zcDqkl6cEI54=
 github.com/google/gnostic v0.5.7-v3refs/go.mod h1:73MKFl6jIHelAJNaBGFzt3SPtZULs9dYrGFt8OiIsHQ=
 github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
@@ -296,6 +300,7 @@ github.com/hashicorp/go-immutable-radix v1.3.1 h1:DKHmCUm2hRBK510BaiZlwvpD40f8bJ
 github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
 github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
 github.com/hashicorp/go-msgpack v0.5.5 h1:i9R9JSrqIz0QVLz3sz+i3YJdT7TTSLcfLLzJi9aZTuI=
+github.com/hashicorp/go-msgpack v0.5.5/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
 github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
 github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA=
 github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo=
@@ -345,6 +350,7 @@ github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:
 github.com/imdario/mergo v0.3.13 h1:lFzP57bqS/wsqKssCGmtLAb8A0wKjLGrve2q3PPVcBk=
 github.com/imdario/mergo v0.3.13/go.mod h1:4lJ1jqUDcsbIECGy0RUJAXNIhg+6ocWgb1ALK2O4oXg=
 github.com/jarcoal/httpmock v0.0.0-20180424175123-9c70cfe4a1da h1:FjHUJJ7oBW4G/9j1KzlHaXL09LyMVM9rupS39lncbXk=
+github.com/jarcoal/httpmock v0.0.0-20180424175123-9c70cfe4a1da/go.mod h1:ks+b9deReOc7jgqp+e7LuFiCBH6Rm5hL32cLcEAArb4=
 github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
 github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg=
 github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
@@ -374,6 +380,7 @@ github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORN
 github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
 github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
 github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0=
+github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk=
 github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
 github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
 github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
@@ -490,6 +497,7 @@ github.com/renier/xmlrpc v0.0.0-20170708154548-ce4a1a486c03/go.mod h1:gRAiPF5C5N
 github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
 github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
 github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
+github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog=
 github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
 github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
 github.com/ryanuber/go-glob v1.0.0 h1:iQh3xXAumdQ+4Ufa5b25cRpC5TYKlno6hsv6Cb3pkBk=
@@ -548,6 +556,7 @@ go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE=
 go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
 go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A=
 go.uber.org/goleak v1.2.0 h1:xqgm/S+aQvhWFTtR0XK3Jvg7z8kGV8P4X14IzwN3Eqk=
+go.uber.org/goleak v1.2.0/go.mod h1:XJYK+MuIchqpmGmUSAzotztawfKvYLUIgg7guXrwVUo=
 go.uber.org/multierr v1.6.0 h1:y6IPFStTAIT5Ytl7/XYmHvzXQ7S3g/IeZW9hyZ5thw4=
 go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU=
 go.uber.org/zap v1.19.0/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI=
@@ -884,7 +893,9 @@ google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6D
 google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
 google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
 google.golang.org/genproto v0.0.0-20230711160842-782d3b101e98 h1:Z0hjGZePRE0ZBWotvtrwxFNrNE9CUAGtplaDK5NNI/g=
+google.golang.org/genproto v0.0.0-20230711160842-782d3b101e98/go.mod h1:S7mY02OqCJTD0E1OiQy1F72PWFB4bZJ87cAtLPYgDR0=
 google.golang.org/genproto/googleapis/api v0.0.0-20230711160842-782d3b101e98 h1:FmF5cCW94Ij59cfpoLiwTgodWmm60eEV0CjlsVg2fuw=
+google.golang.org/genproto/googleapis/api v0.0.0-20230711160842-782d3b101e98/go.mod h1:rsr7RhLuwsDKL7RmgDDCUc6yaGr1iqceVb5Wv6f6YvQ=
 google.golang.org/genproto/googleapis/rpc v0.0.0-20230711160842-782d3b101e98 h1:bVf09lpb+OJbByTj913DRJioFFAjf/ZGxEz7MajTp2U=
 google.golang.org/genproto/googleapis/rpc v0.0.0-20230711160842-782d3b101e98/go.mod h1:TUfxEVdsvPg18p6AslUXFoLdpED4oBnGwyqk3dV1XzM=
 google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=