From 14ce7399cd435379429fe66fb507095ad4d8816f Mon Sep 17 00:00:00 2001
From: Ashwin Venkatesh
Date: Fri, 15 Oct 2021 13:18:39 -0700
Subject: [PATCH] Add ACL support (#766)

* Add ACL support

- Update server-acl-init job to create tokens that are partition aware when Admin Partitions are enabled.
- server-acl-init creates a partition-token that is used by partition-init and server-acl-init in non-default partitions.
- Update partition-init to use the provided partition-token when ACLs are enabled.
- Update license-policy to be acl:write when created in a partition.

* Add Connect-style acceptance tests covering namespaces and Connect within partitions

* Add changelog and update consul image
---
 CHANGELOG.md | 99 +++
 acceptance/go.mod | 2 +-
 acceptance/go.sum | 5 +-
 .../kustomization.yaml | 0
 .../cases/static-client-partition/patch.yaml | 0
 .../tests/partitions/main_test.go | 2 +-
 .../tests/partitions/partitions_test.go | 445 +++++++++++
 .../consul/templates/partition-init-job.yaml | 13 +-
 .../consul/templates/server-acl-init-job.yaml | 5 +-
 .../tests/partitions/partitions_test.go | 146 ----
 charts/consul/test/unit/client-daemonset.bats | 4 +-
 .../consul/test/unit/partition-init-job.bats | 18 +-
 .../consul/test/unit/server-acl-init-job.bats | 39 +
 .../subcommand/partition-init/command.go | 4 +-
 .../subcommand/server-acl-init/command.go | 58 +-
 .../server-acl-init/command_ent_test.go | 236 +++---
 .../subcommand/server-acl-init/rules.go | 201 +++--
 .../subcommand/server-acl-init/rules_test.go | 734 ++++++++++++------
 18 files changed, 1432 insertions(+), 579 deletions(-)
 rename {charts/consul/test/acceptance => acceptance}/tests/fixtures/cases/static-client-partition/kustomization.yaml (100%)
 rename {charts/consul/test/acceptance => acceptance}/tests/fixtures/cases/static-client-partition/patch.yaml (100%)
 rename {charts/consul/test/acceptance => acceptance}/tests/partitions/main_test.go (77%)
 create mode 100644 acceptance/tests/partitions/partitions_test.go
 delete mode 100644 charts/consul/test/acceptance/tests/partitions/partitions_test.go

diff --git a/CHANGELOG.md b/CHANGELOG.md
index b1ed8c762b..7eedfd277b 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -12,6 +12,105 @@ IMPROVEMENTS:
 * Upgrade Docker image Alpine version from 3.13 to 3.14. [[GH-737](https://github.com/hashicorp/consul-k8s/pull/737)]
 * Helm Chart
   * Enable adding extra containers to server and client Pods. [[GH-749](https://github.com/hashicorp/consul-k8s/pull/749)]
+  * ACL support for Admin Partitions. **(Consul Enterprise only)**
+    **BETA** [[GH-766](https://github.com/hashicorp/consul-k8s/pull/766)]
+    * This feature enables ACL support for Admin Partitions. The server-acl-init job now creates a Partition token. This token
+      can be used to bootstrap new partitions as well as to manage ACLs in non-default partitions.
+    * Partition-to-partition networking is disabled if ACLs are enabled.
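For reference, the policy attached to the partition token created by server-acl-init (from `rules.go` in this PR) is:

```hcl
operator = "write"
agent_prefix "" {
  policy = "read"
}
partition_prefix "" {
  namespace_prefix "" {
    acl = "write"
  }
}
```

This is what allows the token to bootstrap new partitions and to manage ACLs inside them.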

To enable ACLs on the server cluster, use the following config:
```yaml
global:
  enableConsulNamespaces: true
  tls:
    enabled: true
  image: hashicorp/consul-enterprise:1.11.0-ent-beta1
  adminPartitions:
    enabled: true
  acls:
    manageSystemACLs: true
server:
  exposeGossipAndRPCPorts: true
  enterpriseLicense:
    secretName: license
    secretKey: key
  replicas: 1
connectInject:
  enabled: true
  transparentProxy:
    defaultEnabled: false
  consulNamespaces:
    mirroringK8S: true
controller:
  enabled: true
```

Identify the LoadBalancer external IP of the `partition-service`:
```bash
kubectl get svc consul-consul-partition-service -o json | jq -r '.status.loadBalancer.ingress[0].ip'
```

Migrate the TLS CA credentials from the server cluster to the workload clusters:
```bash
kubectl get secret consul-consul-ca-key --context "server-context" -o json | kubectl apply --context "workload-context" -f -
kubectl get secret consul-consul-ca-cert --context "server-context" -o json | kubectl apply --context "workload-context" -f -
```

Migrate the Partition token from the server cluster to the workload clusters:
```bash
kubectl get secret consul-consul-partitions-acl-token --context "server-context" -o json | kubectl apply --context "workload-context" -f -
```

Identify the Kubernetes AuthMethod URL of the workload cluster to use as the `k8sAuthMethodHost`:
```bash
kubectl config view -o "jsonpath={.clusters[?(@.name=='workload-cluster-name')].cluster.server}"
```

Configure the workload cluster using the following:

```yaml
global:
  enabled: false
  enableConsulNamespaces: true
  image: hashicorp/consul-enterprise:1.11.0-ent-beta1
  adminPartitions:
    enabled: true
    name: "partition-name"
  tls:
    enabled: true
    caCert:
      secretName: consul-consul-ca-cert
      secretKey: tls.crt
    caKey:
      secretName: consul-consul-ca-key
      secretKey: tls.key
  acls:
    manageSystemACLs: true
    bootstrapToken:
      secretName: consul-consul-partitions-acl-token
      secretKey: token
server:
  enterpriseLicense:
    secretName: license
    secretKey: key
externalServers:
  enabled: true
  hosts: [ "loadbalancer IP" ]
  tlsServerName: server.dc1.consul
  k8sAuthMethodHost: "authmethod-host IP"
client:
  enabled: true
  exposeGossipPorts: true
  join: [ "loadbalancer IP" ]
connectInject:
  enabled: true
  consulNamespaces:
    mirroringK8S: true
controller:
  enabled: true
```
This should create clusters that have Admin Partitions deployed on them with ACLs enabled.

 * CLI
   * Add `version` command.
[[GH-741](https://github.com/hashicorp/consul-k8s/pull/741)] diff --git a/acceptance/go.mod b/acceptance/go.mod index 040d14ee87..5e017ccec6 100644 --- a/acceptance/go.mod +++ b/acceptance/go.mod @@ -4,7 +4,7 @@ go 1.14 require ( github.com/gruntwork-io/terratest v0.31.2 - github.com/hashicorp/consul/api v1.9.0 + github.com/hashicorp/consul/api v1.10.1-0.20210915232521-e0a7900f52bf github.com/hashicorp/consul/sdk v0.8.0 github.com/stretchr/testify v1.5.1 gopkg.in/yaml.v2 v2.2.8 diff --git a/acceptance/go.sum b/acceptance/go.sum index f91e3dc13c..ab20eec364 100644 --- a/acceptance/go.sum +++ b/acceptance/go.sum @@ -225,8 +225,9 @@ github.com/gruntwork-io/gruntwork-cli v0.7.0 h1:YgSAmfCj9c61H+zuvHwKfYUwlMhu5arn github.com/gruntwork-io/gruntwork-cli v0.7.0/go.mod h1:jp6Z7NcLF2avpY8v71fBx6hds9eOFPELSuD/VPv7w00= github.com/gruntwork-io/terratest v0.31.2 h1:xvYHA80MUq5kx670dM18HInewOrrQrAN+XbVVtytUHg= github.com/gruntwork-io/terratest v0.31.2/go.mod h1:EEgJie28gX/4AD71IFqgMj6e99KP5mi81hEtzmDjxTo= -github.com/hashicorp/consul/api v1.9.0 h1:T6dKIWcaihG2c21YUi0BMAHbJanVXiYuz+mPgqxY3N4= -github.com/hashicorp/consul/api v1.9.0/go.mod h1:XjsvQN+RJGWI2TWy1/kqaE16HrR2J/FWgkYjdZQsX9M= +github.com/hashicorp/consul/api v1.10.1-0.20210915232521-e0a7900f52bf h1:fouyN8SkrE4py09XaOru4PCM9zunem39CjOrMJMrKsc= +github.com/hashicorp/consul/api v1.10.1-0.20210915232521-e0a7900f52bf/go.mod h1:sDjTOq0yUyv5G4h+BqSea7Fn6BU+XbolEz1952UB+mk= +github.com/hashicorp/consul/sdk v0.7.0/go.mod h1:fY08Y9z5SvJqevyZNy6WWPXiG3KwBPAvlcdx16zZ0fM= github.com/hashicorp/consul/sdk v0.8.0 h1:OJtKBtEjboEZvG6AOUdh4Z1Zbyu0WcxQ0qatRrZHTVU= github.com/hashicorp/consul/sdk v0.8.0/go.mod h1:GBvyrGALthsZObzUGsfgHZQDXjg4lOjagTIwIR1vPms= github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= diff --git a/charts/consul/test/acceptance/tests/fixtures/cases/static-client-partition/kustomization.yaml b/acceptance/tests/fixtures/cases/static-client-partition/kustomization.yaml similarity index 100% rename from charts/consul/test/acceptance/tests/fixtures/cases/static-client-partition/kustomization.yaml rename to acceptance/tests/fixtures/cases/static-client-partition/kustomization.yaml diff --git a/charts/consul/test/acceptance/tests/fixtures/cases/static-client-partition/patch.yaml b/acceptance/tests/fixtures/cases/static-client-partition/patch.yaml similarity index 100% rename from charts/consul/test/acceptance/tests/fixtures/cases/static-client-partition/patch.yaml rename to acceptance/tests/fixtures/cases/static-client-partition/patch.yaml diff --git a/charts/consul/test/acceptance/tests/partitions/main_test.go b/acceptance/tests/partitions/main_test.go similarity index 77% rename from charts/consul/test/acceptance/tests/partitions/main_test.go rename to acceptance/tests/partitions/main_test.go index c26f293f10..b2758a572c 100644 --- a/charts/consul/test/acceptance/tests/partitions/main_test.go +++ b/acceptance/tests/partitions/main_test.go @@ -5,7 +5,7 @@ import ( "os" "testing" - testsuite "github.com/hashicorp/consul-k8s/charts/consul/test/acceptance/framework/suite" + testsuite "github.com/hashicorp/consul-k8s/acceptance/framework/suite" ) var suite testsuite.Suite diff --git a/acceptance/tests/partitions/partitions_test.go b/acceptance/tests/partitions/partitions_test.go new file mode 100644 index 0000000000..0e48ddaa4d --- /dev/null +++ b/acceptance/tests/partitions/partitions_test.go @@ -0,0 +1,445 @@ +package partitions + +import ( + "context" + "fmt" + "strconv" + "testing" + + terratestk8s 
"github.com/gruntwork-io/terratest/modules/k8s" + "github.com/hashicorp/consul-k8s/acceptance/framework/consul" + "github.com/hashicorp/consul-k8s/acceptance/framework/environment" + "github.com/hashicorp/consul-k8s/acceptance/framework/helpers" + "github.com/hashicorp/consul-k8s/acceptance/framework/k8s" + "github.com/hashicorp/consul-k8s/acceptance/framework/logger" + "github.com/hashicorp/consul/api" + "github.com/hashicorp/consul/sdk/testutil/retry" + "github.com/stretchr/testify/require" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +const staticClientName = "static-client" +const staticServerName = "static-server" +const staticServerNamespace = "ns1" +const staticClientNamespace = "ns2" + +// Test that Connect works in a default installation. +// i.e. without ACLs because TLS is required for setting up Admin Partitions. +func TestPartitions(t *testing.T) { + env := suite.Environment() + cfg := suite.Config() + + if !cfg.EnableEnterprise { + t.Skipf("skipping this test because -enable-enterprise is not set") + } + + if !cfg.UseKind { + t.Skipf("skipping this test because Admin Partition tests are only supported in Kind for now") + } + + if cfg.EnableTransparentProxy { + t.Skipf("skipping this test because -enable-transparent-proxy is true") + } + + cases := []struct { + name string + destinationNamespace string + mirrorK8S bool + secure bool + }{ + { + "default namespace", + "default", + false, + false, + }, + { + "default namespace; secure", + "default", + false, + true, + }, + { + "single destination namespace", + staticServerNamespace, + false, + false, + }, + { + "single destination namespace; secure", + staticServerNamespace, + false, + true, + }, + { + "mirror k8s namespaces", + staticServerNamespace, + true, + false, + }, + { + "mirror k8s namespaces; secure", + staticServerNamespace, + true, + true, + }, + } + + for _, c := range cases { + t.Run(c.name, func(t *testing.T) { + serverClusterContext := env.DefaultContext(t) + clientClusterContext := env.Context(t, environment.SecondaryContextName) + + ctx := context.Background() + + serverHelmValues := map[string]string{ + "global.datacenter": "dc1", + "global.image": "hashicorp/consul-enterprise:1.11.0-ent-beta1", + + "global.adminPartitions.enabled": "true", + "global.enableConsulNamespaces": "true", + "global.tls.enabled": "true", + "global.tls.httpsOnly": strconv.FormatBool(c.secure), + "global.tls.enableAutoEncrypt": strconv.FormatBool(c.secure), + + "server.exposeGossipAndRPCPorts": "true", + + "connectInject.enabled": "true", + // When mirroringK8S is set, this setting is ignored. + "connectInject.consulNamespaces.consulDestinationNamespace": c.destinationNamespace, + "connectInject.consulNamespaces.mirroringK8S": strconv.FormatBool(c.mirrorK8S), + "connectInject.transparentProxy.defaultEnabled": "false", + + "global.acls.manageSystemACLs": strconv.FormatBool(c.secure), + } + + if cfg.UseKind { + serverHelmValues["global.adminPartitions.service.type"] = "NodePort" + serverHelmValues["global.adminPartitions.service.nodePort.https"] = "30000" + } + + releaseName := helpers.RandomName() + + // Install the consul cluster with servers in the default kubernetes context. + serverConsulCluster := consul.NewHelmCluster(t, serverHelmValues, serverClusterContext, cfg, releaseName) + serverConsulCluster.Create(t) + + // Get the TLS CA certificate and key secret from the server cluster and apply it to client cluster. 
+ tlsCert := fmt.Sprintf("%s-consul-ca-cert", releaseName) + tlsKey := fmt.Sprintf("%s-consul-ca-key", releaseName) + + logger.Logf(t, "retrieving ca cert secret %s from the server cluster and applying to the client cluster", tlsCert) + caCertSecret, err := serverClusterContext.KubernetesClient(t).CoreV1().Secrets(serverClusterContext.KubectlOptions(t).Namespace).Get(ctx, tlsCert, metav1.GetOptions{}) + caCertSecret.ResourceVersion = "" + require.NoError(t, err) + _, err = clientClusterContext.KubernetesClient(t).CoreV1().Secrets(clientClusterContext.KubectlOptions(t).Namespace).Create(ctx, caCertSecret, metav1.CreateOptions{}) + require.NoError(t, err) + + if !c.secure { + // When running in the insecure mode, auto-encrypt is disabled which requires both + // the CA cert and CA key to be available in the clients cluster. + logger.Logf(t, "retrieving ca key secret %s from the server cluster and applying to the client cluster", tlsKey) + caKeySecret, err := serverClusterContext.KubernetesClient(t).CoreV1().Secrets(serverClusterContext.KubectlOptions(t).Namespace).Get(ctx, tlsKey, metav1.GetOptions{}) + caKeySecret.ResourceVersion = "" + require.NoError(t, err) + _, err = clientClusterContext.KubernetesClient(t).CoreV1().Secrets(clientClusterContext.KubectlOptions(t).Namespace).Create(ctx, caKeySecret, metav1.CreateOptions{}) + require.NoError(t, err) + } + + partitionToken := fmt.Sprintf("%s-consul-partitions-acl-token", releaseName) + if c.secure { + logger.Logf(t, "retrieving partition token secret %s from the server cluster and applying to the client cluster", tlsKey) + token, err := serverClusterContext.KubernetesClient(t).CoreV1().Secrets(serverClusterContext.KubectlOptions(t).Namespace).Get(ctx, partitionToken, metav1.GetOptions{}) + token.ResourceVersion = "" + require.NoError(t, err) + _, err = clientClusterContext.KubernetesClient(t).CoreV1().Secrets(clientClusterContext.KubectlOptions(t).Namespace).Create(ctx, token, metav1.CreateOptions{}) + require.NoError(t, err) + } + + var partitionSvcIP string + if !cfg.UseKind { + // Get the IP of the partition service to configure the external server address in the values file for the workload cluster. + partitionServiceName := fmt.Sprintf("%s-partition-secret", releaseName) + logger.Logf(t, "retrieving partition service to determine external IP for servers") + partitionsSvc, err := serverClusterContext.KubernetesClient(t).CoreV1().Services(serverClusterContext.KubectlOptions(t).Namespace).Get(ctx, partitionServiceName, metav1.GetOptions{}) + require.NoError(t, err) + partitionSvcIP = partitionsSvc.Status.LoadBalancer.Ingress[0].IP + } else { + nodeList, err := serverClusterContext.KubernetesClient(t).CoreV1().Nodes().List(context.Background(), metav1.ListOptions{}) + require.NoError(t, err) + // Get the address of the (only) node from the Kind cluster. + partitionSvcIP = nodeList.Items[0].Status.Addresses[0].Address + } + + // The Kubernetes AuthMethod IP for Kind is read from the endpoint for the Kubernetes service. On other clouds, + // this can be identified by reading the cluster config. + kubernetesEndpoint, err := clientClusterContext.KubernetesClient(t).CoreV1().Endpoints("default").Get(ctx, "kubernetes", metav1.GetOptions{}) + require.NoError(t, err) + k8sAuthMethodHost := fmt.Sprintf("%s:%d", kubernetesEndpoint.Subsets[0].Addresses[0].IP, kubernetesEndpoint.Subsets[0].Ports[0].Port) + + // Create client cluster. 
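+			// The workload (client) cluster runs no Consul servers (global.enabled=false). It reaches
+			// the server cluster through the exposed partition service address and, when ACLs are
+			// enabled, uses the copied partition token as its ACL bootstrap token.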
+ clientHelmValues := map[string]string{ + "global.datacenter": "dc1", + "global.image": "hashicorp/consul-enterprise:1.11.0-ent-beta1", + "global.enabled": "false", + + "global.tls.enabled": "true", + "global.tls.httpsOnly": strconv.FormatBool(c.secure), + "global.tls.enableAutoEncrypt": strconv.FormatBool(c.secure), + + "server.exposeGossipAndRPCPorts": "true", + + "connectInject.enabled": "true", + // When mirroringK8S is set, this setting is ignored. + "connectInject.consulNamespaces.consulDestinationNamespace": c.destinationNamespace, + "connectInject.consulNamespaces.mirroringK8S": strconv.FormatBool(c.mirrorK8S), + "connectInject.transparentProxy.defaultEnabled": "false", + + "global.acls.manageSystemACLs": strconv.FormatBool(c.secure), + + "global.adminPartitions.enabled": "true", + "global.adminPartitions.name": "secondary", + "global.enableConsulNamespaces": "true", + + "global.tls.caCert.secretName": tlsCert, + "global.tls.caCert.secretKey": "tls.crt", + + "externalServers.enabled": "true", + "externalServers.hosts[0]": partitionSvcIP, + "externalServers.tlsServerName": "server.dc1.consul", + "externalServers.k8sAuthMethodHost": k8sAuthMethodHost, + + "client.enabled": "true", + "client.exposeGossipPorts": "true", + "client.join[0]": partitionSvcIP, + } + + if c.secure { + // setup partition token if ACLs enabled. + clientHelmValues["global.acls.bootstrapToken.secretName"] = partitionToken + clientHelmValues["global.acls.bootstrapToken.secretKey"] = "token" + } else { + // provide CA key when auto-encrypt is disabled. + clientHelmValues["global.tls.caKey.secretName"] = tlsKey + clientHelmValues["global.tls.caKey.secretKey"] = "tls.key" + } + + if cfg.UseKind { + clientHelmValues["externalServers.httpsPort"] = "30000" + } + + // Install the consul cluster without servers in the client cluster kubernetes context. 
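+			// After the install, the client agent's logs are checked for the partition it registered
+			// with, to confirm the agents actually joined the "secondary" partition.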
+ clientConsulCluster := consul.NewHelmCluster(t, clientHelmValues, clientClusterContext, cfg, releaseName) + clientConsulCluster.Create(t) + + agentPodList, err := clientClusterContext.KubernetesClient(t).CoreV1().Pods(clientClusterContext.KubectlOptions(t).Namespace).List(ctx, metav1.ListOptions{LabelSelector: "app=consul,component=client"}) + require.NoError(t, err) + require.Len(t, agentPodList.Items, 1) + + output, err := k8s.RunKubectlAndGetOutputE(t, clientClusterContext.KubectlOptions(t), "logs", agentPodList.Items[0].Name, "-n", clientClusterContext.KubectlOptions(t).Namespace) + require.NoError(t, err) + require.Contains(t, output, "Partition: 'secondary'") + + serverClusterStaticServerOpts := &terratestk8s.KubectlOptions{ + ContextName: serverClusterContext.KubectlOptions(t).ContextName, + ConfigPath: serverClusterContext.KubectlOptions(t).ConfigPath, + Namespace: staticServerNamespace, + } + serverClusterStaticClientOpts := &terratestk8s.KubectlOptions{ + ContextName: serverClusterContext.KubectlOptions(t).ContextName, + ConfigPath: serverClusterContext.KubectlOptions(t).ConfigPath, + Namespace: staticClientNamespace, + } + clientClusterStaticServerOpts := &terratestk8s.KubectlOptions{ + ContextName: clientClusterContext.KubectlOptions(t).ContextName, + ConfigPath: clientClusterContext.KubectlOptions(t).ConfigPath, + Namespace: staticServerNamespace, + } + clientClusterStaticClientOpts := &terratestk8s.KubectlOptions{ + ContextName: clientClusterContext.KubectlOptions(t).ContextName, + ConfigPath: clientClusterContext.KubectlOptions(t).ConfigPath, + Namespace: staticClientNamespace, + } + + logger.Logf(t, "creating namespaces %s and %s in servers cluster", staticServerNamespace, staticClientNamespace) + k8s.RunKubectl(t, serverClusterContext.KubectlOptions(t), "create", "ns", staticServerNamespace) + helpers.Cleanup(t, cfg.NoCleanupOnFailure, func() { + k8s.RunKubectl(t, serverClusterContext.KubectlOptions(t), "delete", "ns", staticServerNamespace) + }) + + k8s.RunKubectl(t, serverClusterContext.KubectlOptions(t), "create", "ns", staticClientNamespace) + helpers.Cleanup(t, cfg.NoCleanupOnFailure, func() { + // Note: this deletion will take longer in cases when the static-client deployment + // hasn't yet fully terminated. + k8s.RunKubectl(t, serverClusterContext.KubectlOptions(t), "delete", "ns", staticClientNamespace) + }) + + logger.Logf(t, "creating namespaces %s and %s in clients cluster", staticServerNamespace, staticClientNamespace) + k8s.RunKubectl(t, clientClusterContext.KubectlOptions(t), "create", "ns", staticServerNamespace) + helpers.Cleanup(t, cfg.NoCleanupOnFailure, func() { + k8s.RunKubectl(t, clientClusterContext.KubectlOptions(t), "delete", "ns", staticServerNamespace) + }) + + k8s.RunKubectl(t, clientClusterContext.KubectlOptions(t), "create", "ns", staticClientNamespace) + helpers.Cleanup(t, cfg.NoCleanupOnFailure, func() { + // Note: this deletion will take longer in cases when the static-client deployment + // hasn't yet fully terminated. 
+ k8s.RunKubectl(t, clientClusterContext.KubectlOptions(t), "delete", "ns", staticClientNamespace) + }) + + consulClient := serverConsulCluster.SetupConsulClient(t, c.secure) + + serverQueryServerOpts := &api.QueryOptions{Namespace: staticServerNamespace, Partition: "default"} + clientQueryServerOpts := &api.QueryOptions{Namespace: staticClientNamespace, Partition: "default"} + + serverQueryClientOpts := &api.QueryOptions{Namespace: staticServerNamespace, Partition: "secondary"} + clientQueryClientOpts := &api.QueryOptions{Namespace: staticClientNamespace, Partition: "secondary"} + + if !c.mirrorK8S { + serverQueryServerOpts = &api.QueryOptions{Namespace: c.destinationNamespace, Partition: "default"} + clientQueryServerOpts = &api.QueryOptions{Namespace: c.destinationNamespace, Partition: "default"} + serverQueryClientOpts = &api.QueryOptions{Namespace: c.destinationNamespace, Partition: "secondary"} + clientQueryClientOpts = &api.QueryOptions{Namespace: c.destinationNamespace, Partition: "secondary"} + } + + // Check that the ACL token is deleted. + if c.secure { + // We need to register the cleanup function before we create the deployments + // because golang will execute them in reverse order i.e. the last registered + // cleanup function will be executed first. + t.Cleanup(func() { + if c.secure { + retry.Run(t, func(r *retry.R) { + tokens, _, err := consulClient.ACL().TokenList(serverQueryServerOpts) + require.NoError(r, err) + for _, token := range tokens { + require.NotContains(r, token.Description, staticServerName) + } + + tokens, _, err = consulClient.ACL().TokenList(clientQueryServerOpts) + require.NoError(r, err) + for _, token := range tokens { + require.NotContains(r, token.Description, staticClientName) + } + tokens, _, err = consulClient.ACL().TokenList(serverQueryClientOpts) + require.NoError(r, err) + for _, token := range tokens { + require.NotContains(r, token.Description, staticServerName) + } + + tokens, _, err = consulClient.ACL().TokenList(clientQueryClientOpts) + require.NoError(r, err) + for _, token := range tokens { + require.NotContains(r, token.Description, staticClientName) + } + }) + } + }) + } + + logger.Log(t, "creating static-server and static-client deployments in server cluster") + k8s.DeployKustomize(t, serverClusterStaticServerOpts, cfg.NoCleanupOnFailure, cfg.DebugDirectory, "../fixtures/cases/static-server-inject") + if c.destinationNamespace == "default" { + k8s.DeployKustomize(t, serverClusterStaticClientOpts, cfg.NoCleanupOnFailure, cfg.DebugDirectory, "../fixtures/cases/static-client-inject") + } else { + k8s.DeployKustomize(t, serverClusterStaticClientOpts, cfg.NoCleanupOnFailure, cfg.DebugDirectory, "../fixtures/cases/static-client-namespaces") + } + logger.Log(t, "creating static-server and static-client deployments in client cluster") + k8s.DeployKustomize(t, clientClusterStaticServerOpts, cfg.NoCleanupOnFailure, cfg.DebugDirectory, "../fixtures/cases/static-server-inject") + if c.destinationNamespace == "default" { + k8s.DeployKustomize(t, clientClusterStaticClientOpts, cfg.NoCleanupOnFailure, cfg.DebugDirectory, "../fixtures/cases/static-client-inject") + } else { + k8s.DeployKustomize(t, clientClusterStaticClientOpts, cfg.NoCleanupOnFailure, cfg.DebugDirectory, "../fixtures/cases/static-client-namespaces") + } + // Check that both static-server and static-client have been injected and now have 2 containers in server cluster. 
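+			// (The second container in each pod is the Envoy sidecar added by connect-inject.)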
+ for _, labelSelector := range []string{"app=static-server", "app=static-client"} { + podList, err := serverClusterContext.KubernetesClient(t).CoreV1().Pods(metav1.NamespaceAll).List(context.Background(), metav1.ListOptions{ + LabelSelector: labelSelector, + }) + require.NoError(t, err) + require.Len(t, podList.Items, 1) + require.Len(t, podList.Items[0].Spec.Containers, 2) + } + + // Check that both static-server and static-client have been injected and now have 2 containers in client cluster. + for _, labelSelector := range []string{"app=static-server", "app=static-client"} { + podList, err := clientClusterContext.KubernetesClient(t).CoreV1().Pods(metav1.NamespaceAll).List(context.Background(), metav1.ListOptions{ + LabelSelector: labelSelector, + }) + require.NoError(t, err) + require.Len(t, podList.Items, 1) + require.Len(t, podList.Items[0].Spec.Containers, 2) + } + + // Make sure that services are registered in the correct namespace. + // If mirroring is enabled, we expect services to be registered in the + // Consul namespace with the same name as their source + // Kubernetes namespace. + // If a single destination namespace is set, we expect all services + // to be registered in that destination Consul namespace. + // Server cluster. + services, _, err := consulClient.Catalog().Service(staticServerName, "", serverQueryServerOpts) + require.NoError(t, err) + require.Len(t, services, 1) + + services, _, err = consulClient.Catalog().Service(staticClientName, "", clientQueryServerOpts) + require.NoError(t, err) + require.Len(t, services, 1) + + // Client cluster. + services, _, err = consulClient.Catalog().Service(staticServerName, "", serverQueryClientOpts) + require.NoError(t, err) + require.Len(t, services, 1) + + services, _, err = consulClient.Catalog().Service(staticClientName, "", clientQueryClientOpts) + require.NoError(t, err) + require.Len(t, services, 1) + + if c.secure { + logger.Log(t, "checking that the connection is not successful because there's no intention") + k8s.CheckStaticServerConnectionFailing(t, serverClusterStaticClientOpts, "http://localhost:1234") + k8s.CheckStaticServerConnectionFailing(t, clientClusterStaticClientOpts, "http://localhost:1234") + + intention := &api.Intention{ + SourceName: staticClientName, + SourceNS: staticClientNamespace, + DestinationName: staticServerName, + DestinationNS: staticServerNamespace, + Action: api.IntentionActionAllow, + } + + // Set the destination namespace to be the same + // unless mirrorK8S is true. + if !c.mirrorK8S { + intention.SourceNS = c.destinationNamespace + intention.DestinationNS = c.destinationNamespace + } + + logger.Log(t, "creating intention") + _, err := consulClient.Connect().IntentionUpsert(intention, &api.WriteOptions{Partition: "default"}) + require.NoError(t, err) + _, err = consulClient.Connect().IntentionUpsert(intention, &api.WriteOptions{Partition: "secondary"}) + require.NoError(t, err) + } + + logger.Log(t, "checking that connection is successful") + k8s.CheckStaticServerConnectionSuccessful(t, serverClusterStaticClientOpts, "http://localhost:1234") + k8s.CheckStaticServerConnectionSuccessful(t, clientClusterStaticClientOpts, "http://localhost:1234") + + // Test that kubernetes readiness status is synced to Consul. + // Create the file so that the readiness probe of the static-server pod fails. 
+ logger.Log(t, "testing k8s -> consul health checks sync by making the static-server unhealthy") + k8s.RunKubectl(t, serverClusterStaticServerOpts, "exec", "deploy/"+staticServerName, "--", "touch", "/tmp/unhealthy") + k8s.RunKubectl(t, clientClusterStaticServerOpts, "exec", "deploy/"+staticServerName, "--", "touch", "/tmp/unhealthy") + + // The readiness probe should take a moment to be reflected in Consul, CheckStaticServerConnection will retry + // until Consul marks the service instance unavailable for mesh traffic, causing the connection to fail. + // We are expecting a "connection reset by peer" error because in a case of health checks, + // there will be no healthy proxy host to connect to. That's why we can't assert that we receive an empty reply + // from server, which is the case when a connection is unsuccessful due to intentions in other tests. + logger.Log(t, "checking that connection is unsuccessful") + k8s.CheckStaticServerConnectionMultipleFailureMessages(t, serverClusterStaticClientOpts, false, []string{"curl: (56) Recv failure: Connection reset by peer", "curl: (52) Empty reply from server"}, "http://localhost:1234") + k8s.CheckStaticServerConnectionMultipleFailureMessages(t, clientClusterStaticClientOpts, false, []string{"curl: (56) Recv failure: Connection reset by peer", "curl: (52) Empty reply from server"}, "http://localhost:1234") + }) + } +} diff --git a/charts/consul/templates/partition-init-job.yaml b/charts/consul/templates/partition-init-job.yaml index b9d438c436..10772cd75d 100644 --- a/charts/consul/templates/partition-init-job.yaml +++ b/charts/consul/templates/partition-init-job.yaml @@ -42,19 +42,26 @@ spec: path: tls.crt {{- end }} containers: - - name: post-install-job + - name: partition-init-job image: {{ .Values.global.imageK8S }} env: - name: NAMESPACE valueFrom: fieldRef: fieldPath: metadata.namespace - {{- if .Values.global.tls.enabled }} + {{- if (and .Values.global.acls.bootstrapToken.secretName .Values.global.acls.bootstrapToken.secretKey) }} + - name: CONSUL_HTTP_TOKEN + valueFrom: + secretKeyRef: + name: {{ .Values.global.acls.bootstrapToken.secretName }} + key: {{ .Values.global.acls.bootstrapToken.secretKey }} + {{- end }} + {{- if .Values.global.tls.enabled }} volumeMounts: - name: consul-ca-cert mountPath: /consul/tls/ca readOnly: true - {{- end }} + {{- end }} command: - "/bin/sh" - "-ec" diff --git a/charts/consul/templates/server-acl-init-job.yaml b/charts/consul/templates/server-acl-init-job.yaml index 752774064a..07b9ab8e6b 100644 --- a/charts/consul/templates/server-acl-init-job.yaml +++ b/charts/consul/templates/server-acl-init-job.yaml @@ -134,7 +134,10 @@ spec: -sync-consul-node-name={{ .Values.syncCatalog.consulNodeName }} \ {{- end }} {{- end }} - + {{- if .Values.global.adminPartitions.enabled }} + -enable-partitions=true \ + -partition={{ .Values.global.adminPartitions.name }} \ + {{- end }} {{- if (or (and (ne (.Values.dns.enabled | toString) "-") .Values.dns.enabled) (and (eq (.Values.dns.enabled | toString) "-") .Values.global.enabled)) }} -allow-dns=true \ {{- end }} diff --git a/charts/consul/test/acceptance/tests/partitions/partitions_test.go b/charts/consul/test/acceptance/tests/partitions/partitions_test.go deleted file mode 100644 index bb6c1553eb..0000000000 --- a/charts/consul/test/acceptance/tests/partitions/partitions_test.go +++ /dev/null @@ -1,146 +0,0 @@ -package partitions - -import ( - "context" - "fmt" - "testing" - - "github.com/hashicorp/consul-k8s/charts/consul/test/acceptance/framework/consul" - 
"github.com/hashicorp/consul-k8s/charts/consul/test/acceptance/framework/environment" - "github.com/hashicorp/consul-k8s/charts/consul/test/acceptance/framework/helpers" - "github.com/hashicorp/consul-k8s/charts/consul/test/acceptance/framework/k8s" - "github.com/hashicorp/consul-k8s/charts/consul/test/acceptance/framework/logger" - "github.com/stretchr/testify/require" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// Test that Connect works in a default installation. -// i.e. without ACLs because TLS is required for setting up Admin Partitions. -func TestPartitions(t *testing.T) { - env := suite.Environment() - cfg := suite.Config() - - if !cfg.EnableEnterprise { - t.Skipf("skipping this test because -enable-enterprise is not set") - } - - if !cfg.UseKind { - t.Skipf("skipping this test because Admin Partition tests are only supported in Kind for now") - } - - primaryContext := env.DefaultContext(t) - secondaryContext := env.Context(t, environment.SecondaryContextName) - - ctx := context.Background() - - primaryHelmValues := map[string]string{ - "global.datacenter": "dc1", - "global.image": "hashicorp/consul-enterprise:1.11.0-ent-alpha", - - "global.adminPartitions.enabled": "true", - "global.enableConsulNamespaces": "true", - "global.tls.enabled": "true", - - "server.exposeGossipAndRPCPorts": "true", - - "connectInject.enabled": "true", - } - - if cfg.UseKind { - primaryHelmValues["global.adminPartitions.service.type"] = "NodePort" - primaryHelmValues["global.adminPartitions.service.nodePort.https"] = "30000" - } - - releaseName := helpers.RandomName() - - // Install the consul cluster with servers in the default kubernetes context. - primaryConsulCluster := consul.NewHelmCluster(t, primaryHelmValues, primaryContext, cfg, releaseName) - primaryConsulCluster.Create(t) - - // Get the TLS CA certificate and key secret from the primary cluster and apply it to secondary cluster - tlsCert := fmt.Sprintf("%s-consul-ca-cert", releaseName) - logger.Logf(t, "retrieving ca cert secret %s from the primary cluster and applying to the secondary", tlsCert) - caCertSecret, err := primaryContext.KubernetesClient(t).CoreV1().Secrets(primaryContext.KubectlOptions(t).Namespace).Get(ctx, tlsCert, metav1.GetOptions{}) - caCertSecret.ResourceVersion = "" - require.NoError(t, err) - _, err = secondaryContext.KubernetesClient(t).CoreV1().Secrets(secondaryContext.KubectlOptions(t).Namespace).Create(ctx, caCertSecret, metav1.CreateOptions{}) - require.NoError(t, err) - - tlsKey := fmt.Sprintf("%s-consul-ca-key", releaseName) - logger.Logf(t, "retrieving ca key secret %s from the primary cluster and applying to the secondary", tlsKey) - caKeySecret, err := primaryContext.KubernetesClient(t).CoreV1().Secrets(primaryContext.KubectlOptions(t).Namespace).Get(ctx, tlsKey, metav1.GetOptions{}) - caKeySecret.ResourceVersion = "" - require.NoError(t, err) - _, err = secondaryContext.KubernetesClient(t).CoreV1().Secrets(secondaryContext.KubectlOptions(t).Namespace).Create(ctx, caKeySecret, metav1.CreateOptions{}) - require.NoError(t, err) - - var partitionSvcIP string - if !cfg.UseKind { - // Get the IP of the partition service to configure the external server address in the values file for the workload cluster. 
- partitionServiceName := fmt.Sprintf("%s-partition-secret", releaseName) - logger.Logf(t, "retrieving partition service to determine external IP for servers") - partitionsSvc, err := primaryContext.KubernetesClient(t).CoreV1().Services(primaryContext.KubectlOptions(t).Namespace).Get(ctx, partitionServiceName, metav1.GetOptions{}) - require.NoError(t, err) - partitionSvcIP = partitionsSvc.Status.LoadBalancer.Ingress[0].IP - } else { - nodeList, err := primaryContext.KubernetesClient(t).CoreV1().Nodes().List(context.Background(), metav1.ListOptions{}) - require.NoError(t, err) - // Get the address of the (only) node from the Kind cluster. - partitionSvcIP = nodeList.Items[0].Status.Addresses[0].Address - } - - // Create secondary cluster - secondaryHelmValues := map[string]string{ - "global.datacenter": "dc1", - "global.image": "hashicorp/consul-enterprise:1.11.0-ent-alpha", - "global.enabled": "false", - - "global.adminPartitions.enabled": "true", - "global.adminPartitions.name": "secondary", - "global.enableConsulNamespaces": "true", - - "global.tls.enabled": "true", - "global.tls.caCert.secretName": tlsCert, - "global.tls.caCert.secretKey": "tls.crt", - "global.tls.caKey.secretName": tlsKey, - "global.tls.caKey.secretKey": "tls.key", - - "externalServers.enabled": "true", - "externalServers.hosts[0]": partitionSvcIP, - "externalServers.tlsServerName": "server.dc1.consul", - - "client.enabled": "true", - "client.exposeGossipPorts": "true", - "client.join[0]": partitionSvcIP, - - "connectInject.enabled": "true", - } - - if cfg.UseKind { - secondaryHelmValues["externalServers.httpsPort"] = "30000" - } - - // Install the consul cluster without servers in the secondary kubernetes context. - secondaryConsulCluster := consul.NewHelmCluster(t, secondaryHelmValues, secondaryContext, cfg, releaseName) - secondaryConsulCluster.Create(t) - - agentPodList, err := secondaryContext.KubernetesClient(t).CoreV1().Pods(secondaryContext.KubectlOptions(t).Namespace).List(ctx, metav1.ListOptions{LabelSelector: "app=consul,component=client"}) - require.NoError(t, err) - require.Len(t, agentPodList.Items, 1) - - output, err := k8s.RunKubectlAndGetOutputE(t, secondaryContext.KubectlOptions(t), "logs", agentPodList.Items[0].Name, "-n", secondaryContext.KubectlOptions(t).Namespace) - require.NoError(t, err) - require.Contains(t, output, "Partition: 'secondary'") - - // TODO: These can be enabled once mesh gateways are used for communication between services. Currently we cant setup a flat pod network on Kind. 
- // Check that we can connect services over the mesh gateways - - //logger.Log(t, "creating static-server in workload cluster") - //k8s.DeployKustomize(t, secondaryContext.KubectlOptions(t), cfg.NoCleanupOnFailure, cfg.DebugDirectory, "../fixtures/cases/static-server-inject") - - //logger.Log(t, "creating static-client in server cluster") - //k8s.DeployKustomize(t, primaryContext.KubectlOptions(t), cfg.NoCleanupOnFailure, cfg.DebugDirectory, "../fixtures/cases/static-client-partition") - - //logger.Log(t, "checking that connection is successful") - //k8s.CheckStaticServerConnectionSuccessful(t, primaryContext.KubectlOptions(t), "http://localhost:1234") -} diff --git a/charts/consul/test/unit/client-daemonset.bats b/charts/consul/test/unit/client-daemonset.bats index e0560c8c60..64a9d0e4c0 100755 --- a/charts/consul/test/unit/client-daemonset.bats +++ b/charts/consul/test/unit/client-daemonset.bats @@ -1407,7 +1407,7 @@ rollingUpdate: #-------------------------------------------------------------------- # partitions -@test "client/DaemonSet: -partitions can be set by global.adminPartition.enabled" { +@test "client/DaemonSet: -partitions can be set by global.adminPartitions.enabled" { cd `chart_dir` local actual=$(helm template \ -s templates/client-daemonset.yaml \ @@ -1417,7 +1417,7 @@ rollingUpdate: [ "${actual}" = "true" ] } -@test "client/DaemonSet: -partitions can be overridden by global.adminPartition.name" { +@test "client/DaemonSet: -partitions can be overridden by global.adminPartitions.name" { cd `chart_dir` local actual=$(helm template \ -s templates/client-daemonset.yaml \ diff --git a/charts/consul/test/unit/partition-init-job.bats b/charts/consul/test/unit/partition-init-job.bats index ac6d5ccd27..addf34266e 100644 --- a/charts/consul/test/unit/partition-init-job.bats +++ b/charts/consul/test/unit/partition-init-job.bats @@ -93,4 +93,20 @@ load _helpers # check that the volume uses the provided secret key actual=$(echo $ca_cert_volume | jq -r '.secret.items[0].key' | tee /dev/stderr) [ "${actual}" = "key" ] -} \ No newline at end of file +} + +#-------------------------------------------------------------------- +# global.acls.bootstrapToken + +@test "partitionInit/Job: HTTP_TOKEN is set when global.acls.bootstrapToken is provided" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/partition-init-job.yaml \ + --set 'global.enabled=false' \ + --set 'global.adminPartitions.enabled=true' \ + --set 'global.acls.bootstrapToken.secretName=partition-token' \ + --set 'global.acls.bootstrapToken.secretKey=token' \ + . | tee /dev/stderr | + yq '[.spec.template.spec.containers[0].env[].name] | any(contains("CONSUL_HTTP_TOKEN"))' | tee /dev/stderr) + [ "${actual}" = "true" ] +} diff --git a/charts/consul/test/unit/server-acl-init-job.bats b/charts/consul/test/unit/server-acl-init-job.bats index 4fd6867d17..a0ae9a34c5 100644 --- a/charts/consul/test/unit/server-acl-init-job.bats +++ b/charts/consul/test/unit/server-acl-init-job.bats @@ -995,6 +995,45 @@ load _helpers [ "${actual}" = "true" ] } +#-------------------------------------------------------------------- +# admin partitions + +@test "serverACLInit/Job: admin partitions disabled by default" { + cd `chart_dir` + local object=$(helm template \ + -s templates/server-acl-init-job.yaml \ + --set 'global.acls.manageSystemACLs=true' \ + . 
| tee /dev/stderr | + yq '.spec.template.spec.containers[0].command' | tee /dev/stderr) + + local actual=$(echo $object | + yq 'any(contains("enable-partitions"))' | tee /dev/stderr) + [ "${actual}" = "false" ] + + local actual=$(echo $object | + yq 'any(contains("partition"))' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "serverACLInit/Job: admin partitions enabled when admin partitions are enabled" { + cd `chart_dir` + local object=$(helm template \ + -s templates/server-acl-init-job.yaml \ + --set 'global.acls.manageSystemACLs=true' \ + --set 'global.adminPartitions.enabled=true' \ + --set 'global.enableConsulNamespaces=true' \ + . | tee /dev/stderr | + yq '.spec.template.spec.containers[0].command' | tee /dev/stderr) + + local actual=$(echo $object | + yq 'any(contains("enable-partitions"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq 'any(contains("partition"))' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + #-------------------------------------------------------------------- # global.acls.createReplicationToken diff --git a/control-plane/subcommand/partition-init/command.go b/control-plane/subcommand/partition-init/command.go index bdbb4df21d..4b75f9634f 100644 --- a/control-plane/subcommand/partition-init/command.go +++ b/control-plane/subcommand/partition-init/command.go @@ -23,6 +23,7 @@ type Command struct { flags *flag.FlagSet k8s *k8sflags.K8SFlags + http *flags.HTTPFlags flagPartitionName string @@ -65,7 +66,6 @@ func (c *Command) init() { "The server name to set as the SNI header when sending HTTPS requests to Consul.") c.flags.BoolVar(&c.flagUseHTTPS, "use-https", false, "Toggle for using HTTPS for all API calls to Consul.") - c.flags.DurationVar(&c.flagTimeout, "timeout", 10*time.Minute, "How long we'll try to bootstrap Partitions for before timing out, e.g. 1ms, 2s, 3m") c.flags.StringVar(&c.flagLogLevel, "log-level", "info", @@ -75,7 +75,9 @@ func (c *Command) init() { "Enable or disable JSON output format for logging.") c.k8s = &k8sflags.K8SFlags{} + c.http = &flags.HTTPFlags{} flags.Merge(c.flags, c.k8s.Flags()) + flags.Merge(c.flags, c.http.Flags()) c.help = flags.Usage(help, c.flags) // Default retry to 1s. This is exposed for setting in tests. diff --git a/control-plane/subcommand/server-acl-init/command.go b/control-plane/subcommand/server-acl-init/command.go index b69b66edbb..7a9f2b40e2 100644 --- a/control-plane/subcommand/server-acl-init/command.go +++ b/control-plane/subcommand/server-acl-init/command.go @@ -56,18 +56,22 @@ type Command struct { flagIngressGatewayNames []string flagTerminatingGatewayNames []string - // Flags to configure Consul connection + // Flags to configure Consul connection. flagServerAddresses []string flagServerPort uint flagConsulCACert string flagConsulTLSServerName string flagUseHTTPS bool - // Flags for ACL replication + // Flags for ACL replication. flagCreateACLReplicationToken bool flagACLReplicationTokenFile string - // Flags to support namespaces + // Flags to support partitions. + flagEnablePartitions bool // true if Admin Partitions are enabled + flagPartitionName string // name of the Admin Partition + + // Flags to support namespaces. 
flagEnableNamespaces bool // Use namespacing on all components flagConsulSyncDestinationNamespace string // Consul namespace to register all catalog sync services into if not mirroring flagEnableSyncK8SNSMirroring bool // Enables mirroring of k8s namespaces into Consul for catalog sync @@ -76,7 +80,7 @@ type Command struct { flagEnableInjectK8SNSMirroring bool // Enables mirroring of k8s namespaces into Consul for Connect inject flagInjectK8SNSMirroringPrefix string // Prefix added to Consul namespaces created when mirroring injected services - // Flag to support a custom bootstrap token + // Flag to support a custom bootstrap token. flagBootstrapTokenFile string flagLogLevel string @@ -169,6 +173,11 @@ func (c *Command) init() { c.flags.BoolVar(&c.flagUseHTTPS, "use-https", false, "Toggle for using HTTPS for all API calls to Consul.") + c.flags.BoolVar(&c.flagEnablePartitions, "enable-partitions", false, + "[Enterprise Only] Enables Admin Partitions [Enterprise only feature]") + c.flags.StringVar(&c.flagPartitionName, "partition", "", + "[Enterprise Only] Name of the Admin Partition") + c.flags.BoolVar(&c.flagEnableNamespaces, "enable-namespaces", false, "[Enterprise Only] Enables namespaces, in either a single Consul namespace or mirrored [Enterprise only feature]") c.flags.StringVar(&c.flagConsulSyncDestinationNamespace, "consul-sync-destination-namespace", consulDefaultNamespace, @@ -349,7 +358,7 @@ func (c *Command) Run(args []string) int { // For all of the next operations we'll need a Consul client. serverAddr := fmt.Sprintf("%s:%d", serverAddresses[0], c.flagServerPort) - consulClient, err := consul.NewClient(&api.Config{ + clientConfig := &api.Config{ Address: serverAddr, Scheme: scheme, Token: bootstrapToken, @@ -357,7 +366,12 @@ func (c *Command) Run(args []string) int { Address: c.flagConsulTLSServerName, CAFile: c.flagConsulCACert, }, - }) + } + if c.flagEnablePartitions { + clientConfig.Partition = c.flagPartitionName + } + + consulClient, err := consul.NewClient(clientConfig) if err != nil { c.log.Error(fmt.Sprintf("Error creating Consul client for addr %q: %s", serverAddr, err)) return 1 @@ -382,6 +396,15 @@ func (c *Command) Run(args []string) int { } } + if c.flagEnablePartitions && c.flagPartitionName == consulDefaultPartition && isPrimary { + // Partition token must be local because only the Primary datacenter can have Admin Partitions. + err := c.createLocalACL("partitions", partitionRules, consulDC, isPrimary, consulClient) + if err != nil { + c.log.Error(err.Error()) + return 1 + } + } + // If namespaces are enabled, to allow cross-Consul-namespace permissions // for services from k8s, the Consul `default` namespace needs a policy // allowing service discovery in all namespaces. Each namespace that is @@ -389,12 +412,17 @@ func (c *Command) Run(args []string) int { // connect inject) needs to reference this policy on namespace creation // to finish the cross namespace permission setup. 
if c.flagEnableNamespaces { + crossNamespaceRule, err := c.crossNamespaceRule() + if err != nil { + c.log.Error("Error templating cross namespace rules", "err", err) + return 1 + } policyTmpl := api.ACLPolicy{ Name: "cross-namespace-policy", Description: "Policy to allow permissions to cross Consul namespaces for k8s services", - Rules: crossNamespaceRules, + Rules: crossNamespaceRule, } - err := c.untilSucceeds(fmt.Sprintf("creating %s policy", policyTmpl.Name), + err = c.untilSucceeds(fmt.Sprintf("creating %s policy", policyTmpl.Name), func() error { return c.createOrUpdateACLPolicy(policyTmpl, consulClient) }) @@ -497,7 +525,12 @@ func (c *Command) Run(args []string) int { } if c.flagCreateEntLicenseToken { - err := c.createLocalACL("enterprise-license", entLicenseRules, consulDC, isPrimary, consulClient) + var err error + if c.flagEnablePartitions { + err = c.createLocalACL("enterprise-license", entPartitionLicenseRules, consulDC, isPrimary, consulClient) + } else { + err = c.createLocalACL("enterprise-license", entLicenseRules, consulDC, isPrimary, consulClient) + } if err != nil { c.log.Error(err.Error()) return 1 @@ -827,10 +860,17 @@ func (c *Command) validateFlags() error { ) } + if c.flagEnablePartitions && c.flagPartitionName == "" { + return errors.New("-partition must be set if -enable-partitions is true") + } + if !c.flagEnablePartitions && c.flagPartitionName != "" { + return fmt.Errorf("-enable-partitions must be 'true' if setting -partition to %s", c.flagPartitionName) + } return nil } const consulDefaultNamespace = "default" +const consulDefaultPartition = "default" const synopsis = "Initialize ACLs on Consul servers and other components." const help = ` Usage: consul-k8s-control-plane server-acl-init [options] diff --git a/control-plane/subcommand/server-acl-init/command_ent_test.go b/control-plane/subcommand/server-acl-init/command_ent_test.go index 57270644b6..26842e8d6b 100644 --- a/control-plane/subcommand/server-acl-init/command_ent_test.go +++ b/control-plane/subcommand/server-acl-init/command_ent_test.go @@ -19,7 +19,6 @@ import ( // and there's a single consul destination namespace. func TestRun_ConnectInject_SingleDestinationNamespace(t *testing.T) { t.Parallel() - consulDestNamespaces := []string{"default", "destination"} for _, consulDestNamespace := range consulDestNamespaces { t.Run(consulDestNamespace, func(tt *testing.T) { @@ -40,6 +39,8 @@ func TestRun_ConnectInject_SingleDestinationNamespace(t *testing.T) { "-resource-prefix=" + resourcePrefix, "-k8s-namespace=" + ns, "-create-inject-token", + "-enable-partitions", + "-partition=default", "-enable-namespaces", "-consul-inject-destination-namespace", consulDestNamespace, "-acl-binding-rule-selector=serviceaccount.name!=default", @@ -160,6 +161,8 @@ func TestRun_ConnectInject_NamespaceMirroring(t *testing.T) { "-resource-prefix=" + resourcePrefix, "-k8s-namespace=" + ns, "-create-inject-token", + "-enable-partitions", + "-partition=default", "-enable-namespaces", "-enable-inject-k8s-namespace-mirroring", "-inject-k8s-namespace-mirroring-prefix", c.MirroringPrefix, @@ -203,7 +206,7 @@ func TestRun_ConnectInject_NamespaceMirroring(t *testing.T) { } } -// Test that ACL policies get updated if namespaces config changes. +// Test that ACL policies get updated if namespaces/partition config changes. 
func TestRun_ACLPolicyUpdates(t *testing.T) { t.Parallel() @@ -234,9 +237,11 @@ func TestRun_ACLPolicyUpdates(t *testing.T) { "-terminating-gateway-name=anothergw", "-create-controller-token", } - // Our second run, we're going to update from namespaces disabled to - // namespaces enabled with a single destination ns. + // Our second run, we're going to update from partitions and namespaces disabled to + // namespaces enabled with a single destination ns and partitions enabled. secondRunArgs := append(firstRunArgs, + "-enable-partitions", + "-partition=default", "-enable-namespaces", "-consul-sync-destination-namespace=sync", "-consul-inject-destination-namespace=dest") @@ -322,6 +327,7 @@ func TestRun_ACLPolicyUpdates(t *testing.T) { "gw-terminating-gateway-token", "anothergw-terminating-gateway-token", "controller-token", + "partitions-token", } policies, _, err = consul.ACL().PolicyList(nil) require.NoError(err) @@ -348,10 +354,13 @@ func TestRun_ACLPolicyUpdates(t *testing.T) { case "connect-inject-token": // The connect inject token doesn't have namespace config, // but does change to operator:write from an empty string. - require.Contains(actRules, "operator = \"write\"") + require.Contains(actRules, "policy = \"write\"") case "client-snapshot-agent-token", "enterprise-license-token": // The snapshot agent and enterprise license tokens shouldn't change. require.NotContains(actRules, "namespace") + require.Contains(actRules, "acl = \"write\"") + case "partitions-token": + require.Contains(actRules, "operator = \"write\"") default: // Assert that the policies have the word namespace in them. This // tests that they were updated. The actual contents are tested @@ -528,6 +537,8 @@ func TestRun_ConnectInject_Updates(t *testing.T) { "-server-port=" + strings.Split(testAgent.HTTPAddr, ":")[1], "-resource-prefix=" + resourcePrefix, "-k8s-namespace=" + ns, + "-enable-partitions", + "-partition=default", "-create-inject-token", } @@ -693,6 +704,13 @@ func TestRun_TokensWithNamespacesEnabled(t *testing.T) { SecretNames: []string{resourcePrefix + "-controller-acl-token"}, LocalToken: false, }, + "partitions token": { + TokenFlags: []string{"-enable-partitions", "-partition=default"}, + PolicyNames: []string{"partitions-token"}, + PolicyDCs: []string{"dc1"}, + SecretNames: []string{resourcePrefix + "-partitions-acl-token"}, + LocalToken: true, + }, } for testName, c := range cases { t.Run(testName, func(t *testing.T) { @@ -713,6 +731,8 @@ func TestRun_TokensWithNamespacesEnabled(t *testing.T) { "-server-port", strings.Split(testSvr.HTTPAddr, ":")[1], "-resource-prefix=" + resourcePrefix, "-k8s-namespace=" + ns, + "-enable-partitions", + "-partition=default", "-enable-namespaces", }, c.TokenFlags...) 
@@ -779,37 +799,43 @@ func TestRun_GatewayNamespaceParsing(t *testing.T) { "gateway-ingress-gateway-token", "another-gateway-ingress-gateway-token"}, ExpectedPolicies: []string{` -namespace "default" { - service "ingress" { - policy = "write" - } - node_prefix "" { - policy = "read" - } - service_prefix "" { - policy = "read" +partition "default" { + namespace "default" { + service "ingress" { + policy = "write" + } + node_prefix "" { + policy = "read" + } + service_prefix "" { + policy = "read" + } } }`, ` -namespace "default" { - service "gateway" { - policy = "write" - } - node_prefix "" { - policy = "read" - } - service_prefix "" { - policy = "read" +partition "default" { + namespace "default" { + service "gateway" { + policy = "write" + } + node_prefix "" { + policy = "read" + } + service_prefix "" { + policy = "read" + } } }`, ` -namespace "default" { - service "another-gateway" { - policy = "write" - } - node_prefix "" { - policy = "read" - } - service_prefix "" { - policy = "read" +partition "default" { + namespace "default" { + service "another-gateway" { + policy = "write" + } + node_prefix "" { + policy = "read" + } + service_prefix "" { + policy = "read" + } } }`}, }, @@ -822,37 +848,43 @@ namespace "default" { "gateway-ingress-gateway-token", "another-gateway-ingress-gateway-token"}, ExpectedPolicies: []string{` -namespace "default" { - service "ingress" { - policy = "write" - } - node_prefix "" { - policy = "read" - } - service_prefix "" { - policy = "read" +partition "default" { + namespace "default" { + service "ingress" { + policy = "write" + } + node_prefix "" { + policy = "read" + } + service_prefix "" { + policy = "read" + } } }`, ` -namespace "namespace1" { - service "gateway" { - policy = "write" - } - node_prefix "" { - policy = "read" - } - service_prefix "" { - policy = "read" +partition "default" { + namespace "namespace1" { + service "gateway" { + policy = "write" + } + node_prefix "" { + policy = "read" + } + service_prefix "" { + policy = "read" + } } }`, ` -namespace "namespace2" { - service "another-gateway" { - policy = "write" - } - node_prefix "" { - policy = "read" - } - service_prefix "" { - policy = "read" +partition "default" { + namespace "namespace2" { + service "another-gateway" { + policy = "write" + } + node_prefix "" { + policy = "read" + } + service_prefix "" { + policy = "read" + } } }`}, }, @@ -865,28 +897,34 @@ namespace "namespace2" { "gateway-terminating-gateway-token", "another-gateway-terminating-gateway-token"}, ExpectedPolicies: []string{` -namespace "default" { - service "terminating" { - policy = "write" - } - node_prefix "" { - policy = "read" +partition "default" { + namespace "default" { + service "terminating" { + policy = "write" + } + node_prefix "" { + policy = "read" + } } }`, ` -namespace "default" { - service "gateway" { - policy = "write" - } - node_prefix "" { - policy = "read" +partition "default" { + namespace "default" { + service "gateway" { + policy = "write" + } + node_prefix "" { + policy = "read" + } } }`, ` -namespace "default" { - service "another-gateway" { - policy = "write" - } - node_prefix "" { - policy = "read" +partition "default" { + namespace "default" { + service "another-gateway" { + policy = "write" + } + node_prefix "" { + policy = "read" + } } }`}, }, @@ -899,28 +937,34 @@ namespace "default" { "gateway-terminating-gateway-token", "another-gateway-terminating-gateway-token"}, ExpectedPolicies: []string{` -namespace "default" { - service "terminating" { - policy = "write" - } - node_prefix "" { - 
policy = "read" +partition "default" { + namespace "default" { + service "terminating" { + policy = "write" + } + node_prefix "" { + policy = "read" + } } }`, ` -namespace "namespace1" { - service "gateway" { - policy = "write" - } - node_prefix "" { - policy = "read" +partition "default" { + namespace "namespace1" { + service "gateway" { + policy = "write" + } + node_prefix "" { + policy = "read" + } } }`, ` -namespace "namespace2" { - service "another-gateway" { - policy = "write" - } - node_prefix "" { - policy = "read" +partition "default" { + namespace "namespace2" { + service "another-gateway" { + policy = "write" + } + node_prefix "" { + policy = "read" + } } }`}, }, @@ -944,6 +988,8 @@ namespace "namespace2" { "-server-port", strings.Split(testSvr.HTTPAddr, ":")[1], "-resource-prefix=" + resourcePrefix, "-enable-namespaces=true", + "-enable-partitions", + "-partition=default", }, c.TokenFlags...) responseCode := cmd.Run(cmdArgs) diff --git a/control-plane/subcommand/server-acl-init/rules.go b/control-plane/subcommand/server-acl-init/rules.go index 1b8a64b704..ec1a474a6f 100644 --- a/control-plane/subcommand/server-acl-init/rules.go +++ b/control-plane/subcommand/server-acl-init/rules.go @@ -7,6 +7,8 @@ import ( ) type rulesData struct { + EnablePartitions bool + PartitionName string EnableNamespaces bool SyncConsulDestNS string SyncEnableNSMirroring bool @@ -35,28 +37,55 @@ service "consul-snapshot" { }` const entLicenseRules = `operator = "write"` +const entPartitionLicenseRules = `acl = "write"` -const crossNamespaceRules = `namespace_prefix "" { - service_prefix "" { - policy = "read" +const partitionRules = `operator = "write" +agent_prefix "" { + policy = "read" +} +partition_prefix "" { + namespace_prefix "" { + acl = "write" } - node_prefix "" { - policy = "read" +}` + +func (c *Command) crossNamespaceRule() (string, error) { + crossNamespaceRulesTpl := `{{- if .EnablePartitions }} +partition "{{ .PartitionName }}" { +{{- end }} + namespace_prefix "" { + service_prefix "" { + policy = "read" + } + node_prefix "" { + policy = "read" + } } -} ` +{{- if .EnablePartitions }} +} +{{- end }}` + + return c.renderRules(crossNamespaceRulesTpl) +} func (c *Command) agentRules() (string, error) { agentRulesTpl := ` +{{- if .EnablePartitions }} +partition "{{ .PartitionName }}" { +{{- end }} node_prefix "" { policy = "write" } {{- if .EnableNamespaces }} -namespace_prefix "" { + namespace_prefix "" { {{- end }} - service_prefix "" { - policy = "read" - } + service_prefix "" { + policy = "read" + } {{- if .EnableNamespaces }} + } +{{- end }} +{{- if .EnablePartitions }} } {{- end }} ` @@ -80,16 +109,22 @@ func (c *Command) anonymousTokenRules() (string, error) { // ACL token. Thus the anonymous policy must // allow reading all services. 
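+	// When partitions are enabled, these anonymous-token rules are additionally wrapped in a
+	// partition_prefix "" block so the read permissions apply in every partition.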
anonTokenRulesTpl := ` +{{- if .EnablePartitions }} +partition_prefix "" { +{{- end }} {{- if .EnableNamespaces }} -namespace_prefix "" { + namespace_prefix "" { {{- end }} - node_prefix "" { - policy = "read" - } - service_prefix "" { - policy = "read" - } + node_prefix "" { + policy = "read" + } + service_prefix "" { + policy = "read" + } {{- if .EnableNamespaces }} + } +{{- end }} +{{- if .EnablePartitions }} } {{- end }} ` @@ -133,19 +168,25 @@ namespace_prefix "" { func (c *Command) ingressGatewayRules(name, namespace string) (string, error) { ingressGatewayRulesTpl := ` +{{- if .EnablePartitions }} +partition "{{ .PartitionName }}" { +{{- end }} {{- if .EnableNamespaces }} -namespace "{{ .GatewayNamespace }}" { + namespace "{{ .GatewayNamespace }}" { {{- end }} - service "{{ .GatewayName }}" { - policy = "write" - } - node_prefix "" { - policy = "read" - } - service_prefix "" { - policy = "read" - } + service "{{ .GatewayName }}" { + policy = "write" + } + node_prefix "" { + policy = "read" + } + service_prefix "" { + policy = "read" + } {{- if .EnableNamespaces }} + } +{{- end }} +{{- if .EnablePartitions }} } {{- end }} ` @@ -159,16 +200,22 @@ namespace "{{ .GatewayNamespace }}" { // of the initial implementation func (c *Command) terminatingGatewayRules(name, namespace string) (string, error) { terminatingGatewayRulesTpl := ` +{{- if .EnablePartitions }} +partition "{{ .PartitionName }}" { +{{- end }} {{- if .EnableNamespaces }} -namespace "{{ .GatewayNamespace }}" { + namespace "{{ .GatewayNamespace }}" { {{- end }} - service "{{ .GatewayName }}" { - policy = "write" - } - node_prefix "" { - policy = "read" - } + service "{{ .GatewayName }}" { + policy = "write" + } + node_prefix "" { + policy = "read" + } {{- if .EnableNamespaces }} + } +{{- end }} +{{- if .EnablePartitions }} } {{- end }} ` @@ -207,22 +254,33 @@ func (c *Command) injectRules() (string, error) { // The Connect injector needs permissions to create namespaces when namespaces are enabled. // It must also create/update service health checks via the endpoints controller. // When ACLs are enabled, the endpoints controller needs "acl:write" permissions - // to delete ACL tokens created via "consul login". + // to delete ACL tokens created via "consul login". policy = "write" is required when + // creating namespaces within a partition. injectRulesTpl := ` +{{- if .EnablePartitions }} +partition "{{ .PartitionName }}" { +{{- else }} {{- if .EnableNamespaces }} -operator = "write" + operator = "write" {{- end }} -node_prefix "" { - policy = "write" -} -{{- if .EnableNamespaces }} -namespace_prefix "" { {{- end }} - acl = "write" - service_prefix "" { + node_prefix "" { policy = "write" } {{- if .EnableNamespaces }} + namespace_prefix "" { +{{- end }} +{{- if .EnablePartitions }} + policy = "write" +{{- end }} + acl = "write" + service_prefix "" { + policy = "write" + } +{{- if .EnableNamespaces }} + } +{{- end }} +{{- if .EnablePartitions }} } {{- end }}` return c.renderRules(injectRulesTpl) @@ -237,43 +295,62 @@ func (c *Command) aclReplicationRules() (string, error) { // datacenters during federation since in order to start ACL replication, // we need a token with both replication and agent permissions. 
aclReplicationRulesTpl := ` -operator = "write" -agent_prefix "" { - policy = "read" -} -node_prefix "" { - policy = "write" -} -{{- if .EnableNamespaces }} -namespace_prefix "" { +{{- if .EnablePartitions }} +partition "default" { {{- end }} - acl = "write" - service_prefix "" { + operator = "write" + agent_prefix "" { policy = "read" - intentions = "read" } + node_prefix "" { + policy = "write" + } +{{- if .EnableNamespaces }} + namespace_prefix "" { +{{- end }} + acl = "write" + service_prefix "" { + policy = "read" + intentions = "read" + } {{- if .EnableNamespaces }} + } +{{- end }} +{{- if .EnablePartitions }} } {{- end }} ` return c.renderRules(aclReplicationRulesTpl) } +// policy = "write" is required when creating namespaces within a partition. func (c *Command) controllerRules() (string, error) { controllerRules := ` -operator = "write" +{{- if .EnablePartitions }} +partition "{{ .PartitionName }}" { + mesh = "write" + acl = "write" +{{- else }} + operator = "write" +{{- end }} {{- if .EnableNamespaces }} {{- if .InjectEnableNSMirroring }} -namespace_prefix "{{ .InjectNSMirroringPrefix }}" { + namespace_prefix "{{ .InjectNSMirroringPrefix }}" { {{- else }} -namespace "{{ .InjectConsulDestNS }}" { + namespace "{{ .InjectConsulDestNS }}" { {{- end }} {{- end }} - service_prefix "" { +{{- if .EnablePartitions }} policy = "write" - intentions = "write" - } +{{- end }} + service_prefix "" { + policy = "write" + intentions = "write" + } {{- if .EnableNamespaces }} + } +{{- end }} +{{- if .EnablePartitions }} } {{- end }} ` @@ -282,6 +359,8 @@ namespace "{{ .InjectConsulDestNS }}" { func (c *Command) rulesData() rulesData { return rulesData{ + EnablePartitions: c.flagEnablePartitions, + PartitionName: c.flagPartitionName, EnableNamespaces: c.flagEnableNamespaces, SyncConsulDestNS: c.flagConsulSyncDestinationNamespace, SyncEnableNSMirroring: c.flagEnableSyncK8SNSMirroring, diff --git a/control-plane/subcommand/server-acl-init/rules_test.go b/control-plane/subcommand/server-acl-init/rules_test.go index 1160236eef..9914f23585 100644 --- a/control-plane/subcommand/server-acl-init/rules_test.go +++ b/control-plane/subcommand/server-acl-init/rules_test.go @@ -10,28 +10,48 @@ import ( func TestAgentRules(t *testing.T) { cases := []struct { Name string + EnablePartitions bool + PartitionName string EnableNamespaces bool Expected string }{ { - "Namespaces are disabled", - false, - `node_prefix "" { + Name: "Namespaces and Partitions are disabled", + Expected: ` + node_prefix "" { policy = "write" } - service_prefix "" { - policy = "read" + service_prefix "" { + policy = "read" + }`, + }, + { + Name: "Namespaces are enabled, Partitions are disabled", + EnableNamespaces: true, + Expected: ` + node_prefix "" { + policy = "write" + } + namespace_prefix "" { + service_prefix "" { + policy = "read" + } }`, }, { - "Namespaces are enabled", - true, - `node_prefix "" { + Name: "Namespaces and Partitions are enabled", + EnablePartitions: true, + PartitionName: "part-1", + EnableNamespaces: true, + Expected: ` +partition "part-1" { + node_prefix "" { policy = "write" } -namespace_prefix "" { - service_prefix "" { - policy = "read" + namespace_prefix "" { + service_prefix "" { + policy = "read" + } } }`, }, @@ -39,16 +59,16 @@ namespace_prefix "" { for _, tt := range cases { t.Run(tt.Name, func(t *testing.T) { - require := require.New(t) - cmd := Command{ + flagEnablePartitions: tt.EnablePartitions, + flagPartitionName: tt.PartitionName, flagEnableNamespaces: tt.EnableNamespaces, } agentRules, err := 
cmd.agentRules() - require.NoError(err) - require.Equal(tt.Expected, agentRules) + require.NoError(t, err) + require.Equal(t, tt.Expected, agentRules) }) } } @@ -56,30 +76,48 @@ namespace_prefix "" { func TestAnonymousTokenRules(t *testing.T) { cases := []struct { Name string + EnablePartitions bool + PartitionName string EnableNamespaces bool Expected string }{ { - "Namespaces are disabled", - false, - ` - node_prefix "" { - policy = "read" - } - service_prefix "" { - policy = "read" + Name: "Namespaces and Partitions are disabled", + Expected: ` + node_prefix "" { + policy = "read" + } + service_prefix "" { + policy = "read" + }`, + }, + { + Name: "Namespaces are enabled, Partitions are disabled", + EnableNamespaces: true, + Expected: ` + namespace_prefix "" { + node_prefix "" { + policy = "read" + } + service_prefix "" { + policy = "read" + } }`, }, { - "Namespaces are enabled", - true, - ` -namespace_prefix "" { - node_prefix "" { - policy = "read" - } - service_prefix "" { - policy = "read" + Name: "Namespaces and Partitions are enabled", + EnablePartitions: true, + PartitionName: "part-2", + EnableNamespaces: true, + Expected: ` +partition_prefix "" { + namespace_prefix "" { + node_prefix "" { + policy = "read" + } + service_prefix "" { + policy = "read" + } } }`, }, @@ -87,16 +125,16 @@ namespace_prefix "" { for _, tt := range cases { t.Run(tt.Name, func(t *testing.T) { - require := require.New(t) - cmd := Command{ + flagEnablePartitions: tt.EnablePartitions, + flagPartitionName: tt.PartitionName, flagEnableNamespaces: tt.EnableNamespaces, } rules, err := cmd.anonymousTokenRules() - require.NoError(err) - require.Equal(tt.Expected, rules) + require.NoError(t, err) + require.Equal(t, tt.Expected, rules) }) } } @@ -108,9 +146,8 @@ func TestMeshGatewayRules(t *testing.T) { Expected string }{ { - "Namespaces are disabled", - false, - `agent_prefix "" { + Name: "Namespaces are disabled", + Expected: `agent_prefix "" { policy = "read" } service "mesh-gateway" { @@ -124,9 +161,9 @@ func TestMeshGatewayRules(t *testing.T) { }`, }, { - "Namespaces are enabled", - true, - `agent_prefix "" { + Name: "Namespaces are enabled", + EnableNamespaces: true, + Expected: `agent_prefix "" { policy = "read" } namespace "default" { @@ -147,16 +184,14 @@ namespace_prefix "" { for _, tt := range cases { t.Run(tt.Name, func(t *testing.T) { - require := require.New(t) - cmd := Command{ flagEnableNamespaces: tt.EnableNamespaces, } meshGatewayRules, err := cmd.meshGatewayRules() - require.NoError(err) - require.Equal(tt.Expected, meshGatewayRules) + require.NoError(t, err) + require.Equal(t, tt.Expected, meshGatewayRules) }) } } @@ -166,58 +201,102 @@ func TestIngressGatewayRules(t *testing.T) { Name string GatewayName string GatewayNamespace string + EnablePartitions bool + PartitionName string EnableNamespaces bool Expected string }{ { - "Namespaces are disabled", - "ingress-gateway", - "", - false, - ` - service "ingress-gateway" { - policy = "write" - } - node_prefix "" { - policy = "read" - } - service_prefix "" { - policy = "read" + Name: "Namespaces and Partitions are disabled", + GatewayName: "ingress-gateway", + Expected: ` + service "ingress-gateway" { + policy = "write" + } + node_prefix "" { + policy = "read" + } + service_prefix "" { + policy = "read" + }`, + }, + { + Name: "Namespaces are enabled, Partitions are disabled", + GatewayName: "gateway", + GatewayNamespace: "default", + EnableNamespaces: true, + Expected: ` + namespace "default" { + service "gateway" { + policy = "write" + } + 
node_prefix "" { + policy = "read" + } + service_prefix "" { + policy = "read" + } }`, }, { - "Namespaces are enabled", - "gateway", - "default", - true, - ` -namespace "default" { - service "gateway" { - policy = "write" - } - node_prefix "" { - policy = "read" - } - service_prefix "" { - policy = "read" + Name: "Namespaces are enabled, non-default namespace, Partitions are disabled", + GatewayName: "gateway", + GatewayNamespace: "non-default", + EnableNamespaces: true, + Expected: ` + namespace "non-default" { + service "gateway" { + policy = "write" + } + node_prefix "" { + policy = "read" + } + service_prefix "" { + policy = "read" + } + }`, + }, + { + Name: "Namespaces and Partitions are enabled", + GatewayName: "gateway", + GatewayNamespace: "default", + EnableNamespaces: true, + EnablePartitions: true, + PartitionName: "part-1", + Expected: ` +partition "part-1" { + namespace "default" { + service "gateway" { + policy = "write" + } + node_prefix "" { + policy = "read" + } + service_prefix "" { + policy = "read" + } } }`, }, { - "Namespaces are enabled, non-default namespace", - "gateway", - "non-default", - true, - ` -namespace "non-default" { - service "gateway" { - policy = "write" - } - node_prefix "" { - policy = "read" - } - service_prefix "" { - policy = "read" + Name: "Namespaces and Partitions are enabled, non-default namespace", + GatewayName: "gateway", + GatewayNamespace: "non-default", + EnableNamespaces: true, + EnablePartitions: true, + PartitionName: "default", + Expected: ` +partition "default" { + namespace "non-default" { + service "gateway" { + policy = "write" + } + node_prefix "" { + policy = "read" + } + service_prefix "" { + policy = "read" + } } }`, }, @@ -225,16 +304,16 @@ namespace "non-default" { for _, tt := range cases { t.Run(tt.Name, func(t *testing.T) { - require := require.New(t) - cmd := Command{ + flagEnablePartitions: tt.EnablePartitions, + flagPartitionName: tt.PartitionName, flagEnableNamespaces: tt.EnableNamespaces, } ingressGatewayRules, err := cmd.ingressGatewayRules(tt.GatewayName, tt.GatewayNamespace) - require.NoError(err) - require.Equal(tt.Expected, ingressGatewayRules) + require.NoError(t, err) + require.Equal(t, tt.Expected, ingressGatewayRules) }) } } @@ -245,48 +324,86 @@ func TestTerminatingGatewayRules(t *testing.T) { GatewayName string GatewayNamespace string EnableNamespaces bool + EnablePartitions bool + PartitionName string Expected string }{ { - "Namespaces are disabled", - "terminating-gateway", - "", - false, - ` - service "terminating-gateway" { - policy = "write" - } - node_prefix "" { - policy = "read" + Name: "Namespaces and Partitions are disabled", + GatewayName: "terminating-gateway", + Expected: ` + service "terminating-gateway" { + policy = "write" + } + node_prefix "" { + policy = "read" + }`, + }, + { + Name: "Namespaces are enabled, Partitions are disabled", + GatewayName: "gateway", + GatewayNamespace: "default", + EnableNamespaces: true, + Expected: ` + namespace "default" { + service "gateway" { + policy = "write" + } + node_prefix "" { + policy = "read" + } }`, }, { - "Namespaces are enabled", - "gateway", - "default", - true, - ` -namespace "default" { - service "gateway" { - policy = "write" - } - node_prefix "" { - policy = "read" + Name: "Namespaces are enabled, non-default namespace, Partitions are disabled", + GatewayName: "gateway", + GatewayNamespace: "non-default", + EnableNamespaces: true, + Expected: ` + namespace "non-default" { + service "gateway" { + policy = "write" + } + node_prefix "" { + 
policy = "read" + } + }`, + }, + { + Name: "Namespaces and Partitions are enabled", + GatewayName: "gateway", + GatewayNamespace: "default", + EnableNamespaces: true, + EnablePartitions: true, + PartitionName: "part-1", + Expected: ` +partition "part-1" { + namespace "default" { + service "gateway" { + policy = "write" + } + node_prefix "" { + policy = "read" + } } }`, }, { - "Namespaces are enabled, non-default namespace", - "gateway", - "non-default", - true, - ` -namespace "non-default" { - service "gateway" { - policy = "write" - } - node_prefix "" { - policy = "read" + Name: "Namespaces and Partitions are enabled, non-default namespace", + GatewayName: "gateway", + GatewayNamespace: "non-default", + EnableNamespaces: true, + EnablePartitions: true, + PartitionName: "default", + Expected: ` +partition "default" { + namespace "non-default" { + service "gateway" { + policy = "write" + } + node_prefix "" { + policy = "read" + } } }`, }, @@ -294,16 +411,16 @@ namespace "non-default" { for _, tt := range cases { t.Run(tt.Name, func(t *testing.T) { - require := require.New(t) - cmd := Command{ + flagEnablePartitions: tt.EnablePartitions, + flagPartitionName: tt.PartitionName, flagEnableNamespaces: tt.EnableNamespaces, } terminatingGatewayRules, err := cmd.terminatingGatewayRules(tt.GatewayName, tt.GatewayNamespace) - require.NoError(err) - require.Equal(tt.Expected, terminatingGatewayRules) + require.NoError(t, err) + require.Equal(t, tt.Expected, terminatingGatewayRules) }) } } @@ -319,13 +436,12 @@ func TestSyncRules(t *testing.T) { Expected string }{ { - "Namespaces are disabled", - false, - "sync-namespace", - true, - "prefix-", - "k8s-sync", - `node "k8s-sync" { + Name: "Namespaces are disabled", + ConsulSyncDestinationNamespace: "sync-namespace", + EnableSyncK8SNSMirroring: true, + SyncK8SNSMirroringPrefix: "prefix-", + SyncConsulNodeName: "k8s-sync", + Expected: `node "k8s-sync" { policy = "write" } node_prefix "" { @@ -336,13 +452,12 @@ func TestSyncRules(t *testing.T) { }`, }, { - "Namespaces are disabled, non-default node name", - false, - "sync-namespace", - true, - "prefix-", - "new-node-name", - `node "new-node-name" { + Name: "Namespaces are disabled, non-default node name", + ConsulSyncDestinationNamespace: "sync-namespace", + EnableSyncK8SNSMirroring: true, + SyncK8SNSMirroringPrefix: "prefix-", + SyncConsulNodeName: "new-node-name", + Expected: `node "new-node-name" { policy = "write" } node_prefix "" { @@ -353,13 +468,12 @@ func TestSyncRules(t *testing.T) { }`, }, { - "Namespaces are enabled, mirroring disabled", - true, - "sync-namespace", - false, - "prefix-", - "k8s-sync", - `node "k8s-sync" { + Name: "Namespaces are enabled, mirroring disabled", + EnableNamespaces: true, + ConsulSyncDestinationNamespace: "sync-namespace", + SyncK8SNSMirroringPrefix: "prefix-", + SyncConsulNodeName: "k8s-sync", + Expected: `node "k8s-sync" { policy = "write" } operator = "write" @@ -373,13 +487,12 @@ namespace "sync-namespace" { }`, }, { - "Namespaces are enabled, mirroring disabled, non-default node name", - true, - "sync-namespace", - false, - "prefix-", - "new-node-name", - `node "new-node-name" { + Name: "Namespaces are enabled, mirroring disabled, non-default node name", + EnableNamespaces: true, + ConsulSyncDestinationNamespace: "sync-namespace", + SyncK8SNSMirroringPrefix: "prefix-", + SyncConsulNodeName: "new-node-name", + Expected: `node "new-node-name" { policy = "write" } operator = "write" @@ -393,13 +506,12 @@ namespace "sync-namespace" { }`, }, { - "Namespaces are enabled, 
mirroring enabled, prefix empty", - true, - "sync-namespace", - true, - "", - "k8s-sync", - `node "k8s-sync" { + Name: "Namespaces are enabled, mirroring enabled, prefix empty", + EnableNamespaces: true, + ConsulSyncDestinationNamespace: "sync-namespace", + EnableSyncK8SNSMirroring: true, + SyncConsulNodeName: "k8s-sync", + Expected: `node "k8s-sync" { policy = "write" } operator = "write" @@ -413,13 +525,12 @@ namespace_prefix "" { }`, }, { - "Namespaces are enabled, mirroring enabled, prefix empty, non-default node name", - true, - "sync-namespace", - true, - "", - "new-node-name", - `node "new-node-name" { + Name: "Namespaces are enabled, mirroring enabled, prefix empty, non-default node name", + EnableNamespaces: true, + ConsulSyncDestinationNamespace: "sync-namespace", + EnableSyncK8SNSMirroring: true, + SyncConsulNodeName: "new-node-name", + Expected: `node "new-node-name" { policy = "write" } operator = "write" @@ -433,13 +544,13 @@ namespace_prefix "" { }`, }, { - "Namespaces are enabled, mirroring enabled, prefix defined", - true, - "sync-namespace", - true, - "prefix-", - "k8s-sync", - `node "k8s-sync" { + Name: "Namespaces are enabled, mirroring enabled, prefix defined", + EnableNamespaces: true, + ConsulSyncDestinationNamespace: "sync-namespace", + EnableSyncK8SNSMirroring: true, + SyncK8SNSMirroringPrefix: "prefix-", + SyncConsulNodeName: "k8s-sync", + Expected: `node "k8s-sync" { policy = "write" } operator = "write" @@ -453,13 +564,13 @@ namespace_prefix "prefix-" { }`, }, { - "Namespaces are enabled, mirroring enabled, prefix defined, non-default node name", - true, - "sync-namespace", - true, - "prefix-", - "new-node-name", - `node "new-node-name" { + Name: "Namespaces are enabled, mirroring enabled, prefix defined, non-default node name", + EnableNamespaces: true, + ConsulSyncDestinationNamespace: "sync-namespace", + EnableSyncK8SNSMirroring: true, + SyncK8SNSMirroringPrefix: "prefix-", + SyncConsulNodeName: "new-node-name", + Expected: `node "new-node-name" { policy = "write" } operator = "write" @@ -476,8 +587,6 @@ namespace_prefix "prefix-" { for _, tt := range cases { t.Run(tt.Name, func(t *testing.T) { - require := require.New(t) - cmd := Command{ flagEnableNamespaces: tt.EnableNamespaces, flagConsulSyncDestinationNamespace: tt.ConsulSyncDestinationNamespace, @@ -488,8 +597,8 @@ namespace_prefix "prefix-" { syncRules, err := cmd.syncRules() - require.NoError(err) - require.Equal(tt.Expected, syncRules) + require.NoError(t, err) + require.Equal(t, tt.Expected, syncRules) }) } } @@ -498,48 +607,71 @@ namespace_prefix "prefix-" { func TestInjectRules(t *testing.T) { cases := []struct { EnableNamespaces bool + EnablePartitions bool + PartitionName string Expected string }{ { EnableNamespaces: false, + EnablePartitions: false, Expected: ` -node_prefix "" { - policy = "write" -} - acl = "write" - service_prefix "" { + node_prefix "" { + policy = "write" + } + acl = "write" + service_prefix "" { + policy = "write" + }`, + }, + { + EnableNamespaces: true, + EnablePartitions: false, + Expected: ` + operator = "write" + node_prefix "" { policy = "write" + } + namespace_prefix "" { + acl = "write" + service_prefix "" { + policy = "write" + } }`, }, { EnableNamespaces: true, + EnablePartitions: true, + PartitionName: "part-1", Expected: ` -operator = "write" -node_prefix "" { - policy = "write" -} -namespace_prefix "" { - acl = "write" - service_prefix "" { +partition "part-1" { + node_prefix "" { + policy = "write" + } + namespace_prefix "" { policy = "write" + acl = "write" + 
service_prefix "" { + policy = "write" + } } }`, }, } for _, tt := range cases { - caseName := fmt.Sprintf("ns=%t", tt.EnableNamespaces) + caseName := fmt.Sprintf("ns=%t, partition=%t", tt.EnableNamespaces, tt.EnablePartitions) t.Run(caseName, func(t *testing.T) { - require := require.New(t) cmd := Command{ + flagEnablePartitions: tt.EnablePartitions, + flagPartitionName: tt.PartitionName, flagEnableNamespaces: tt.EnableNamespaces, } injectorRules, err := cmd.injectRules() - require.NoError(err) - require.Equal(tt.Expected, injectorRules) + require.NoError(t, err) + require.Equal(t, tt.Expected, injectorRules) }) } } @@ -548,39 +680,65 @@ func TestReplicationTokenRules(t *testing.T) { cases := []struct { Name string EnableNamespaces bool + EnablePartitions bool + PartitionName string Expected string }{ { - "Namespaces are disabled", - false, - `operator = "write" -agent_prefix "" { - policy = "read" -} -node_prefix "" { - policy = "write" -} - acl = "write" - service_prefix "" { + Name: "Namespaces and Partitions are disabled", + Expected: ` + operator = "write" + agent_prefix "" { policy = "read" - intentions = "read" + } + node_prefix "" { + policy = "write" + } + acl = "write" + service_prefix "" { + policy = "read" + intentions = "read" + }`, + }, + { + Name: "Namespaces are enabled, Partitions are disabled", + EnableNamespaces: true, + Expected: ` + operator = "write" + agent_prefix "" { + policy = "read" + } + node_prefix "" { + policy = "write" + } + namespace_prefix "" { + acl = "write" + service_prefix "" { + policy = "read" + intentions = "read" + } }`, }, { - "Namespaces are enabled", - true, - `operator = "write" -agent_prefix "" { - policy = "read" -} -node_prefix "" { - policy = "write" -} -namespace_prefix "" { - acl = "write" - service_prefix "" { + Name: "Namespaces and Partitions are enabled, default partition", + EnableNamespaces: true, + EnablePartitions: true, + PartitionName: "default", + Expected: ` +partition "default" { + operator = "write" + agent_prefix "" { policy = "read" - intentions = "read" + } + node_prefix "" { + policy = "write" + } + namespace_prefix "" { + acl = "write" + service_prefix "" { + policy = "read" + intentions = "read" + } } }`, }, @@ -588,13 +746,14 @@ namespace_prefix "" { for _, tt := range cases { t.Run(tt.Name, func(t *testing.T) { - require := require.New(t) cmd := Command{ + flagEnablePartitions: tt.EnablePartitions, + flagPartitionName: tt.PartitionName, flagEnableNamespaces: tt.EnableNamespaces, } replicationTokenRules, err := cmd.aclReplicationRules() - require.NoError(err) - require.Equal(tt.Expected, replicationTokenRules) + require.NoError(t, err) + require.Equal(t, tt.Expected, replicationTokenRules) }) } } @@ -602,6 +761,8 @@ namespace_prefix "" { func TestControllerRules(t *testing.T) { cases := []struct { Name string + EnablePartitions bool + PartitionName string EnableNamespaces bool DestConsulNS string Mirroring bool @@ -609,48 +770,109 @@ func TestControllerRules(t *testing.T) { Expected string }{ { - Name: "namespaces=disabled", - EnableNamespaces: false, - Expected: `operator = "write" - service_prefix "" { - policy = "write" - intentions = "write" + Name: "namespaces=disabled, partitions=disabled", + Expected: ` + operator = "write" + service_prefix "" { + policy = "write" + intentions = "write" + }`, + }, + { + Name: "namespaces=enabled, consulDestNS=consul, partitions=disabled", + EnableNamespaces: true, + DestConsulNS: "consul", + Expected: ` + operator = "write" + namespace "consul" { + service_prefix "" { + policy 
= "write" + intentions = "write" + } }`, }, { - Name: "namespaces=enabled, consulDestNS=consul", + Name: "namespaces=enabled, mirroring=true, partitions=disabled", + EnableNamespaces: true, + Mirroring: true, + Expected: ` + operator = "write" + namespace_prefix "" { + service_prefix "" { + policy = "write" + intentions = "write" + } + }`, + }, + { + Name: "namespaces=enabled, mirroring=true, mirroringPrefix=prefix-, partitions=disabled", + EnableNamespaces: true, + Mirroring: true, + MirroringPrefix: "prefix-", + Expected: ` + operator = "write" + namespace_prefix "prefix-" { + service_prefix "" { + policy = "write" + intentions = "write" + } + }`, + }, + { + Name: "namespaces=enabled, consulDestNS=consul, partitions=enabled", + EnablePartitions: true, + PartitionName: "part-1", EnableNamespaces: true, DestConsulNS: "consul", - Expected: `operator = "write" -namespace "consul" { - service_prefix "" { + Expected: ` +partition "part-1" { + mesh = "write" + acl = "write" + namespace "consul" { policy = "write" - intentions = "write" + service_prefix "" { + policy = "write" + intentions = "write" + } } }`, }, { - Name: "namespaces=enabled, mirroring=true", + Name: "namespaces=enabled, mirroring=true, partitions=enabled", + EnablePartitions: true, + PartitionName: "part-1", EnableNamespaces: true, Mirroring: true, - Expected: `operator = "write" -namespace_prefix "" { - service_prefix "" { + Expected: ` +partition "part-1" { + mesh = "write" + acl = "write" + namespace_prefix "" { policy = "write" - intentions = "write" + service_prefix "" { + policy = "write" + intentions = "write" + } } }`, }, { - Name: "namespaces=enabled, mirroring=true, mirroringPrefix=prefix-", + Name: "namespaces=enabled, mirroring=true, mirroringPrefix=prefix-, partitions=enabled", + EnablePartitions: true, + PartitionName: "part-1", EnableNamespaces: true, Mirroring: true, MirroringPrefix: "prefix-", - Expected: `operator = "write" -namespace_prefix "prefix-" { - service_prefix "" { + Expected: ` +partition "part-1" { + mesh = "write" + acl = "write" + namespace_prefix "prefix-" { policy = "write" - intentions = "write" + service_prefix "" { + policy = "write" + intentions = "write" + } } }`, }, @@ -658,19 +880,19 @@ namespace_prefix "prefix-" { for _, tt := range cases { t.Run(tt.Name, func(t *testing.T) { - require := require.New(t) - cmd := Command{ flagEnableNamespaces: tt.EnableNamespaces, flagConsulInjectDestinationNamespace: tt.DestConsulNS, flagEnableInjectK8SNSMirroring: tt.Mirroring, flagInjectK8SNSMirroringPrefix: tt.MirroringPrefix, + flagEnablePartitions: tt.EnablePartitions, + flagPartitionName: tt.PartitionName, } rules, err := cmd.controllerRules() - require.NoError(err) - require.Equal(tt.Expected, rules) + require.NoError(t, err) + require.Equal(t, tt.Expected, rules) }) } }