diff --git a/.circleci/config.yml b/.circleci/config.yml
index 88d3aff594..a8ff807ba2 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -646,7 +646,7 @@ jobs:
 
       - run: mkdir -p $TEST_RESULTS
       - run-acceptance-tests:
-          additional-flags: -kubeconfig="$primary_kubeconfig" -secondary-kubeconfig="$secondary_kubeconfig" -enable-pod-security-policies -enable-transparent-proxy
+          additional-flags: -kubeconfig="$primary_kubeconfig" -secondary-kubeconfig="$secondary_kubeconfig" -enable-pod-security-policies
       - store_test_results:
           path: /tmp/test-results
 
@@ -701,7 +701,7 @@ jobs:
 
       - run: mkdir -p $TEST_RESULTS
       - run-acceptance-tests:
-          additional-flags: -kubeconfig="$primary_kubeconfig" -secondary-kubeconfig="$secondary_kubeconfig" -enable-transparent-proxy
+          additional-flags: -kubeconfig="$primary_kubeconfig" -secondary-kubeconfig="$secondary_kubeconfig"
       - store_test_results:
           path: /tmp/test-results
 
@@ -762,7 +762,7 @@ jobs:
 
       - run: mkdir -p $TEST_RESULTS
       - run-acceptance-tests:
-          additional-flags: -kubeconfig="$primary_kubeconfig" -secondary-kubeconfig="$secondary_kubeconfig" -enable-transparent-proxy
+          additional-flags: -kubeconfig="$primary_kubeconfig" -secondary-kubeconfig="$secondary_kubeconfig"
       - store_test_results:
           path: /tmp/test-results
 
@@ -857,7 +857,7 @@ jobs:
           - ~/.go_workspace/pkg/mod
       - run: mkdir -p $TEST_RESULTS
       - run-acceptance-tests:
-          additional-flags: -use-kind -kubecontext="kind-dc1" -secondary-kubecontext="kind-dc2" -enable-transparent-proxy
+          additional-flags: -use-kind -kubecontext="kind-dc1" -secondary-kubecontext="kind-dc2"
       - store_test_results:
           path: /tmp/test-results
       - store_artifacts:
diff --git a/acceptance/framework/helpers/helpers.go b/acceptance/framework/helpers/helpers.go
index 17dd805123..2c1a6ebf47 100644
--- a/acceptance/framework/helpers/helpers.go
+++ b/acceptance/framework/helpers/helpers.go
@@ -12,10 +12,9 @@ import (
 	"time"
 
 	"github.com/gruntwork-io/terratest/modules/helm"
-	"github.com/hashicorp/consul/api"
-
 	"github.com/gruntwork-io/terratest/modules/random"
 	"github.com/hashicorp/consul-k8s/acceptance/framework/logger"
+	"github.com/hashicorp/consul/api"
 	"github.com/hashicorp/consul/sdk/testutil/retry"
 	"github.com/stretchr/testify/require"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
diff --git a/acceptance/framework/k8s/helpers.go b/acceptance/framework/k8s/helpers.go
index 1b895a5e17..3f981336ee 100644
--- a/acceptance/framework/k8s/helpers.go
+++ b/acceptance/framework/k8s/helpers.go
@@ -125,3 +125,17 @@ func ServiceHost(t *testing.T, cfg *config.TestConfig, ctx environment.TestConte
 		return host
 	}
 }
+
+// CopySecret copies a Kubernetes secret from one cluster to another.
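+// The ResourceVersion is cleared before the secret is re-created, since a
+// resourceVersion from one cluster is meaningless in the destination cluster
+// and Create rejects objects that have one set. A minimal sketch of a call
+// site (sourceCtx and destCtx are hypothetical environment.TestContext values
+// supplied by the test suite):
+//
+//	k8s.CopySecret(t, sourceCtx, destCtx, "some-secret")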
+func CopySecret(t *testing.T, sourceContext, destContext environment.TestContext, secretName string) {
+	t.Helper()
+	var secret *corev1.Secret
+	var err error
+	retry.Run(t, func(r *retry.R) {
+		secret, err = sourceContext.KubernetesClient(t).CoreV1().Secrets(sourceContext.KubectlOptions(t).Namespace).Get(context.Background(), secretName, metav1.GetOptions{})
+		require.NoError(r, err)
+		secret.ResourceVersion = ""
+	})
+	_, err = destContext.KubernetesClient(t).CoreV1().Secrets(destContext.KubectlOptions(t).Namespace).Create(context.Background(), secret, metav1.CreateOptions{})
+	require.NoError(t, err)
+}
diff --git a/acceptance/tests/fixtures/bases/peering/peering-acceptor.yaml b/acceptance/tests/fixtures/bases/peering/peering-acceptor.yaml
new file mode 100644
index 0000000000..3eff952833
--- /dev/null
+++ b/acceptance/tests/fixtures/bases/peering/peering-acceptor.yaml
@@ -0,0 +1,10 @@
+apiVersion: consul.hashicorp.com/v1alpha1
+kind: PeeringAcceptor
+metadata:
+  name: server
+spec:
+  peer:
+    secret:
+      name: "api-token"
+      key: "data"
+      backend: "kubernetes"
\ No newline at end of file
diff --git a/acceptance/tests/fixtures/bases/peering/peering-dialer.yaml b/acceptance/tests/fixtures/bases/peering/peering-dialer.yaml
new file mode 100644
index 0000000000..ec125d7bb6
--- /dev/null
+++ b/acceptance/tests/fixtures/bases/peering/peering-dialer.yaml
@@ -0,0 +1,10 @@
+apiVersion: consul.hashicorp.com/v1alpha1
+kind: PeeringDialer
+metadata:
+  name: client
+spec:
+  peer:
+    secret:
+      name: "api-token"
+      key: "data"
+      backend: "kubernetes"
\ No newline at end of file
diff --git a/acceptance/tests/fixtures/cases/crd-peers/default-namespace/kustomization.yaml b/acceptance/tests/fixtures/cases/crd-peers/default-namespace/kustomization.yaml
new file mode 100644
index 0000000000..499fdc5bc1
--- /dev/null
+++ b/acceptance/tests/fixtures/cases/crd-peers/default-namespace/kustomization.yaml
@@ -0,0 +1,5 @@
+resources:
+  - ../../../bases/exportedservices-default
+
+patchesStrategicMerge:
+- patch.yaml
diff --git a/acceptance/tests/fixtures/cases/crd-peers/default-namespace/patch.yaml b/acceptance/tests/fixtures/cases/crd-peers/default-namespace/patch.yaml
new file mode 100644
index 0000000000..eed6bee4cc
--- /dev/null
+++ b/acceptance/tests/fixtures/cases/crd-peers/default-namespace/patch.yaml
@@ -0,0 +1,10 @@
+apiVersion: consul.hashicorp.com/v1alpha1
+kind: ExportedServices
+metadata:
+  name: default
+spec:
+  services:
+    - name: static-server
+      namespace: default
+      consumers:
+        - peer: client
diff --git a/acceptance/tests/fixtures/cases/crd-peers/default/kustomization.yaml b/acceptance/tests/fixtures/cases/crd-peers/default/kustomization.yaml
new file mode 100644
index 0000000000..499fdc5bc1
--- /dev/null
+++ b/acceptance/tests/fixtures/cases/crd-peers/default/kustomization.yaml
@@ -0,0 +1,5 @@
+resources:
+  - ../../../bases/exportedservices-default
+
+patchesStrategicMerge:
+- patch.yaml
diff --git a/acceptance/tests/fixtures/cases/crd-peers/default/patch.yaml b/acceptance/tests/fixtures/cases/crd-peers/default/patch.yaml
new file mode 100644
index 0000000000..b0dcd89ebb
--- /dev/null
+++ b/acceptance/tests/fixtures/cases/crd-peers/default/patch.yaml
@@ -0,0 +1,9 @@
+apiVersion: consul.hashicorp.com/v1alpha1
+kind: ExportedServices
+metadata:
+  name: default
+spec:
+  services:
+    - name: static-server
+      consumers:
+        - peer: client
diff --git a/acceptance/tests/fixtures/cases/crd-peers/non-default-namespace/kustomization.yaml b/acceptance/tests/fixtures/cases/crd-peers/non-default-namespace/kustomization.yaml
new file mode 100644
index 0000000000..499fdc5bc1
--- /dev/null
+++ b/acceptance/tests/fixtures/cases/crd-peers/non-default-namespace/kustomization.yaml
@@ -0,0 +1,5 @@
+resources:
+  - ../../../bases/exportedservices-default
+
+patchesStrategicMerge:
+- patch.yaml
diff --git a/acceptance/tests/fixtures/cases/crd-peers/non-default-namespace/patch.yaml b/acceptance/tests/fixtures/cases/crd-peers/non-default-namespace/patch.yaml
new file mode 100644
index 0000000000..4162a0f27b
--- /dev/null
+++ b/acceptance/tests/fixtures/cases/crd-peers/non-default-namespace/patch.yaml
@@ -0,0 +1,10 @@
+apiVersion: consul.hashicorp.com/v1alpha1
+kind: ExportedServices
+metadata:
+  name: default
+spec:
+  services:
+    - name: static-server
+      namespace: ns1
+      consumers:
+        - peer: client
diff --git a/acceptance/tests/fixtures/cases/static-client-peers/default-namespace/kustomization.yaml b/acceptance/tests/fixtures/cases/static-client-peers/default-namespace/kustomization.yaml
new file mode 100644
index 0000000000..7191edfb80
--- /dev/null
+++ b/acceptance/tests/fixtures/cases/static-client-peers/default-namespace/kustomization.yaml
@@ -0,0 +1,5 @@
+resources:
+  - ../../../bases/static-client
+
+patchesStrategicMerge:
+  - patch.yaml
\ No newline at end of file
diff --git a/acceptance/tests/fixtures/cases/static-client-peers/default-namespace/patch.yaml b/acceptance/tests/fixtures/cases/static-client-peers/default-namespace/patch.yaml
new file mode 100644
index 0000000000..02ea8993ec
--- /dev/null
+++ b/acceptance/tests/fixtures/cases/static-client-peers/default-namespace/patch.yaml
@@ -0,0 +1,10 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: static-client
+spec:
+  template:
+    metadata:
+      annotations:
+        "consul.hashicorp.com/connect-inject": "true"
+        "consul.hashicorp.com/connect-service-upstreams": "static-server.svc.default.ns.server.peer:1234"
\ No newline at end of file
diff --git a/acceptance/tests/fixtures/cases/static-client-peers/default/kustomization.yaml b/acceptance/tests/fixtures/cases/static-client-peers/default/kustomization.yaml
new file mode 100644
index 0000000000..7191edfb80
--- /dev/null
+++ b/acceptance/tests/fixtures/cases/static-client-peers/default/kustomization.yaml
@@ -0,0 +1,5 @@
+resources:
+  - ../../../bases/static-client
+
+patchesStrategicMerge:
+  - patch.yaml
\ No newline at end of file
diff --git a/acceptance/tests/fixtures/cases/static-client-peers/default/patch.yaml b/acceptance/tests/fixtures/cases/static-client-peers/default/patch.yaml
new file mode 100644
index 0000000000..715485e0f8
--- /dev/null
+++ b/acceptance/tests/fixtures/cases/static-client-peers/default/patch.yaml
@@ -0,0 +1,10 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: static-client
+spec:
+  template:
+    metadata:
+      annotations:
+        "consul.hashicorp.com/connect-inject": "true"
+        "consul.hashicorp.com/connect-service-upstreams": "static-server.svc.server.peer:1234"
\ No newline at end of file
diff --git a/acceptance/tests/fixtures/cases/static-client-peers/non-default-namespace/kustomization.yaml b/acceptance/tests/fixtures/cases/static-client-peers/non-default-namespace/kustomization.yaml
new file mode 100644
index 0000000000..7191edfb80
--- /dev/null
+++ b/acceptance/tests/fixtures/cases/static-client-peers/non-default-namespace/kustomization.yaml
@@ -0,0 +1,5 @@
+resources:
+  - ../../../bases/static-client
+
+patchesStrategicMerge:
+  - patch.yaml
\ No newline at end of file
diff --git a/acceptance/tests/fixtures/cases/static-client-peers/non-default-namespace/patch.yaml b/acceptance/tests/fixtures/cases/static-client-peers/non-default-namespace/patch.yaml
new file mode 100644
index 0000000000..fd622759a4
--- /dev/null
+++ b/acceptance/tests/fixtures/cases/static-client-peers/non-default-namespace/patch.yaml
@@ -0,0 +1,10 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: static-client
+spec:
+  template:
+    metadata:
+      annotations:
+        "consul.hashicorp.com/connect-inject": "true"
+        "consul.hashicorp.com/connect-service-upstreams": "static-server.svc.ns1.ns.server.peer:1234"
\ No newline at end of file
diff --git a/acceptance/tests/partitions/partitions_connect_test.go b/acceptance/tests/partitions/partitions_connect_test.go
index 369da09c26..d57e1af44a 100644
--- a/acceptance/tests/partitions/partitions_connect_test.go
+++ b/acceptance/tests/partitions/partitions_connect_test.go
@@ -138,19 +138,19 @@ func TestPartitions_Connect(t *testing.T) {
 			caKeySecretName := fmt.Sprintf("%s-consul-ca-key", releaseName)
 
 			logger.Logf(t, "retrieving ca cert secret %s from the server cluster and applying to the client cluster", caCertSecretName)
-			copySecret(t, serverClusterContext, clientClusterContext, caCertSecretName)
+			k8s.CopySecret(t, serverClusterContext, clientClusterContext, caCertSecretName)
 
 			if !c.ACLsAndAutoEncryptEnabled {
 				// When auto-encrypt is disabled, we need both
 				// the CA cert and CA key to be available in the clients cluster to generate client certificates and keys.
 				logger.Logf(t, "retrieving ca key secret %s from the server cluster and applying to the client cluster", caKeySecretName)
-				copySecret(t, serverClusterContext, clientClusterContext, caKeySecretName)
+				k8s.CopySecret(t, serverClusterContext, clientClusterContext, caKeySecretName)
 			}
 
 			partitionToken := fmt.Sprintf("%s-consul-partitions-acl-token", releaseName)
 			if c.ACLsAndAutoEncryptEnabled {
 				logger.Logf(t, "retrieving partition token secret %s from the server cluster and applying to the client cluster", partitionToken)
-				copySecret(t, serverClusterContext, clientClusterContext, partitionToken)
+				k8s.CopySecret(t, serverClusterContext, clientClusterContext, partitionToken)
 			}
 
 			partitionServiceName := fmt.Sprintf("%s-consul-partition", releaseName)
@@ -629,13 +629,3 @@ func TestPartitions_Connect(t *testing.T) {
 		})
 	}
 }
-
-func copySecret(t *testing.T, sourceContext, destContext environment.TestContext, secretName string) {
-	t.Helper()
-
-	secret, err := sourceContext.KubernetesClient(t).CoreV1().Secrets(sourceContext.KubectlOptions(t).Namespace).Get(context.Background(), secretName, metav1.GetOptions{})
-	secret.ResourceVersion = ""
-	require.NoError(t, err)
-	_, err = destContext.KubernetesClient(t).CoreV1().Secrets(destContext.KubectlOptions(t).Namespace).Create(context.Background(), secret, metav1.CreateOptions{})
-	require.NoError(t, err)
-}
diff --git a/acceptance/tests/partitions/partitions_sync_test.go b/acceptance/tests/partitions/partitions_sync_test.go
index b3f551bd9d..49f265210e 100644
--- a/acceptance/tests/partitions/partitions_sync_test.go
+++ b/acceptance/tests/partitions/partitions_sync_test.go
@@ -128,19 +128,19 @@ func TestPartitions_Sync(t *testing.T) {
 			caKeySecretName := fmt.Sprintf("%s-consul-ca-key", releaseName)
 
 			logger.Logf(t, "retrieving ca cert secret %s from the server cluster and applying to the client cluster", caCertSecretName)
-			copySecret(t, primaryClusterContext, secondaryClusterContext, caCertSecretName)
+			k8s.CopySecret(t, primaryClusterContext, secondaryClusterContext, caCertSecretName)
 
 			if !c.ACLsAndAutoEncryptEnabled {
 				// When auto-encrypt is disabled, we need both
 				// the CA cert and CA key to be available in the clients cluster to generate client certificates and keys.
 				logger.Logf(t, "retrieving ca key secret %s from the server cluster and applying to the client cluster", caKeySecretName)
-				copySecret(t, primaryClusterContext, secondaryClusterContext, caKeySecretName)
+				k8s.CopySecret(t, primaryClusterContext, secondaryClusterContext, caKeySecretName)
 			}
 
 			partitionToken := fmt.Sprintf("%s-consul-partitions-acl-token", releaseName)
 			if c.ACLsAndAutoEncryptEnabled {
 				logger.Logf(t, "retrieving partition token secret %s from the server cluster and applying to the client cluster", partitionToken)
-				copySecret(t, primaryClusterContext, secondaryClusterContext, partitionToken)
+				k8s.CopySecret(t, primaryClusterContext, secondaryClusterContext, partitionToken)
 			}
 
 			partitionServiceName := fmt.Sprintf("%s-consul-partition", releaseName)
diff --git a/acceptance/tests/peering/main_test.go b/acceptance/tests/peering/main_test.go
new file mode 100644
index 0000000000..fff59f1e70
--- /dev/null
+++ b/acceptance/tests/peering/main_test.go
@@ -0,0 +1,22 @@
+package peering
+
+import (
+	"fmt"
+	"os"
+	"testing"
+
+	testsuite "github.com/hashicorp/consul-k8s/acceptance/framework/suite"
+)
+
+var suite testsuite.Suite
+
+func TestMain(m *testing.M) {
+	suite = testsuite.NewSuite(m)
+
+	if suite.Config().EnableMultiCluster {
+		os.Exit(suite.Run())
+	} else {
+		fmt.Println("Skipping peering tests because -enable-multi-cluster is not set")
+		os.Exit(0)
+	}
+}
diff --git a/acceptance/tests/peering/peering_connect_namespaces_test.go b/acceptance/tests/peering/peering_connect_namespaces_test.go
new file mode 100644
index 0000000000..9388eec360
--- /dev/null
+++ b/acceptance/tests/peering/peering_connect_namespaces_test.go
@@ -0,0 +1,301 @@
+package peering
+
+import (
+	"context"
+	"strconv"
+	"testing"
+
+	terratestk8s "github.com/gruntwork-io/terratest/modules/k8s"
+	"github.com/hashicorp/consul-k8s/acceptance/framework/consul"
+	"github.com/hashicorp/consul-k8s/acceptance/framework/environment"
+	"github.com/hashicorp/consul-k8s/acceptance/framework/helpers"
+	"github.com/hashicorp/consul-k8s/acceptance/framework/k8s"
+	"github.com/hashicorp/consul-k8s/acceptance/framework/logger"
+	"github.com/hashicorp/consul/api"
+	"github.com/stretchr/testify/require"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+const staticClientName = "static-client"
+const staticServerName = "static-server"
+const staticServerNamespace = "ns1"
+const staticClientNamespace = "ns2"
+
+// Test that Connect works between peered clusters (x-peers networking) when
+// Consul namespaces are in use.
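+// The static-client fixtures used below reach the peered service through an
+// explicit upstream annotation. Judging from the fixture patches (e.g.
+// "static-server.svc.ns1.ns.server.peer:1234"), the annotation has the shape:
+//
+//	consul.hashicorp.com/connect-service-upstreams: "<service>.svc[.<namespace>.ns].<peer>.peer:<port>"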
+func TestPeering_ConnectNamespaces(t *testing.T) {
+	env := suite.Environment()
+	cfg := suite.Config()
+
+	if !cfg.EnableEnterprise {
+		t.Skipf("skipping this test because -enable-enterprise is not set")
+	}
+
+	if cfg.EnableTransparentProxy {
+		t.Skipf("skipping this test because Transparent Proxy is enabled")
+	}
+
+	const staticServerPeer = "server"
+	const staticClientPeer = "client"
+	const defaultNamespace = "default"
+	cases := []struct {
+		name                      string
+		destinationNamespace      string
+		mirrorK8S                 bool
+		ACLsAndAutoEncryptEnabled bool
+	}{
+		{
+			"default destination namespace",
+			defaultNamespace,
+			false,
+			false,
+		},
+		{
+			"single destination namespace",
+			staticServerNamespace,
+			false,
+			false,
+		},
+		{
+			"mirror k8s namespaces",
+			staticServerNamespace,
+			true,
+			false,
+		},
+	}
+
+	for _, c := range cases {
+		t.Run(c.name, func(t *testing.T) {
+			staticServerPeerClusterContext := env.DefaultContext(t)
+			staticClientPeerClusterContext := env.Context(t, environment.SecondaryContextName)
+
+			commonHelmValues := map[string]string{
+				"global.peering.enabled":        "true",
+				"global.enableConsulNamespaces": "true",
+
+				"global.image": "hashicorp/consul-enterprise:1.13.0-alpha2-ent",
+
+				"global.tls.enabled":           "false",
+				"global.tls.httpsOnly":         strconv.FormatBool(c.ACLsAndAutoEncryptEnabled),
+				"global.tls.enableAutoEncrypt": strconv.FormatBool(c.ACLsAndAutoEncryptEnabled),
+
+				"global.acls.manageSystemACLs": strconv.FormatBool(c.ACLsAndAutoEncryptEnabled),
+
+				"connectInject.enabled": "true",
+				"connectInject.transparentProxy.defaultEnabled": "false",
+				// When mirroringK8S is set, this setting is ignored.
+				"connectInject.consulNamespaces.consulDestinationNamespace": c.destinationNamespace,
+				"connectInject.consulNamespaces.mirroringK8S":               strconv.FormatBool(c.mirrorK8S),
+
+				"meshGateway.enabled":  "true",
+				"meshGateway.replicas": "1",
+
+				"controller.enabled": "true",
+			}
+
+			staticServerPeerHelmValues := map[string]string{
+				"global.datacenter": staticServerPeer,
+			}
+
+			// On Kind, there are no load balancers, but since all clusters
+			// share the same node network (docker bridge), we can use a
+			// NodePort service to access node(s) in a different Kind cluster.
+			if cfg.UseKind {
+				staticServerPeerHelmValues["server.exposeGossipAndRPCPorts"] = "true"
+				staticServerPeerHelmValues["meshGateway.service.type"] = "NodePort"
+				staticServerPeerHelmValues["meshGateway.service.nodePort"] = "30100"
+			}
+
+			releaseName := helpers.RandomName()
+
+			helpers.MergeMaps(staticServerPeerHelmValues, commonHelmValues)
+
+			// Install the first peer where static-server will be deployed in the static-server kubernetes context.
+			staticServerPeerCluster := consul.NewHelmCluster(t, staticServerPeerHelmValues, staticServerPeerClusterContext, cfg, releaseName)
+			staticServerPeerCluster.Create(t)
+
+			staticClientPeerHelmValues := map[string]string{
+				"global.datacenter": staticClientPeer,
+			}
+
+			if cfg.UseKind {
+				staticClientPeerHelmValues["server.exposeGossipAndRPCPorts"] = "true"
+				staticClientPeerHelmValues["meshGateway.service.type"] = "NodePort"
+				staticClientPeerHelmValues["meshGateway.service.nodePort"] = "30100"
+			}
+
+			helpers.MergeMaps(staticClientPeerHelmValues, commonHelmValues)
+
+			// Install the second peer where static-client will be deployed in the static-client kubernetes context.
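+			// Both releases share the same release name but run in different kube
+			// contexts, so they come up as two independent Consul datacenters
+			// ("server" and "client") that are peered with each other below.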
+			staticClientPeerCluster := consul.NewHelmCluster(t, staticClientPeerHelmValues, staticClientPeerClusterContext, cfg, releaseName)
+			staticClientPeerCluster.Create(t)
+
+			// Create the peering acceptor on the client peer.
+			k8s.KubectlApply(t, staticClientPeerClusterContext.KubectlOptions(t), "../fixtures/bases/peering/peering-acceptor.yaml")
+			helpers.Cleanup(t, cfg.NoCleanupOnFailure, func() {
+				k8s.KubectlDelete(t, staticClientPeerClusterContext.KubectlOptions(t), "../fixtures/bases/peering/peering-acceptor.yaml")
+			})
+
+			// Copy secret from client peer to server peer.
+			k8s.CopySecret(t, staticClientPeerClusterContext, staticServerPeerClusterContext, "api-token")
+
+			// Create the peering dialer on the server peer.
+			k8s.KubectlApply(t, staticServerPeerClusterContext.KubectlOptions(t), "../fixtures/bases/peering/peering-dialer.yaml")
+			helpers.Cleanup(t, cfg.NoCleanupOnFailure, func() {
+				k8s.RunKubectl(t, staticServerPeerClusterContext.KubectlOptions(t), "delete", "secret", "api-token")
+				k8s.KubectlDelete(t, staticServerPeerClusterContext.KubectlOptions(t), "../fixtures/bases/peering/peering-dialer.yaml")
+			})
+
+			staticServerOpts := &terratestk8s.KubectlOptions{
+				ContextName: staticServerPeerClusterContext.KubectlOptions(t).ContextName,
+				ConfigPath:  staticServerPeerClusterContext.KubectlOptions(t).ConfigPath,
+				Namespace:   staticServerNamespace,
+			}
+			staticClientOpts := &terratestk8s.KubectlOptions{
+				ContextName: staticClientPeerClusterContext.KubectlOptions(t).ContextName,
+				ConfigPath:  staticClientPeerClusterContext.KubectlOptions(t).ConfigPath,
+				Namespace:   staticClientNamespace,
+			}
+
+			logger.Logf(t, "creating namespace %s in server peer", staticServerNamespace)
+			k8s.RunKubectl(t, staticServerPeerClusterContext.KubectlOptions(t), "create", "ns", staticServerNamespace)
+			helpers.Cleanup(t, cfg.NoCleanupOnFailure, func() {
+				k8s.RunKubectl(t, staticServerPeerClusterContext.KubectlOptions(t), "delete", "ns", staticServerNamespace)
+			})
+
+			logger.Logf(t, "creating namespace %s in client peer", staticClientNamespace)
+			k8s.RunKubectl(t, staticClientPeerClusterContext.KubectlOptions(t), "create", "ns", staticClientNamespace)
+			helpers.Cleanup(t, cfg.NoCleanupOnFailure, func() {
+				k8s.RunKubectl(t, staticClientPeerClusterContext.KubectlOptions(t), "delete", "ns", staticClientNamespace)
+			})
+
+			staticServerPeerClient, _ := staticServerPeerCluster.SetupConsulClient(t, c.ACLsAndAutoEncryptEnabled)
+			staticClientPeerClient, _ := staticClientPeerCluster.SetupConsulClient(t, c.ACLsAndAutoEncryptEnabled)
+
+			serverQueryOpts := &api.QueryOptions{Namespace: staticServerNamespace}
+			clientQueryOpts := &api.QueryOptions{Namespace: staticClientNamespace}
+
+			if !c.mirrorK8S {
+				serverQueryOpts = &api.QueryOptions{Namespace: c.destinationNamespace}
+				clientQueryOpts = &api.QueryOptions{Namespace: c.destinationNamespace}
+			}
+
+			// Create a ProxyDefaults resource to configure services to use the mesh
+			// gateways.
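+			// The kustomize base is expected to contain a ProxyDefaults resource
+			// roughly like the following (a sketch, not the literal fixture):
+			//
+			//	apiVersion: consul.hashicorp.com/v1alpha1
+			//	kind: ProxyDefaults
+			//	metadata:
+			//	  name: global
+			//	spec:
+			//	  meshGateway:
+			//	    mode: local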
+			logger.Log(t, "creating proxy-defaults config")
+			kustomizeDir := "../fixtures/bases/mesh-gateway"
+
+			k8s.KubectlApplyK(t, staticServerPeerClusterContext.KubectlOptions(t), kustomizeDir)
+			helpers.Cleanup(t, cfg.NoCleanupOnFailure, func() {
+				k8s.KubectlDeleteK(t, staticServerPeerClusterContext.KubectlOptions(t), kustomizeDir)
+			})
+
+			k8s.KubectlApplyK(t, staticClientPeerClusterContext.KubectlOptions(t), kustomizeDir)
+			helpers.Cleanup(t, cfg.NoCleanupOnFailure, func() {
+				k8s.KubectlDeleteK(t, staticClientPeerClusterContext.KubectlOptions(t), kustomizeDir)
+			})
+
+			logger.Log(t, "creating static-server in server peer")
+			k8s.DeployKustomize(t, staticServerOpts, cfg.NoCleanupOnFailure, cfg.DebugDirectory, "../fixtures/cases/static-server-inject")
+
+			logger.Log(t, "creating static-client deployments in client peer")
+			if c.destinationNamespace == defaultNamespace {
+				k8s.DeployKustomize(t, staticClientOpts, cfg.NoCleanupOnFailure, cfg.DebugDirectory, "../fixtures/cases/static-client-peers/default-namespace")
+			} else {
+				k8s.DeployKustomize(t, staticClientOpts, cfg.NoCleanupOnFailure, cfg.DebugDirectory, "../fixtures/cases/static-client-peers/non-default-namespace")
+			}
+
+			// Check that both static-server and static-client have been injected and now have 2 containers.
+			podList, err := staticServerPeerClusterContext.KubernetesClient(t).CoreV1().Pods(metav1.NamespaceAll).List(context.Background(), metav1.ListOptions{
+				LabelSelector: "app=static-server",
+			})
+			require.NoError(t, err)
+			require.Len(t, podList.Items, 1)
+			require.Len(t, podList.Items[0].Spec.Containers, 2)
+
+			podList, err = staticClientPeerClusterContext.KubernetesClient(t).CoreV1().Pods(metav1.NamespaceAll).List(context.Background(), metav1.ListOptions{
+				LabelSelector: "app=static-client",
+			})
+			require.NoError(t, err)
+			require.Len(t, podList.Items, 1)
+			require.Len(t, podList.Items[0].Spec.Containers, 2)
+
+			// Make sure that services are registered in the correct namespace.
+			// If mirroring is enabled, we expect services to be registered in the
+			// Consul namespace with the same name as their source
+			// Kubernetes namespace.
+			// If a single destination namespace is set, we expect all services
+			// to be registered in that destination Consul namespace.
+			// Server cluster.
+			services, _, err := staticServerPeerClient.Catalog().Service(staticServerName, "", serverQueryOpts)
+			require.NoError(t, err)
+			require.Len(t, services, 1)
+
+			// Client cluster.
+			services, _, err = staticClientPeerClient.Catalog().Service(staticClientName, "", clientQueryOpts)
+			require.NoError(t, err)
+			require.Len(t, services, 1)
+
+			logger.Log(t, "creating exported services")
+			if c.destinationNamespace == defaultNamespace {
+				k8s.KubectlApplyK(t, staticServerPeerClusterContext.KubectlOptions(t), "../fixtures/cases/crd-peers/default-namespace")
+				helpers.Cleanup(t, cfg.NoCleanupOnFailure, func() {
+					k8s.KubectlDeleteK(t, staticServerPeerClusterContext.KubectlOptions(t), "../fixtures/cases/crd-peers/default-namespace")
+				})
+			} else {
+				k8s.KubectlApplyK(t, staticServerPeerClusterContext.KubectlOptions(t), "../fixtures/cases/crd-peers/non-default-namespace")
+				helpers.Cleanup(t, cfg.NoCleanupOnFailure, func() {
+					k8s.KubectlDeleteK(t, staticServerPeerClusterContext.KubectlOptions(t), "../fixtures/cases/crd-peers/non-default-namespace")
+				})
+			}
+
+			logger.Log(t, "checking that connection is successful")
+			k8s.CheckStaticServerConnectionSuccessful(t, staticClientOpts, staticClientName, "http://localhost:1234")
+
+			denyAllIntention := &api.ServiceIntentionsConfigEntry{
+				Name:      "*",
+				Kind:      api.ServiceIntentions,
+				Namespace: "*",
+				Sources: []*api.SourceIntention{
+					{
+						Name:      "*",
+						Namespace: "*",
+						Action:    api.IntentionActionDeny,
+						Peer:      staticClientPeer,
+					},
+				},
+			}
+			_, _, err = staticServerPeerClient.ConfigEntries().Set(denyAllIntention, &api.WriteOptions{})
+			require.NoError(t, err)
+
+			logger.Log(t, "checking that the connection is not successful because there's no allow intention")
+			k8s.CheckStaticServerConnectionFailing(t, staticClientOpts, staticClientName, "http://localhost:1234")
+
+			intention := &api.ServiceIntentionsConfigEntry{
+				Name:      staticServerName,
+				Kind:      api.ServiceIntentions,
+				Namespace: staticServerNamespace,
+				Sources: []*api.SourceIntention{
+					{
+						Name:      staticClientName,
+						Namespace: staticClientNamespace,
+						Action:    api.IntentionActionAllow,
+						Peer:      staticClientPeer,
+					},
+				},
+			}
+
+			// Use the single destination namespace on the intention
+			// unless mirrorK8S is true.
+			if !c.mirrorK8S {
+				intention.Namespace = c.destinationNamespace
+				intention.Sources[0].Namespace = c.destinationNamespace
+			}
+
+			logger.Log(t, "creating intentions in server peer")
+			_, _, err = staticServerPeerClient.ConfigEntries().Set(intention, &api.WriteOptions{})
+			require.NoError(t, err)
+
+			logger.Log(t, "checking that connection is successful")
+			k8s.CheckStaticServerConnectionSuccessful(t, staticClientOpts, staticClientName, "http://localhost:1234")
+		})
+	}
+}
diff --git a/acceptance/tests/peering/peering_connect_test.go b/acceptance/tests/peering/peering_connect_test.go
new file mode 100644
index 0000000000..6ef66bdca2
--- /dev/null
+++ b/acceptance/tests/peering/peering_connect_test.go
@@ -0,0 +1,235 @@
+package peering
+
+import (
+	"context"
+	"strconv"
+	"testing"
+
+	terratestk8s "github.com/gruntwork-io/terratest/modules/k8s"
+	"github.com/hashicorp/consul-k8s/acceptance/framework/consul"
+	"github.com/hashicorp/consul-k8s/acceptance/framework/environment"
+	"github.com/hashicorp/consul-k8s/acceptance/framework/helpers"
+	"github.com/hashicorp/consul-k8s/acceptance/framework/k8s"
+	"github.com/hashicorp/consul-k8s/acceptance/framework/logger"
+	"github.com/hashicorp/consul/api"
+	"github.com/stretchr/testify/require"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// Test that Connect works between peered clusters (x-peers networking).
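+// The flow mirrors the namespaces variant above: install two clusters, create
+// a PeeringAcceptor on the "client" peer (which generates a peering token and
+// writes it to the api-token secret), copy that secret to the "server" peer,
+// and create a PeeringDialer there that consumes it to establish the peering.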
+func TestPeering_Connect(t *testing.T) {
+	env := suite.Environment()
+	cfg := suite.Config()
+
+	if cfg.EnableTransparentProxy {
+		t.Skipf("skipping this test because Transparent Proxy is enabled")
+	}
+
+	const staticServerPeer = "server"
+	const staticClientPeer = "client"
+	cases := []struct {
+		name                      string
+		ACLsAndAutoEncryptEnabled bool
+	}{
+		{
+			"default installation",
+			false,
+		},
+	}
+
+	for _, c := range cases {
+		t.Run(c.name, func(t *testing.T) {
+			staticServerPeerClusterContext := env.DefaultContext(t)
+			staticClientPeerClusterContext := env.Context(t, environment.SecondaryContextName)
+
+			commonHelmValues := map[string]string{
+				"global.peering.enabled": "true",
+
+				"global.image": "hashicorp/consul:1.13.0-alpha2",
+
+				"global.tls.enabled":           "false",
+				"global.tls.httpsOnly":         strconv.FormatBool(c.ACLsAndAutoEncryptEnabled),
+				"global.tls.enableAutoEncrypt": strconv.FormatBool(c.ACLsAndAutoEncryptEnabled),
+
+				"global.acls.manageSystemACLs": strconv.FormatBool(c.ACLsAndAutoEncryptEnabled),
+
+				"connectInject.enabled": "true",
+				"connectInject.transparentProxy.defaultEnabled": "false",
+
+				"meshGateway.enabled":  "true",
+				"meshGateway.replicas": "1",
+
+				"controller.enabled": "true",
+			}
+
+			staticServerPeerHelmValues := map[string]string{
+				"global.datacenter": staticServerPeer,
+			}
+
+			// On Kind, there are no load balancers, but since all clusters
+			// share the same node network (docker bridge), we can use a
+			// NodePort service to access node(s) in a different Kind cluster.
+			if cfg.UseKind {
+				staticServerPeerHelmValues["server.exposeGossipAndRPCPorts"] = "true"
+				staticServerPeerHelmValues["meshGateway.service.type"] = "NodePort"
+				staticServerPeerHelmValues["meshGateway.service.nodePort"] = "30100"
+			}
+
+			releaseName := helpers.RandomName()
+
+			helpers.MergeMaps(staticServerPeerHelmValues, commonHelmValues)
+
+			// Install the first peer where static-server will be deployed in the static-server kubernetes context.
+			staticServerPeerCluster := consul.NewHelmCluster(t, staticServerPeerHelmValues, staticServerPeerClusterContext, cfg, releaseName)
+			staticServerPeerCluster.Create(t)
+
+			staticClientPeerHelmValues := map[string]string{
+				"global.datacenter": staticClientPeer,
+			}
+
+			if cfg.UseKind {
+				staticClientPeerHelmValues["server.exposeGossipAndRPCPorts"] = "true"
+				staticClientPeerHelmValues["meshGateway.service.type"] = "NodePort"
+				staticClientPeerHelmValues["meshGateway.service.nodePort"] = "30100"
+			}
+
+			helpers.MergeMaps(staticClientPeerHelmValues, commonHelmValues)
+
+			// Install the second peer where static-client will be deployed in the static-client kubernetes context.
+			staticClientPeerCluster := consul.NewHelmCluster(t, staticClientPeerHelmValues, staticClientPeerClusterContext, cfg, releaseName)
+			staticClientPeerCluster.Create(t)
+
+			// Create the peering acceptor on the client peer.
+			k8s.KubectlApply(t, staticClientPeerClusterContext.KubectlOptions(t), "../fixtures/bases/peering/peering-acceptor.yaml")
+			helpers.Cleanup(t, cfg.NoCleanupOnFailure, func() {
+				k8s.KubectlDelete(t, staticClientPeerClusterContext.KubectlOptions(t), "../fixtures/bases/peering/peering-acceptor.yaml")
+			})
+
+			// Copy secret from client peer to server peer.
+			k8s.CopySecret(t, staticClientPeerClusterContext, staticServerPeerClusterContext, "api-token")
+
+			// Create the peering dialer on the server peer.
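+			// The dialer reads the peering token from the "data" key of the copied
+			// api-token secret (see ../fixtures/bases/peering/peering-dialer.yaml).
+			// Once it is applied, the peering can be inspected out-of-band with,
+			// for example (a sketch, assuming kubectl access to the server peer):
+			//
+			//	kubectl get peeringdialers.consul.hashicorp.com client -o yaml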
+			k8s.KubectlApply(t, staticServerPeerClusterContext.KubectlOptions(t), "../fixtures/bases/peering/peering-dialer.yaml")
+			helpers.Cleanup(t, cfg.NoCleanupOnFailure, func() {
+				k8s.RunKubectl(t, staticServerPeerClusterContext.KubectlOptions(t), "delete", "secret", "api-token")
+				k8s.KubectlDelete(t, staticServerPeerClusterContext.KubectlOptions(t), "../fixtures/bases/peering/peering-dialer.yaml")
+			})
+
+			staticServerOpts := &terratestk8s.KubectlOptions{
+				ContextName: staticServerPeerClusterContext.KubectlOptions(t).ContextName,
+				ConfigPath:  staticServerPeerClusterContext.KubectlOptions(t).ConfigPath,
+				Namespace:   staticServerNamespace,
+			}
+			staticClientOpts := &terratestk8s.KubectlOptions{
+				ContextName: staticClientPeerClusterContext.KubectlOptions(t).ContextName,
+				ConfigPath:  staticClientPeerClusterContext.KubectlOptions(t).ConfigPath,
+				Namespace:   staticClientNamespace,
+			}
+
+			logger.Logf(t, "creating namespace %s in server peer", staticServerNamespace)
+			k8s.RunKubectl(t, staticServerPeerClusterContext.KubectlOptions(t), "create", "ns", staticServerNamespace)
+			helpers.Cleanup(t, cfg.NoCleanupOnFailure, func() {
+				k8s.RunKubectl(t, staticServerPeerClusterContext.KubectlOptions(t), "delete", "ns", staticServerNamespace)
+			})
+
+			logger.Logf(t, "creating namespace %s in client peer", staticClientNamespace)
+			k8s.RunKubectl(t, staticClientPeerClusterContext.KubectlOptions(t), "create", "ns", staticClientNamespace)
+			helpers.Cleanup(t, cfg.NoCleanupOnFailure, func() {
+				k8s.RunKubectl(t, staticClientPeerClusterContext.KubectlOptions(t), "delete", "ns", staticClientNamespace)
+			})
+
+			staticServerPeerClient, _ := staticServerPeerCluster.SetupConsulClient(t, c.ACLsAndAutoEncryptEnabled)
+			staticClientPeerClient, _ := staticClientPeerCluster.SetupConsulClient(t, c.ACLsAndAutoEncryptEnabled)
+
+			// Create a ProxyDefaults resource to configure services to use the mesh
+			// gateways.
+			logger.Log(t, "creating proxy-defaults config")
+			kustomizeDir := "../fixtures/bases/mesh-gateway"
+
+			k8s.KubectlApplyK(t, staticServerPeerClusterContext.KubectlOptions(t), kustomizeDir)
+			helpers.Cleanup(t, cfg.NoCleanupOnFailure, func() {
+				k8s.KubectlDeleteK(t, staticServerPeerClusterContext.KubectlOptions(t), kustomizeDir)
+			})
+
+			k8s.KubectlApplyK(t, staticClientPeerClusterContext.KubectlOptions(t), kustomizeDir)
+			helpers.Cleanup(t, cfg.NoCleanupOnFailure, func() {
+				k8s.KubectlDeleteK(t, staticClientPeerClusterContext.KubectlOptions(t), kustomizeDir)
+			})
+
+			logger.Log(t, "creating static-server in server peer")
+			k8s.DeployKustomize(t, staticServerOpts, cfg.NoCleanupOnFailure, cfg.DebugDirectory, "../fixtures/cases/static-server-inject")
+
+			logger.Log(t, "creating static-client deployments in client peer")
+			k8s.DeployKustomize(t, staticClientOpts, cfg.NoCleanupOnFailure, cfg.DebugDirectory, "../fixtures/cases/static-client-peers/default")
+
+			// Check that both static-server and static-client have been injected and now have 2 containers.
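+			// Injection adds an Envoy sidecar next to the application container, so
+			// the assertions below expect exactly two containers per pod, roughly
+			// what the following would show by hand:
+			//
+			//	kubectl get pods -l app=static-server -o jsonpath='{.items[0].spec.containers[*].name}'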
+			podList, err := staticServerPeerClusterContext.KubernetesClient(t).CoreV1().Pods(metav1.NamespaceAll).List(context.Background(), metav1.ListOptions{
+				LabelSelector: "app=static-server",
+			})
+			require.NoError(t, err)
+			require.Len(t, podList.Items, 1)
+			require.Len(t, podList.Items[0].Spec.Containers, 2)
+
+			podList, err = staticClientPeerClusterContext.KubernetesClient(t).CoreV1().Pods(metav1.NamespaceAll).List(context.Background(), metav1.ListOptions{
+				LabelSelector: "app=static-client",
+			})
+			require.NoError(t, err)
+			require.Len(t, podList.Items, 1)
+			require.Len(t, podList.Items[0].Spec.Containers, 2)
+
+			// Make sure that services are registered in the correct namespace.
+			// Server cluster.
+			services, _, err := staticServerPeerClient.Catalog().Service(staticServerName, "", &api.QueryOptions{})
+			require.NoError(t, err)
+			require.Len(t, services, 1)
+
+			// Client cluster.
+			services, _, err = staticClientPeerClient.Catalog().Service(staticClientName, "", &api.QueryOptions{})
+			require.NoError(t, err)
+			require.Len(t, services, 1)
+
+			logger.Log(t, "creating exported services")
+			k8s.KubectlApplyK(t, staticServerPeerClusterContext.KubectlOptions(t), "../fixtures/cases/crd-peers/default")
+			helpers.Cleanup(t, cfg.NoCleanupOnFailure, func() {
+				k8s.KubectlDeleteK(t, staticServerPeerClusterContext.KubectlOptions(t), "../fixtures/cases/crd-peers/default")
+			})
+
+			logger.Log(t, "checking that connection is successful")
+			k8s.CheckStaticServerConnectionSuccessful(t, staticClientOpts, staticClientName, "http://localhost:1234")
+
+			denyAllIntention := &api.ServiceIntentionsConfigEntry{
+				Name: "*",
+				Kind: api.ServiceIntentions,
+				Sources: []*api.SourceIntention{
+					{
+						Name:   "*",
+						Action: api.IntentionActionDeny,
+						Peer:   staticClientPeer,
+					},
+				},
+			}
+			_, _, err = staticServerPeerClient.ConfigEntries().Set(denyAllIntention, &api.WriteOptions{})
+			require.NoError(t, err)
+
+			logger.Log(t, "checking that the connection is not successful because there's no allow intention")
+			k8s.CheckStaticServerConnectionFailing(t, staticClientOpts, staticClientName, "http://localhost:1234")
+
+			intention := &api.ServiceIntentionsConfigEntry{
+				Name: staticServerName,
+				Kind: api.ServiceIntentions,
+				Sources: []*api.SourceIntention{
+					{
+						Name:   staticClientName,
+						Action: api.IntentionActionAllow,
+						Peer:   staticClientPeer,
+					},
+				},
+			}
+
+			logger.Log(t, "creating intentions in server peer")
+			_, _, err = staticServerPeerClient.ConfigEntries().Set(intention, &api.WriteOptions{})
+			require.NoError(t, err)
+
+			logger.Log(t, "checking that connection is successful")
+			k8s.CheckStaticServerConnectionSuccessful(t, staticClientOpts, staticClientName, "http://localhost:1234")
+		})
+	}
+}
diff --git a/charts/consul/values.yaml b/charts/consul/values.yaml
index 02318c3bc1..0eff7f8734 100644
--- a/charts/consul/values.yaml
+++ b/charts/consul/values.yaml
@@ -31,9 +31,8 @@ global:
   # [Experimental] Configures the Cluster Peering feature. Requires Consul v1.13+ and Consul-K8s v0.45+.
   peering:
-    # If true, the Helm chart will enable Cluster Peering for the cluster. This will enable peering controllers and
-    # allow use of the PeeringAcceptor and PeeringDialer CRDs to establish peerings for service mesh.
-    # @type boolean
+    # If true, the Helm chart enables Cluster Peering for the cluster. This option enables peering controllers and
+    # allows use of the PeeringAcceptor and PeeringDialer CRDs for establishing service mesh peerings.
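+    # For example, peering can be switched on at install time with (a sketch):
+    #   helm install consul hashicorp/consul --set global.peering.enabled=true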
     enabled: false
 
   # [Enterprise Only] Enabling `adminPartitions` allows creation of Admin Partitions in Kubernetes clusters.