diff --git a/acceptance/framework/helpers/helpers.go b/acceptance/framework/helpers/helpers.go
index 17dd805123..2c1a6ebf47 100644
--- a/acceptance/framework/helpers/helpers.go
+++ b/acceptance/framework/helpers/helpers.go
@@ -12,10 +12,9 @@ import (
 	"time"
 
 	"github.com/gruntwork-io/terratest/modules/helm"
-	"github.com/hashicorp/consul/api"
-
 	"github.com/gruntwork-io/terratest/modules/random"
 	"github.com/hashicorp/consul-k8s/acceptance/framework/logger"
+	"github.com/hashicorp/consul/api"
 	"github.com/hashicorp/consul/sdk/testutil/retry"
 	"github.com/stretchr/testify/require"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
diff --git a/acceptance/framework/k8s/helpers.go b/acceptance/framework/k8s/helpers.go
index 1b895a5e17..3f981336ee 100644
--- a/acceptance/framework/k8s/helpers.go
+++ b/acceptance/framework/k8s/helpers.go
@@ -125,3 +125,17 @@ func ServiceHost(t *testing.T, cfg *config.TestConfig, ctx environment.TestConte
 		return host
 	}
 }
+
+// CopySecret copies a Kubernetes secret from one cluster to another.
+func CopySecret(t *testing.T, sourceContext, destContext environment.TestContext, secretName string) {
+	t.Helper()
+	var secret *corev1.Secret
+	var err error
+	retry.Run(t, func(r *retry.R) {
+		secret, err = sourceContext.KubernetesClient(t).CoreV1().Secrets(sourceContext.KubectlOptions(t).Namespace).Get(context.Background(), secretName, metav1.GetOptions{})
+		require.NoError(r, err)
+		secret.ResourceVersion = ""
+	})
+	_, err = destContext.KubernetesClient(t).CoreV1().Secrets(destContext.KubectlOptions(t).Namespace).Create(context.Background(), secret, metav1.CreateOptions{})
+	require.NoError(t, err)
+}
diff --git a/acceptance/tests/fixtures/cases/crd-peers/default/kustomization.yaml b/acceptance/tests/fixtures/cases/crd-peers/default/kustomization.yaml
new file mode 100644
index 0000000000..499fdc5bc1
--- /dev/null
+++ b/acceptance/tests/fixtures/cases/crd-peers/default/kustomization.yaml
@@ -0,0 +1,5 @@
+resources:
+  - ../../../bases/exportedservices-default
+
+patchesStrategicMerge:
+- patch.yaml
diff --git a/acceptance/tests/fixtures/cases/crd-peers/default/patch.yaml b/acceptance/tests/fixtures/cases/crd-peers/default/patch.yaml
new file mode 100644
index 0000000000..b0dcd89ebb
--- /dev/null
+++ b/acceptance/tests/fixtures/cases/crd-peers/default/patch.yaml
@@ -0,0 +1,9 @@
+apiVersion: consul.hashicorp.com/v1alpha1
+kind: ExportedServices
+metadata:
+  name: default
+spec:
+  services:
+    - name: static-server
+      consumers:
+        - peer: client
diff --git a/acceptance/tests/fixtures/cases/static-client-peers/default/kustomization.yaml b/acceptance/tests/fixtures/cases/static-client-peers/default/kustomization.yaml
new file mode 100644
index 0000000000..7191edfb80
--- /dev/null
+++ b/acceptance/tests/fixtures/cases/static-client-peers/default/kustomization.yaml
@@ -0,0 +1,5 @@
+resources:
+  - ../../../bases/static-client
+
+patchesStrategicMerge:
+  - patch.yaml
\ No newline at end of file
diff --git a/acceptance/tests/fixtures/cases/static-client-peers/default/patch.yaml b/acceptance/tests/fixtures/cases/static-client-peers/default/patch.yaml
new file mode 100644
index 0000000000..715485e0f8
--- /dev/null
+++ b/acceptance/tests/fixtures/cases/static-client-peers/default/patch.yaml
@@ -0,0 +1,10 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: static-client
+spec:
+  template:
+    metadata:
+      annotations:
+        "consul.hashicorp.com/connect-inject": "true"
+        "consul.hashicorp.com/connect-service-upstreams": "static-server.svc.server.peer:1234"
\ No newline at end of file
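Note on the helper above: `k8s.CopySecret` retries the read on the source cluster, clears the `ResourceVersion`, and creates the secret in the destination cluster, but it does not register any cleanup. A minimal usage sketch, assuming the same test-framework packages used elsewhere in this change (the function name and contexts here are illustrative only):

```go
package peering

import (
	"testing"

	"github.com/hashicorp/consul-k8s/acceptance/framework/config"
	"github.com/hashicorp/consul-k8s/acceptance/framework/environment"
	"github.com/hashicorp/consul-k8s/acceptance/framework/helpers"
	"github.com/hashicorp/consul-k8s/acceptance/framework/k8s"
)

// copyPeeringTokenSketch copies the "api-token" secret from the acceptor
// cluster to the dialer cluster and registers cleanup for the copied secret.
// The contexts are assumed to be wired up by the test suite, as in the tests
// in this change; this is a sketch, not part of the framework.
func copyPeeringTokenSketch(t *testing.T, cfg *config.TestConfig, acceptorCtx, dialerCtx environment.TestContext) {
	t.Helper()

	// CopySecret retries the Get on the source cluster, clears the
	// ResourceVersion, and creates the secret in the destination cluster.
	k8s.CopySecret(t, acceptorCtx, dialerCtx, "api-token")

	// CopySecret does not register cleanup itself, so delete the copy when
	// the test finishes, mirroring what the dialer cleanup below does.
	helpers.Cleanup(t, cfg.NoCleanupOnFailure, func() {
		k8s.RunKubectl(t, dialerCtx.KubectlOptions(t), "delete", "secret", "api-token")
	})
}
```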
diff --git a/acceptance/tests/partitions/partitions_connect_test.go b/acceptance/tests/partitions/partitions_connect_test.go
index 369da09c26..d57e1af44a 100644
--- a/acceptance/tests/partitions/partitions_connect_test.go
+++ b/acceptance/tests/partitions/partitions_connect_test.go
@@ -138,19 +138,19 @@ func TestPartitions_Connect(t *testing.T) {
 			caKeySecretName := fmt.Sprintf("%s-consul-ca-key", releaseName)
 
 			logger.Logf(t, "retrieving ca cert secret %s from the server cluster and applying to the client cluster", caCertSecretName)
-			copySecret(t, serverClusterContext, clientClusterContext, caCertSecretName)
+			k8s.CopySecret(t, serverClusterContext, clientClusterContext, caCertSecretName)
 
 			if !c.ACLsAndAutoEncryptEnabled {
 				// When auto-encrypt is disabled, we need both
 				// the CA cert and CA key to be available in the clients cluster to generate client certificates and keys.
 				logger.Logf(t, "retrieving ca key secret %s from the server cluster and applying to the client cluster", caKeySecretName)
-				copySecret(t, serverClusterContext, clientClusterContext, caKeySecretName)
+				k8s.CopySecret(t, serverClusterContext, clientClusterContext, caKeySecretName)
 			}
 
 			partitionToken := fmt.Sprintf("%s-consul-partitions-acl-token", releaseName)
 			if c.ACLsAndAutoEncryptEnabled {
 				logger.Logf(t, "retrieving partition token secret %s from the server cluster and applying to the client cluster", partitionToken)
-				copySecret(t, serverClusterContext, clientClusterContext, partitionToken)
+				k8s.CopySecret(t, serverClusterContext, clientClusterContext, partitionToken)
 			}
 
 			partitionServiceName := fmt.Sprintf("%s-consul-partition", releaseName)
@@ -629,13 +629,3 @@ func TestPartitions_Connect(t *testing.T) {
 		})
 	}
 }
-
-func copySecret(t *testing.T, sourceContext, destContext environment.TestContext, secretName string) {
-	t.Helper()
-
-	secret, err := sourceContext.KubernetesClient(t).CoreV1().Secrets(sourceContext.KubectlOptions(t).Namespace).Get(context.Background(), secretName, metav1.GetOptions{})
-	secret.ResourceVersion = ""
-	require.NoError(t, err)
-	_, err = destContext.KubernetesClient(t).CoreV1().Secrets(destContext.KubectlOptions(t).Namespace).Create(context.Background(), secret, metav1.CreateOptions{})
-	require.NoError(t, err)
-}
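The sync test below gets the same call-site change. If secret propagation ever turns flaky, an extra assertion along these lines could confirm the copy is readable in the destination cluster before the Helm install consumes it (a sketch only, reusing the retry and client-go calls already present in `CopySecret`; the helper name is hypothetical):

```go
package partitions

import (
	"context"
	"testing"

	"github.com/hashicorp/consul-k8s/acceptance/framework/environment"
	"github.com/hashicorp/consul/sdk/testutil/retry"
	"github.com/stretchr/testify/require"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// requireSecretCopied is a sketch of an extra assertion: wait until the named
// secret is readable, with data, in the destination cluster's namespace after
// k8s.CopySecret has run.
func requireSecretCopied(t *testing.T, destCtx environment.TestContext, secretName string) {
	t.Helper()

	ns := destCtx.KubectlOptions(t).Namespace
	retry.Run(t, func(r *retry.R) {
		secret, err := destCtx.KubernetesClient(t).CoreV1().Secrets(ns).Get(context.Background(), secretName, metav1.GetOptions{})
		require.NoError(r, err)
		require.NotEmpty(r, secret.Data)
	})
}
```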
logger.Logf(t, "retrieving ca key secret %s from the server cluster and applying to the client cluster", caKeySecretName) - copySecret(t, primaryClusterContext, secondaryClusterContext, caKeySecretName) + k8s.CopySecret(t, primaryClusterContext, secondaryClusterContext, caKeySecretName) } partitionToken := fmt.Sprintf("%s-consul-partitions-acl-token", releaseName) if c.ACLsAndAutoEncryptEnabled { logger.Logf(t, "retrieving partition token secret %s from the server cluster and applying to the client cluster", partitionToken) - copySecret(t, primaryClusterContext, secondaryClusterContext, partitionToken) + k8s.CopySecret(t, primaryClusterContext, secondaryClusterContext, partitionToken) } partitionServiceName := fmt.Sprintf("%s-consul-partition", releaseName) diff --git a/acceptance/tests/peering/peering_connect_namespaces_test.go b/acceptance/tests/peering/peering_connect_namespaces_test.go new file mode 100644 index 0000000000..9388eec360 --- /dev/null +++ b/acceptance/tests/peering/peering_connect_namespaces_test.go @@ -0,0 +1,301 @@ +package peering + +import ( + "context" + "strconv" + "testing" + + terratestk8s "github.com/gruntwork-io/terratest/modules/k8s" + "github.com/hashicorp/consul-k8s/acceptance/framework/consul" + "github.com/hashicorp/consul-k8s/acceptance/framework/environment" + "github.com/hashicorp/consul-k8s/acceptance/framework/helpers" + "github.com/hashicorp/consul-k8s/acceptance/framework/k8s" + "github.com/hashicorp/consul-k8s/acceptance/framework/logger" + "github.com/hashicorp/consul/api" + "github.com/stretchr/testify/require" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +const staticClientName = "static-client" +const staticServerName = "static-server" +const staticServerNamespace = "ns1" +const staticClientNamespace = "ns2" + +// Test that Connect works in installations for X-Peers networking. +func TestPeering_ConnectNamespaces(t *testing.T) { + env := suite.Environment() + cfg := suite.Config() + + if !cfg.EnableEnterprise { + t.Skipf("skipping this test because -enable-enterprise is not set") + } + + if cfg.EnableTransparentProxy { + t.Skipf("skipping this test because Transparent Proxy is enabled") + } + + const staticServerPeer = "server" + const staticClientPeer = "client" + const defaultNamespace = "default" + cases := []struct { + name string + destinationNamespace string + mirrorK8S bool + ACLsAndAutoEncryptEnabled bool + }{ + { + "default destination namespace", + defaultNamespace, + false, + false, + }, + { + "single destination namespace", + staticServerNamespace, + false, + false, + }, + { + "mirror k8s namespaces", + staticServerNamespace, + true, + false, + }, + } + + for _, c := range cases { + t.Run(c.name, func(t *testing.T) { + staticServerPeerClusterContext := env.DefaultContext(t) + staticClientPeerClusterContext := env.Context(t, environment.SecondaryContextName) + + commonHelmValues := map[string]string{ + "global.peering.enabled": "true", + "global.enableConsulNamespaces": "true", + + "global.image": "hashicorp/consul-enterprise:1.13.0-alpha2-ent", + + "global.tls.enabled": "false", + "global.tls.httpsOnly": strconv.FormatBool(c.ACLsAndAutoEncryptEnabled), + "global.tls.enableAutoEncrypt": strconv.FormatBool(c.ACLsAndAutoEncryptEnabled), + + "global.acls.manageSystemACLs": strconv.FormatBool(c.ACLsAndAutoEncryptEnabled), + + "connectInject.enabled": "true", + "connectInject.transparentProxy.defaultEnabled": "false", + // When mirroringK8S is set, this setting is ignored. 
+ "connectInject.consulNamespaces.consulDestinationNamespace": c.destinationNamespace, + "connectInject.consulNamespaces.mirroringK8S": strconv.FormatBool(c.mirrorK8S), + + "meshGateway.enabled": "true", + "meshGateway.replicas": "1", + + "controller.enabled": "true", + } + + staticServerPeerHelmValues := map[string]string{ + "global.datacenter": staticServerPeer, + } + + // On Kind, there are no load balancers but since all clusters + // share the same node network (docker bridge), we can use + // a NodePort service so that we can access node(s) in a different Kind cluster. + if cfg.UseKind { + staticServerPeerHelmValues["server.exposeGossipAndRPCPorts"] = "true" + staticServerPeerHelmValues["meshGateway.service.type"] = "NodePort" + staticServerPeerHelmValues["meshGateway.service.nodePort"] = "30100" + } + + releaseName := helpers.RandomName() + + helpers.MergeMaps(staticServerPeerHelmValues, commonHelmValues) + + // Install the first peer where static-server will be deployed in the static-server kubernetes context. + staticServerPeerCluster := consul.NewHelmCluster(t, staticServerPeerHelmValues, staticServerPeerClusterContext, cfg, releaseName) + staticServerPeerCluster.Create(t) + + staticClientPeerHelmValues := map[string]string{ + "global.datacenter": staticClientPeer, + } + + if cfg.UseKind { + staticClientPeerHelmValues["server.exposeGossipAndRPCPorts"] = "true" + staticClientPeerHelmValues["meshGateway.service.type"] = "NodePort" + staticClientPeerHelmValues["meshGateway.service.nodePort"] = "30100" + } + + helpers.MergeMaps(staticClientPeerHelmValues, commonHelmValues) + + // Install the second peer where static-client will be deployed in the static-client kubernetes context. + staticClientPeerCluster := consul.NewHelmCluster(t, staticClientPeerHelmValues, staticClientPeerClusterContext, cfg, releaseName) + staticClientPeerCluster.Create(t) + + // Create the peering acceptor on the client peer. + k8s.KubectlApply(t, staticClientPeerClusterContext.KubectlOptions(t), "../fixtures/bases/peering/peering-acceptor.yaml") + helpers.Cleanup(t, cfg.NoCleanupOnFailure, func() { + k8s.KubectlDelete(t, staticClientPeerClusterContext.KubectlOptions(t), "../fixtures/bases/peering/peering-acceptor.yaml") + }) + + // Copy secret from client peer to server peer. + k8s.CopySecret(t, staticClientPeerClusterContext, staticServerPeerClusterContext, "api-token") + + // Create the peering dialer on the server peer. 
+
+			staticServerOpts := &terratestk8s.KubectlOptions{
+				ContextName: staticServerPeerClusterContext.KubectlOptions(t).ContextName,
+				ConfigPath:  staticServerPeerClusterContext.KubectlOptions(t).ConfigPath,
+				Namespace:   staticServerNamespace,
+			}
+			staticClientOpts := &terratestk8s.KubectlOptions{
+				ContextName: staticClientPeerClusterContext.KubectlOptions(t).ContextName,
+				ConfigPath:  staticClientPeerClusterContext.KubectlOptions(t).ConfigPath,
+				Namespace:   staticClientNamespace,
+			}
+
+			logger.Logf(t, "creating namespaces %s in server peer", staticServerNamespace)
+			k8s.RunKubectl(t, staticServerPeerClusterContext.KubectlOptions(t), "create", "ns", staticServerNamespace)
+			helpers.Cleanup(t, cfg.NoCleanupOnFailure, func() {
+				k8s.RunKubectl(t, staticServerPeerClusterContext.KubectlOptions(t), "delete", "ns", staticServerNamespace)
+			})
+
+			logger.Logf(t, "creating namespaces %s in client peer", staticClientNamespace)
+			k8s.RunKubectl(t, staticClientPeerClusterContext.KubectlOptions(t), "create", "ns", staticClientNamespace)
+			helpers.Cleanup(t, cfg.NoCleanupOnFailure, func() {
+				k8s.RunKubectl(t, staticClientPeerClusterContext.KubectlOptions(t), "delete", "ns", staticClientNamespace)
+			})
+
+			staticServerPeerClient, _ := staticServerPeerCluster.SetupConsulClient(t, c.ACLsAndAutoEncryptEnabled)
+			staticClientPeerClient, _ := staticClientPeerCluster.SetupConsulClient(t, c.ACLsAndAutoEncryptEnabled)
+
+			serverQueryOpts := &api.QueryOptions{Namespace: staticServerNamespace}
+			clientQueryOpts := &api.QueryOptions{Namespace: staticClientNamespace}
+
+			if !c.mirrorK8S {
+				serverQueryOpts = &api.QueryOptions{Namespace: c.destinationNamespace}
+				clientQueryOpts = &api.QueryOptions{Namespace: c.destinationNamespace}
+			}
+
+			// Create a ProxyDefaults resource to configure services to use the mesh
+			// gateways.
+			logger.Log(t, "creating proxy-defaults config")
+			kustomizeDir := "../fixtures/bases/mesh-gateway"
+
+			k8s.KubectlApplyK(t, staticServerPeerClusterContext.KubectlOptions(t), kustomizeDir)
+			helpers.Cleanup(t, cfg.NoCleanupOnFailure, func() {
+				k8s.KubectlDeleteK(t, staticServerPeerClusterContext.KubectlOptions(t), kustomizeDir)
+			})
+
+			k8s.KubectlApplyK(t, staticClientPeerClusterContext.KubectlOptions(t), kustomizeDir)
+			helpers.Cleanup(t, cfg.NoCleanupOnFailure, func() {
+				k8s.KubectlDeleteK(t, staticClientPeerClusterContext.KubectlOptions(t), kustomizeDir)
+			})
+
+			logger.Log(t, "creating static-server in server peer")
+			k8s.DeployKustomize(t, staticServerOpts, cfg.NoCleanupOnFailure, cfg.DebugDirectory, "../fixtures/cases/static-server-inject")
+
+			logger.Log(t, "creating static-client deployments in client peer")
+			if c.destinationNamespace == defaultNamespace {
+				k8s.DeployKustomize(t, staticClientOpts, cfg.NoCleanupOnFailure, cfg.DebugDirectory, "../fixtures/cases/static-client-peers/default-namespace")
+			} else {
+				k8s.DeployKustomize(t, staticClientOpts, cfg.NoCleanupOnFailure, cfg.DebugDirectory, "../fixtures/cases/static-client-peers/non-default-namespace")
+			}
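The `serverQueryOpts`/`clientQueryOpts` logic above encodes the namespace matrix: with mirroring, services land in Consul namespaces named after their Kubernetes namespaces; otherwise everything lands in the single destination namespace. The same rule as a small sketch (illustrative only, the test inlines it):

```go
package peering

import "github.com/hashicorp/consul/api"

// queryOptsForNamespace is a sketch of the namespace rule used by the test:
// when Kubernetes namespaces are mirrored, query the Consul namespace named
// after the source Kubernetes namespace; otherwise query the single
// destination namespace configured on the Helm chart.
func queryOptsForNamespace(mirrorK8S bool, kubeNamespace, destinationNamespace string) *api.QueryOptions {
	if mirrorK8S {
		return &api.QueryOptions{Namespace: kubeNamespace}
	}
	return &api.QueryOptions{Namespace: destinationNamespace}
}
```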
+			// Check that both static-server and static-client have been injected and now have 2 containers.
+			podList, err := staticServerPeerClusterContext.KubernetesClient(t).CoreV1().Pods(metav1.NamespaceAll).List(context.Background(), metav1.ListOptions{
+				LabelSelector: "app=static-server",
+			})
+			require.NoError(t, err)
+			require.Len(t, podList.Items, 1)
+			require.Len(t, podList.Items[0].Spec.Containers, 2)
+
+			podList, err = staticClientPeerClusterContext.KubernetesClient(t).CoreV1().Pods(metav1.NamespaceAll).List(context.Background(), metav1.ListOptions{
+				LabelSelector: "app=static-client",
+			})
+			require.NoError(t, err)
+			require.Len(t, podList.Items, 1)
+			require.Len(t, podList.Items[0].Spec.Containers, 2)
+
+			// Make sure that services are registered in the correct namespace.
+			// If mirroring is enabled, we expect services to be registered in the
+			// Consul namespace with the same name as their source
+			// Kubernetes namespace.
+			// If a single destination namespace is set, we expect all services
+			// to be registered in that destination Consul namespace.
+			// Server cluster.
+			services, _, err := staticServerPeerClient.Catalog().Service(staticServerName, "", serverQueryOpts)
+			require.NoError(t, err)
+			require.Len(t, services, 1)
+
+			// Client cluster.
+			services, _, err = staticClientPeerClient.Catalog().Service(staticClientName, "", clientQueryOpts)
+			require.NoError(t, err)
+			require.Len(t, services, 1)
+
+			logger.Log(t, "creating exported services")
+			if c.destinationNamespace == defaultNamespace {
+				k8s.KubectlApplyK(t, staticServerPeerClusterContext.KubectlOptions(t), "../fixtures/cases/crd-peers/default-namespace")
+				helpers.Cleanup(t, cfg.NoCleanupOnFailure, func() {
+					k8s.KubectlDeleteK(t, staticServerPeerClusterContext.KubectlOptions(t), "../fixtures/cases/crd-peers/default-namespace")
+				})
+			} else {
+				k8s.KubectlApplyK(t, staticServerPeerClusterContext.KubectlOptions(t), "../fixtures/cases/crd-peers/non-default-namespace")
+				helpers.Cleanup(t, cfg.NoCleanupOnFailure, func() {
+					k8s.KubectlDeleteK(t, staticServerPeerClusterContext.KubectlOptions(t), "../fixtures/cases/crd-peers/non-default-namespace")
+				})
+			}
+
+			logger.Log(t, "checking that connection is successful")
+			k8s.CheckStaticServerConnectionSuccessful(t, staticClientOpts, staticClientName, "http://localhost:1234")
+
+			denyAllIntention := &api.ServiceIntentionsConfigEntry{
+				Name:      "*",
+				Kind:      api.ServiceIntentions,
+				Namespace: "*",
+				Sources: []*api.SourceIntention{
+					{
+						Name:      "*",
+						Namespace: "*",
+						Action:    api.IntentionActionDeny,
+						Peer:      staticClientPeer,
+					},
+				},
+			}
+			_, _, err = staticServerPeerClient.ConfigEntries().Set(denyAllIntention, &api.WriteOptions{})
+			require.NoError(t, err)
+
+			logger.Log(t, "checking that the connection is not successful because there's no allow intention")
+			k8s.CheckStaticServerConnectionFailing(t, staticClientOpts, staticClientName, "http://localhost:1234")
+
+			intention := &api.ServiceIntentionsConfigEntry{
+				Name:      staticServerName,
+				Kind:      api.ServiceIntentions,
+				Namespace: staticServerNamespace,
+				Sources: []*api.SourceIntention{
+					{
+						Name:      staticClientName,
+						Namespace: staticClientNamespace,
+						Action:    api.IntentionActionAllow,
+						Peer:      staticClientPeer,
+					},
+				},
+			}
+
+			// Set the destination namespace to be the same
+			// unless mirrorK8S is true.
+			if !c.mirrorK8S {
+				intention.Namespace = c.destinationNamespace
+				intention.Sources[0].Namespace = c.destinationNamespace
+			}
+
+			logger.Log(t, "creating intentions in server peer")
+			_, _, err = staticServerPeerClient.ConfigEntries().Set(intention, &api.WriteOptions{})
+			require.NoError(t, err)
+
+			logger.Log(t, "checking that connection is successful")
+			k8s.CheckStaticServerConnectionSuccessful(t, staticClientOpts, staticClientName, "http://localhost:1234")
+		})
+	}
+}
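A possible extra assertion for the intention step that closes the test above: read the entry back and check that the peered source was stored as written. A sketch using the same `ConfigEntries` API already exercised by the test (the namespace argument applies to the Enterprise cases here; the helper name is illustrative):

```go
package peering

import (
	"testing"

	"github.com/hashicorp/consul/api"
	"github.com/stretchr/testify/require"
)

// requirePeeredIntention is a sketch: fetch a service-intentions config entry
// and assert that its first source is the expected peered service.
func requirePeeredIntention(t *testing.T, client *api.Client, serviceName, namespace, sourceName, sourcePeer string) {
	t.Helper()

	entry, _, err := client.ConfigEntries().Get(api.ServiceIntentions, serviceName, &api.QueryOptions{Namespace: namespace})
	require.NoError(t, err)

	intentions, ok := entry.(*api.ServiceIntentionsConfigEntry)
	require.True(t, ok)
	require.NotEmpty(t, intentions.Sources)
	require.Equal(t, sourceName, intentions.Sources[0].Name)
	require.Equal(t, sourcePeer, intentions.Sources[0].Peer)
}
```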
diff --git a/acceptance/tests/peering/peering_connect_test.go b/acceptance/tests/peering/peering_connect_test.go
index 7fa9e622d9..12e181502e 100644
--- a/acceptance/tests/peering/peering_connect_test.go
+++ b/acceptance/tests/peering/peering_connect_test.go
@@ -6,62 +6,33 @@ import (
 	"testing"
 
 	terratestk8s "github.com/gruntwork-io/terratest/modules/k8s"
-	"github.com/hashicorp/consul-k8s/acceptance/framework/config"
 	"github.com/hashicorp/consul-k8s/acceptance/framework/consul"
 	"github.com/hashicorp/consul-k8s/acceptance/framework/environment"
 	"github.com/hashicorp/consul-k8s/acceptance/framework/helpers"
 	"github.com/hashicorp/consul-k8s/acceptance/framework/k8s"
 	"github.com/hashicorp/consul-k8s/acceptance/framework/logger"
 	"github.com/hashicorp/consul/api"
-	"github.com/hashicorp/consul/sdk/testutil/retry"
 	"github.com/stretchr/testify/require"
-	corev1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 )
 
-const staticClientName = "static-client"
-const staticServerName = "static-server"
-const staticServerNamespace = "ns1"
-const staticClientNamespace = "ns2"
-
 // Test that Connect works in installations for X-Peers networking.
 func TestPeering_Connect(t *testing.T) {
 	env := suite.Environment()
 	cfg := suite.Config()
 
-	if !cfg.EnableEnterprise {
-		t.Skipf("skipping this test because -enable-enterprise is not set")
-	}
-
 	if cfg.EnableTransparentProxy {
 		t.Skipf("skipping this test because Transparent Proxy is enabled")
 	}
 
 	const staticServerPeer = "server"
 	const staticClientPeer = "client"
-	const defaultNamespace = "default"
 	cases := []struct {
 		name                      string
-		destinationNamespace      string
-		mirrorK8S                 bool
 		ACLsAndAutoEncryptEnabled bool
 	}{
 		{
-			"default destination namespace",
-			defaultNamespace,
-			false,
-			false,
-		},
-		{
-			"single destination namespace",
-			staticServerNamespace,
-			false,
-			false,
-		},
-		{
-			"mirror k8s namespaces",
-			staticServerNamespace,
-			true,
+			"default installation",
 			false,
 		},
 	}
@@ -75,7 +46,7 @@ func TestPeering_Connect(t *testing.T) {
 				"global.peering.enabled":        "true",
 				"global.enableConsulNamespaces": "true",
 
-				"global.image": "thisisnotashwin/consul@sha256:58ce5c56fee4ffa6f347b988b1aa6fcd118b364018d28d57de8d455a7ea127e9",
+				"global.image": "hashicorp/consul:1.13.0-alpha2",
 
 				"global.tls.enabled":   "false",
 				"global.tls.httpsOnly": strconv.FormatBool(c.ACLsAndAutoEncryptEnabled),
@@ -85,9 +56,6 @@ func TestPeering_Connect(t *testing.T) {
 
 				"connectInject.enabled":                         "true",
 				"connectInject.transparentProxy.defaultEnabled": "false",
-				// When mirroringK8S is set, this setting is ignored.
-				"connectInject.consulNamespaces.consulDestinationNamespace": c.destinationNamespace,
-				"connectInject.consulNamespaces.mirroringK8S":               strconv.FormatBool(c.mirrorK8S),
 
 				"meshGateway.enabled":  "true",
 				"meshGateway.replicas": "1",
@@ -139,11 +107,12 @@ func TestPeering_Connect(t *testing.T) {
 			})
 
 			// Copy secret from client peer to server peer.
-			copySecret(t, cfg, staticClientPeerClusterContext, staticServerPeerClusterContext, "api-token")
+			k8s.CopySecret(t, staticClientPeerClusterContext, staticServerPeerClusterContext, "api-token")
 
 			// Create the peering dialer on the server peer.
 			k8s.KubectlApply(t, staticServerPeerClusterContext.KubectlOptions(t), "../fixtures/bases/peering/peering-dialer.yaml")
 			helpers.Cleanup(t, cfg.NoCleanupOnFailure, func() {
+				k8s.RunKubectl(t, staticServerPeerClusterContext.KubectlOptions(t), "delete", "secret", "api-token")
 				k8s.KubectlDelete(t, staticServerPeerClusterContext.KubectlOptions(t), "../fixtures/bases/peering/peering-dialer.yaml")
 			})
 
@@ -173,14 +142,6 @@ func TestPeering_Connect(t *testing.T) {
 			staticServerPeerClient, _ := staticServerPeerCluster.SetupConsulClient(t, c.ACLsAndAutoEncryptEnabled)
 			staticClientPeerClient, _ := staticClientPeerCluster.SetupConsulClient(t, c.ACLsAndAutoEncryptEnabled)
 
-			serverQueryOpts := &api.QueryOptions{Namespace: staticServerNamespace}
-			clientQueryOpts := &api.QueryOptions{Namespace: staticClientNamespace}
-
-			if !c.mirrorK8S {
-				serverQueryOpts = &api.QueryOptions{Namespace: c.destinationNamespace}
-				clientQueryOpts = &api.QueryOptions{Namespace: c.destinationNamespace}
-			}
-
 			// Create a ProxyDefaults resource to configure services to use the mesh
 			// gateways.
 			logger.Log(t, "creating proxy-defaults config")
@@ -200,11 +161,7 @@ func TestPeering_Connect(t *testing.T) {
 			k8s.DeployKustomize(t, staticServerOpts, cfg.NoCleanupOnFailure, cfg.DebugDirectory, "../fixtures/cases/static-server-inject")
 
 			logger.Log(t, "creating static-client deployments in client peer")
-			if c.destinationNamespace == defaultNamespace {
-				k8s.DeployKustomize(t, staticClientOpts, cfg.NoCleanupOnFailure, cfg.DebugDirectory, "../fixtures/cases/static-client-peers/default-namespace")
-			} else {
-				k8s.DeployKustomize(t, staticClientOpts, cfg.NoCleanupOnFailure, cfg.DebugDirectory, "../fixtures/cases/static-client-peers/non-default-namespace")
-			}
+			k8s.DeployKustomize(t, staticClientOpts, cfg.NoCleanupOnFailure, cfg.DebugDirectory, "../fixtures/cases/static-client-peers/default")
 			// Check that both static-server and static-client have been injected and now have 2 containers.
 			podList, err := staticServerPeerClusterContext.KubernetesClient(t).CoreV1().Pods(metav1.NamespaceAll).List(context.Background(), metav1.ListOptions{
 				LabelSelector: "app=static-server",
@@ -221,110 +178,59 @@ func TestPeering_Connect(t *testing.T) {
 			require.Len(t, podList.Items[0].Spec.Containers, 2)
 
 			// Make sure that services are registered in the correct namespace.
-			// If mirroring is enabled, we expect services to be registered in the
-			// Consul namespace with the same name as their source
-			// Kubernetes namespace.
-			// If a single destination namespace is set, we expect all services
-			// to be registered in that destination Consul namespace.
 			// Server cluster.
-			services, _, err := staticServerPeerClient.Catalog().Service(staticServerName, "", serverQueryOpts)
+			services, _, err := staticServerPeerClient.Catalog().Service(staticServerName, "", &api.QueryOptions{})
 			require.NoError(t, err)
 			require.Len(t, services, 1)
 
 			// Client cluster.
-			services, _, err = staticClientPeerClient.Catalog().Service(staticClientName, "", clientQueryOpts)
+			services, _, err = staticClientPeerClient.Catalog().Service(staticClientName, "", &api.QueryOptions{})
 			require.NoError(t, err)
 			require.Len(t, services, 1)
 
 			logger.Log(t, "creating exported services")
-			if c.destinationNamespace == defaultNamespace {
-				k8s.KubectlApplyK(t, staticServerPeerClusterContext.KubectlOptions(t), "../fixtures/cases/crd-peers/default-namespace")
-				helpers.Cleanup(t, cfg.NoCleanupOnFailure, func() {
-					k8s.KubectlDeleteK(t, staticServerPeerClusterContext.KubectlOptions(t), "../fixtures/cases/crd-peers/default-namespace")
-				})
-			} else {
-				k8s.KubectlApplyK(t, staticServerPeerClusterContext.KubectlOptions(t), "../fixtures/cases/crd-peers/non-default-namespace")
-				helpers.Cleanup(t, cfg.NoCleanupOnFailure, func() {
-					k8s.KubectlDeleteK(t, staticServerPeerClusterContext.KubectlOptions(t), "../fixtures/cases/crd-peers/non-default-namespace")
-				})
-			}
-
+			k8s.KubectlApplyK(t, staticServerPeerClusterContext.KubectlOptions(t), "../fixtures/cases/crd-peers/default")
+			helpers.Cleanup(t, cfg.NoCleanupOnFailure, func() {
+				k8s.KubectlDeleteK(t, staticServerPeerClusterContext.KubectlOptions(t), "../fixtures/cases/crd-peers/default")
+			})
 			logger.Log(t, "checking that connection is successful")
 			k8s.CheckStaticServerConnectionSuccessful(t, staticClientOpts, staticClientName, "http://localhost:1234")
 
 			denyAllIntention := &api.ServiceIntentionsConfigEntry{
-				Name:      "*",
-				Kind:      api.ServiceIntentions,
-				Namespace: "*",
+				Name: "*",
+				Kind: api.ServiceIntentions,
 				Sources: []*api.SourceIntention{
 					{
-						Name:      "*",
-						Namespace: "*",
-						Action:    api.IntentionActionDeny,
-						Peer:      staticClientPeer,
+						Name:   "*",
+						Action: api.IntentionActionDeny,
+						Peer:   staticClientPeer,
 					},
 				},
 			}
 			_, _, err = staticServerPeerClient.ConfigEntries().Set(denyAllIntention, &api.WriteOptions{})
 			require.NoError(t, err)
 
-			helpers.Cleanup(t, cfg.NoCleanupOnFailure, func() {
-				_, err := staticServerPeerClient.ConfigEntries().Delete(api.ServiceIntentions, "*", &api.WriteOptions{})
-				require.NoError(t, err)
-			})
-
 			logger.Log(t, "checking that the connection is not successful because there's no allow intention")
 			k8s.CheckStaticServerConnectionFailing(t, staticClientOpts, staticClientName, "http://localhost:1234")
 
 			intention := &api.ServiceIntentionsConfigEntry{
-				Name:      staticServerName,
-				Kind:      api.ServiceIntentions,
-				Namespace: staticServerNamespace,
+				Name: staticServerName,
+				Kind: api.ServiceIntentions,
 				Sources: []*api.SourceIntention{
 					{
-						Name:      staticClientName,
-						Namespace: staticClientNamespace,
-						Action:    api.IntentionActionAllow,
-						Peer:      staticClientPeer,
+						Name:   staticClientName,
+						Action: api.IntentionActionAllow,
+						Peer:   staticClientPeer,
 					},
 				},
 			}
 
-			// Set the destination namespace to be the same
-			// unless mirrorK8S is true.
-			if !c.mirrorK8S {
-				intention.Namespace = c.destinationNamespace
-				intention.Sources[0].Namespace = c.destinationNamespace
-			}
-
 			logger.Log(t, "creating intentions in server peer")
 			_, _, err = staticServerPeerClient.ConfigEntries().Set(intention, &api.WriteOptions{})
 			require.NoError(t, err)
 
-			helpers.Cleanup(t, cfg.NoCleanupOnFailure, func() {
-				_, err := staticServerPeerClient.ConfigEntries().Delete(api.ServiceIntentions, staticServerName, &api.WriteOptions{})
-				require.NoError(t, err)
-			})
-
 			logger.Log(t, "checking that connection is successful")
 			k8s.CheckStaticServerConnectionSuccessful(t, staticClientOpts, staticClientName, "http://localhost:1234")
 		})
 	}
 }
-
-func copySecret(t *testing.T, cfg *config.TestConfig, sourceContext, destContext environment.TestContext, secretName string) {
-	t.Helper()
-	var secret *corev1.Secret
-	var err error
-	retry.Run(t, func(r *retry.R) {
-		secret, err = sourceContext.KubernetesClient(t).CoreV1().Secrets(sourceContext.KubectlOptions(t).Namespace).Get(context.Background(), secretName, metav1.GetOptions{})
-		secret.ResourceVersion = ""
-		require.NoError(r, err)
-	})
-	_, err = destContext.KubernetesClient(t).CoreV1().Secrets(destContext.KubectlOptions(t).Namespace).Create(context.Background(), secret, metav1.CreateOptions{})
-	require.NoError(t, err)
-
-	helpers.Cleanup(t, cfg.NoCleanupOnFailure, func() {
-		k8s.RunKubectl(t, destContext.KubectlOptions(t), "delete", "secret", "api-token")
-	})
-}