From b429aa38ef17b2168e95c63420ef9e0daacc88da Mon Sep 17 00:00:00 2001
From: Semir Patel
Date: Thu, 10 Nov 2022 16:10:17 +0000
Subject: [PATCH] backport of commit 77fb7c72fabce381ee6914d2ff2e739570f64e70

---
 .circleci/config.yml | 120 +-
 .github/ISSUE_TEMPLATE/config.yml | 3 -
 .github/workflows/build.yml | 34 +-
 .github/workflows/reusable-acceptance.yml | 70 +-
 .github/workflows/reusable-unit.yml | 8 +-
 .github/workflows/test.yml | 207 +-
 .gitignore | 1 -
 .go-version | 2 +-
 .release/release-metadata.hcl | 2 +-
 CHANGELOG.md | 103 +-
 CONTRIBUTING.md | 4 +-
 LICENSE => LICENSE.md | 2 -
 README.md | 2 +-
 acceptance/framework/config/config.go | 5 +-
 acceptance/framework/config/config_test.go | 6 +-
 acceptance/framework/consul/helm_cluster.go | 19 +-
 .../framework/consul/helm_cluster_test.go | 47 +-
 acceptance/framework/flags/flags.go | 12 +-
 acceptance/framework/k8s/deploy.go | 2 +-
 acceptance/go.mod | 2 +-
 acceptance/go.sum | 41 -
 acceptance/tests/basic/basic_test.go | 1 -
 acceptance/tests/cli/cli_install_test.go | 109 -
 acceptance/tests/cli/cli_upgrade_test.go | 43 -
 acceptance/tests/cli/main_test.go | 15 -
 .../connect/connect_external_servers_test.go | 142 -
 .../connect}/connect_helper.go | 28 +-
 .../connect/connect_inject_namespaces_test.go | 28 +-
 .../tests/connect/connect_inject_test.go | 278 +-
 .../tests/consul-dns/consul_dns_test.go | 12 +-
 .../tests/controller/controller_test.go | 20 +-
 .../bases/mesh-peering/kustomization.yaml | 2 -
 .../bases/mesh-peering/meshpeering.yaml | 7 -
 .../bases/static-metrics-app/deployment.yaml | 2 +-
 .../ingress_gateway_namespaces_test.go | 40 +-
 .../ingress-gateway/ingress_gateway_test.go | 16 +-
 .../main_test.go | 4 +-
 .../tests/mesh-gateway/mesh_gateway_test.go | 301 +
 acceptance/tests/metrics/metrics_test.go | 13 +-
 .../partitions/partitions_connect_test.go | 287 +-
 .../tests/partitions/partitions_sync_test.go | 56 +-
 acceptance/tests/peering/main_test.go | 19 +-
 .../peering_connect_namespaces_test.go | 64 +-
 .../tests/peering/peering_connect_test.go | 59 +-
 .../snapshot_agent_k8s_secret_test.go | 123 +-
 .../snapshot_agent_vault_test.go | 86 +-
 acceptance/tests/sync/sync_catalog_test.go | 47 +-
 .../terminating_gateway_destinations_test.go | 7 +-
 .../terminating_gateway_namespaces_test.go | 42 +-
 .../terminating_gateway_test.go | 16 +-
 acceptance/tests/vault/main_test.go | 7 +-
 .../tests/vault/vault_partitions_test.go | 3 +-
 .../wan-federation/wan_federation_test.go | 190 -
 charts/consul/Chart.yaml | 10 +-
 charts/consul/README.md | 2 +-
 charts/consul/templates/_helpers.tpl | 119 +-
 .../api-gateway-controller-deployment.yaml | 2 -
 charts/consul/templates/client-daemonset.yaml | 10 +-
 .../client-snapshot-agent-deployment.yaml | 281 +
 ...ient-snapshot-agent-podsecuritypolicy.yaml | 42 +
 .../templates/client-snapshot-agent-role.yaml | 26 +
 .../client-snapshot-agent-rolebinding.yaml | 22 +
 .../client-snapshot-agent-serviceaccount.yaml | 25 +
 .../templates/connect-inject-deployment.yaml | 174 +-
 .../connect-injector-disruptionbudget.yaml | 4 -
 .../templates/controller-deployment.yaml | 156 +-
 charts/consul/templates/crd-meshes.yaml | 12 -
 .../consul/templates/crd-servicedefaults.yaml | 11 -
 .../templates/crd-serviceresolvers.yaml | 35 -
 .../create-federation-secret-job.yaml | 29 +-
 .../templates/expose-servers-service.yaml | 4 +-
 .../ingress-gateways-deployment.yaml | 546 +-
 .../templates/mesh-gateway-deployment.yaml | 489 +-
 .../templates/mesh-gateway-service.yaml | 2 +-
 .../consul/templates/partition-init-job.yaml | 53 +-
 .../consul/templates/partition-service.yaml | 45 +
 .../consul/templates/server-acl-init-job.yaml | 421 +-
 .../templates/server-config-configmap.yaml | 9 +-
 .../templates/server-podsecuritypolicy.yaml | 4 +-
 charts/consul/templates/server-service.yaml | 8 +-
 .../server-snapshot-agent-configmap.yaml | 24 -
 .../consul/templates/server-statefulset.yaml | 180 +-
 .../templates/sync-catalog-deployment.yaml | 132 +-
 .../terminating-gateways-deployment.yaml | 410 +-
 .../terminating-gateways-service.yaml | 31 -
 charts/consul/test/docker/Test.dockerfile | 6 +-
 charts/consul/test/terraform/eks/main.tf | 4 +-
 .../api-gateway-controller-deployment.bats | 229 +-
 .../test/unit/client-config-configmap.bats | 18 +-
 charts/consul/test/unit/client-daemonset.bats | 430 +-
 .../test/unit/client-podsecuritypolicy.bats | 12 -
 charts/consul/test/unit/client-role.bats | 8 +-
 .../consul/test/unit/client-rolebinding.bats | 9 +
 .../client-securitycontextconstraints.bats | 7 -
 .../test/unit/client-serviceaccount.bats | 12 +-
 .../client-snapshot-agent-deployment.bats | 1094 +++
 ...ient-snapshot-agent-podsecuritypolicy.bats | 30 +
 .../test/unit/client-snapshot-agent-role.bats | 55 +
 .../client-snapshot-agent-rolebinding.bats | 40 +
 .../client-snapshot-agent-serviceaccount.bats | 83 +
 charts/consul/test/unit/cni-daemonset.bats | 1 +
 .../test/unit/connect-inject-clusterrole.bats | 8 +-
 .../connect-inject-clusterrolebinding.bats | 10 +-
 .../test/unit/connect-inject-deployment.bats | 1343 ++--
 ...t-inject-mutatingwebhookconfiguration.bats | 13 +-
 .../test/unit/connect-inject-service.bats | 9 +-
 .../unit/connect-inject-serviceaccount.bats | 8 +-
 .../connect-injector-disruptionbudget.bats | 42 +-
 .../test/unit/controller-clusterrole.bats | 8 +-
 .../unit/controller-clusterrolebinding.bats | 8 +-
 .../test/unit/controller-deployment.bats | 700 +-
 .../unit/controller-leader-election-role.bats | 8 +-
 ...ontroller-leader-election-rolebinding.bats | 8 +-
 ...ntroller-mutatingwebhookconfiguration.bats | 8 +-
 .../test/unit/controller-serviceaccount.bats | 8 +-
 .../test/unit/controller-webhook-service.bats | 8 +-
 .../test/unit/crd-exportedservices.bats | 10 +-
 .../consul/test/unit/crd-ingressgateways.bats | 22 +-
 charts/consul/test/unit/crd-meshes.bats | 22 +-
 .../consul/test/unit/crd-proxydefaults.bats | 10 +-
 .../consul/test/unit/crd-servicedefaults.bats | 10 +-
 .../test/unit/crd-serviceintentions.bats | 8 +-
 .../test/unit/crd-serviceresolvers.bats | 10 +-
 .../consul/test/unit/crd-servicerouters.bats | 10 +-
 .../test/unit/crd-servicesplitters.bats | 10 +-
 .../test/unit/crd-terminatinggateway.bats | 10 +-
 .../unit/create-federation-secret-job.bats | 60 +
 .../test/unit/expose-servers-service.bats | 92 +-
 .../unit/ingress-gateways-deployment.bats | 1243 +++-
 .../test/unit/mesh-gateway-clusterrole.bats | 2 +
 .../test/unit/mesh-gateway-deployment.bats | 1359 ++--
 .../test/unit/mesh-gateway-service.bats | 14 +-
 .../consul/test/unit/partition-init-job.bats | 403 +-
 .../partition-init-podsecuritypolicy.bats | 8 +-
 .../consul/test/unit/partition-init-role.bats | 10 +-
 .../test/unit/partition-init-rolebinding.bats | 6 +-
 .../unit/partition-init-serviceaccount.bats | 6 +-
 .../test/unit/partition-name-configmap.bats | 5 +-
 .../consul/test/unit/partition-service.bats | 133 +
 .../consul/test/unit/server-acl-init-job.bats | 441 +-
 .../test/unit/server-config-configmap.bats | 31 +-
 .../test/unit/server-podsecuritypolicy.bats | 4 +-
 charts/consul/test/unit/server-service.bats | 9 +-
 .../consul/test/unit/server-statefulset.bats | 956 +-
 .../test/unit/sync-catalog-deployment.bats | 690 +-
 .../unit/terminating-gateways-deployment.bats | 968 ++-
 .../unit/terminating-gateways-service.bats | 49 -
 .../webhook-cert-manager-clusterrole.bats | 8 +-
 ...bhook-cert-manager-clusterrolebinding.bats | 8 +-
 .../unit/webhook-cert-manager-configmap.bats | 8 +-
 .../unit/webhook-cert-manager-deployment.bats | 8 +-
 .../webhook-cert-manager-serviceaccount.bats | 8 +-
 charts/consul/values.yaml | 337 +-
 charts/demo/.helmignore | 23 -
 charts/demo/Chart.yaml | 24 -
 charts/demo/templates/frontend.yaml | 116 -
 charts/demo/templates/postgres.yaml | 76 -
 charts/demo/templates/product-api.yaml | 108 -
 charts/demo/templates/public-api.yaml | 79 -
 charts/demo/values.yaml | 1 -
 charts/embed_chart.go | 3 -
 charts/go.mod | 2 +-
 cli/cmd/install/install.go | 291 +-
 cli/cmd/install/install_test.go | 486 +-
 cli/cmd/status/status.go | 75 +-
 cli/cmd/status/status_test.go | 381 +-
 cli/cmd/uninstall/uninstall.go | 415 +-
 cli/cmd/uninstall/uninstall_test.go | 575 +-
 cli/cmd/upgrade/upgrade.go | 287 +-
 cli/cmd/upgrade/upgrade_test.go | 459 +-
 cli/common/error.go | 25 -
 cli/common/terminal/basic.go | 3 +-
 cli/common/terminal/ui.go | 60 +-
 cli/common/utils.go | 40 +-
 cli/config/config.go | 16 -
 cli/config/presets.go | 71 +
 cli/go.mod | 21 +-
 cli/go.sum | 192 -
 cli/helm/action.go | 83 -
 cli/helm/chart.go | 4 +-
 cli/helm/install.go | 140 -
 cli/helm/install_test.go | 82 -
 cli/helm/mock.go | 136 -
 cli/helm/upgrade.go | 149 -
 cli/helm/upgrade_test.go | 117 -
 cli/preset/cloud_preset.go | 431 --
 cli/preset/cloud_preset_test.go | 701 --
 cli/preset/demo.go | 43 -
 cli/preset/preset.go | 84 -
 cli/preset/preset_test.go | 78 -
 cli/preset/quickstart.go | 43 -
 cli/preset/secure.go | 37 -
 cli/version/version.go | 4 +-
 control-plane/Dockerfile | 6 +-
 control-plane/Makefile | 21 +
 .../api/v1alpha1/exportedservices_types.go | 2 +-
 .../v1alpha1/exportedservices_types_test.go | 8 +-
 .../api/v1alpha1/exportedservices_webhook.go | 8 +-
 .../v1alpha1/exportedservices_webhook_test.go | 9 +-
 .../api/v1alpha1/ingressgateway_webhook.go | 4 +-
 control-plane/api/v1alpha1/mesh_types.go | 37 +-
 control-plane/api/v1alpha1/mesh_types_test.go | 70 +-
 control-plane/api/v1alpha1/mesh_webhook.go | 24 +-
 .../api/v1alpha1/mesh_webhook_test.go | 24 +-
 .../api/v1alpha1/peeringacceptor_webhook.go | 7 +-
 .../v1alpha1/peeringacceptor_webhook_test.go | 7 +-
 .../api/v1alpha1/peeringdialer_webhook.go | 6 +-
 .../v1alpha1/peeringdialer_webhook_test.go | 7 +-
 .../api/v1alpha1/proxydefaults_webhook.go | 8 +-
 .../v1alpha1/proxydefaults_webhook_test.go | 7 +-
 .../api/v1alpha1/servicedefaults_types.go | 22 +-
 .../v1alpha1/servicedefaults_types_test.go | 4 -
 .../api/v1alpha1/servicedefaults_webhook.go | 4 +-
 .../api/v1alpha1/serviceintentions_webhook.go | 8 +-
 .../serviceintentions_webhook_test.go | 21 +-
 .../api/v1alpha1/serviceresolver_types.go | 47 +-
 .../v1alpha1/serviceresolver_types_test.go | 32 +-
 .../api/v1alpha1/serviceresolver_webhook.go | 4 +-
 .../api/v1alpha1/servicerouter_webhook.go | 4 +-
 .../api/v1alpha1/servicesplitter_webhook.go | 4 +-
 .../v1alpha1/terminatinggateway_webhook.go | 4 +-
 .../api/v1alpha1/zz_generated.deepcopy.go | 47 +-
 .../build-support/functions/10-util.sh | 13 +-
 .../to-consul/consul_node_services_client.go | 114 +
 .../consul_node_services_client_ent_test.go | 362 +
 .../consul_node_services_client_test.go | 184 +
 control-plane/catalog/to-consul/resource.go | 2 +-
 control-plane/catalog/to-consul/syncer.go | 89 +-
 .../catalog/to-consul/syncer_ent_test.go | 47 +-
 .../catalog/to-consul/syncer_test.go | 125 +-
 control-plane/catalog/to-k8s/source.go | 27 +-
 control-plane/catalog/to-k8s/source_test.go | 137 +-
 control-plane/cni/go.mod | 6 +-
 control-plane/cni/go.sum | 11 +-
 control-plane/commands.go | 10 +
 .../bases/consul.hashicorp.com_meshes.yaml | 12 -
 .../consul.hashicorp.com_servicedefaults.yaml | 11 -
 ...consul.hashicorp.com_serviceresolvers.yaml | 35 -
 control-plane/connect-inject/annotations.go | 37 +-
 .../consul_dataplane_sidecar.go | 369 -
 .../consul_dataplane_sidecar_test.go | 1035 ---
 .../connect-inject/consul_sidecar.go | 115 +
 .../connect-inject/consul_sidecar_test.go | 343 +
 .../connect-inject/container_init.go | 430 +-
 .../connect-inject/container_init_test.go | 1242 ++--
 .../connect-inject/container_volume.go | 2 +-
 control-plane/connect-inject/dns.go | 90 -
 control-plane/connect-inject/dns_test.go | 102 -
 .../connect-inject/endpoints_controller.go | 844 ++-
 .../endpoints_controller_ent_test.go | 1659 ++---
 .../endpoints_controller_test.go | 5947 ++++++++---------
 control-plane/connect-inject/envoy_sidecar.go | 217 +
 .../connect-inject/envoy_sidecar_test.go | 638 ++
 control-plane/connect-inject/mesh_webhook.go | 104 +-
 .../connect-inject/mesh_webhook_ent_test.go | 101 +-
 .../connect-inject/mesh_webhook_test.go | 88 +-
 .../connect-inject/metrics_configuration.go | 1 -
 .../peering_acceptor_controller.go | 132 +-
 .../peering_acceptor_controller_test.go | 472 +-
 .../peering_dialer_controller.go | 45 +-
 .../peering_dialer_controller_test.go | 108 +-
 .../connect-inject/redirect_traffic.go | 72 +-
 .../connect-inject/redirect_traffic_test.go | 178 +-
 control-plane/consul/consul.go | 41 -
 .../consul/mock_ServerConnectionManager.go | 59 -
 .../controller/configentry_controller.go | 33 +-
 .../configentry_controller_ent_test.go | 48 +-
 .../controller/configentry_controller_test.go | 363 +-
 .../exportedservices_controller_ent_test.go | 49 +-
 control-plane/go.mod | 18 +-
 control-plane/go.sum | 35 +-
 control-plane/helper/test/test_util.go | 85 +-
 .../subcommand/acl-init/command_test.go | 2 +-
 control-plane/subcommand/common/common.go | 19 +-
 .../subcommand/common/common_test.go | 8 +-
 control-plane/subcommand/common/test_util.go | 2 +-
 .../subcommand/connect-init/command.go | 339 +-
 .../connect-init/command_ent_test.go | 175 +-
 .../subcommand/connect-init/command_test.go | 930 +--
 .../subcommand/consul-logout/command_test.go | 4 +-
 .../subcommand/consul-sidecar/command.go | 427 ++
 .../consul-sidecar/command_ent_test.go | 90 +
 .../subcommand/consul-sidecar/command_test.go | 643 ++
 .../subcommand/controller/command.go | 145 +-
 .../subcommand/controller/command_test.go | 6 +-
 .../create-federation-secret/command_test.go | 14 +-
 control-plane/subcommand/flags/consul.go | 265 -
 control-plane/subcommand/flags/consul_test.go | 436 --
 control-plane/subcommand/flags/http.go | 4 -
 .../get-consul-client-ca/command_test.go | 8 +-
 .../subcommand/inject-connect/command.go | 354 +-
 .../subcommand/inject-connect/command_test.go | 156 +-
 .../subcommand/install-cni/binary.go | 2 +-
 .../subcommand/install-cni/cniconfig.go | 25 +-
 .../subcommand/install-cni/cniconfig_test.go | 14 +-
 .../subcommand/install-cni/command_test.go | 7 +-
 .../subcommand/install-cni/kubeconfig.go | 3 +-
 .../subcommand/install-cni/kubeconfig_test.go | 4 +-
 .../subcommand/partition-init/command.go | 92 +-
 .../partition-init/command_ent_test.go | 41 +-
 .../subcommand/server-acl-init/command.go | 144 +-
 .../server-acl-init/command_ent_test.go | 377 +-
 .../server-acl-init/command_test.go | 574 +-
 .../server-acl-init/create_or_update.go | 8 +-
 .../subcommand/server-acl-init/rules.go | 14 +-
 .../subcommand/server-acl-init/rules_test.go | 41 +-
 .../subcommand/server-acl-init/servers.go | 74 +-
 .../subcommand/service-address/command.go | 224 +
 .../service-address/command_test.go | 397 ++
 .../subcommand/sync-catalog/command.go | 120 +-
 .../sync-catalog/command_ent_test.go | 121 +-
 .../subcommand/sync-catalog/command_test.go | 124 +-
 .../webhook-cert-manager/command_test.go | 10 +-
 control-plane/version/version.go | 4 +-
 hack/aws-acceptance-test-cleanup/go.mod | 2 +-
 hack/copy-crds-to-chart/go.mod | 2 +-
 hack/helm-reference-gen/go.mod | 2 +-
 317 files changed, 22883 insertions(+), 23566 deletions(-)
 rename LICENSE => LICENSE.md (99%)
 delete mode 100644 acceptance/tests/cli/cli_install_test.go
 delete mode 100644 acceptance/tests/cli/cli_upgrade_test.go
 delete mode 100644 acceptance/tests/cli/main_test.go
 delete mode 100644 acceptance/tests/connect/connect_external_servers_test.go
 rename acceptance/{framework/connhelper => tests/connect}/connect_helper.go (92%)
 delete mode 100644 acceptance/tests/fixtures/bases/mesh-peering/kustomization.yaml
 delete mode 100644 acceptance/tests/fixtures/bases/mesh-peering/meshpeering.yaml
 rename acceptance/tests/{wan-federation => mesh-gateway}/main_test.go (72%)
 create mode 100644 acceptance/tests/mesh-gateway/mesh_gateway_test.go
 delete mode 100644 acceptance/tests/wan-federation/wan_federation_test.go
 create mode 100644 charts/consul/templates/client-snapshot-agent-deployment.yaml
 create mode 100644 charts/consul/templates/client-snapshot-agent-podsecuritypolicy.yaml
 create mode 100644 charts/consul/templates/client-snapshot-agent-role.yaml
 create mode 100644 charts/consul/templates/client-snapshot-agent-rolebinding.yaml
 create mode 100644 charts/consul/templates/client-snapshot-agent-serviceaccount.yaml
 create mode 100644 charts/consul/templates/partition-service.yaml
 delete mode 100644 charts/consul/templates/server-snapshot-agent-configmap.yaml
 delete mode 100644 charts/consul/templates/terminating-gateways-service.yaml
 create mode 100644 charts/consul/test/unit/client-snapshot-agent-deployment.bats
 create mode 100644 charts/consul/test/unit/client-snapshot-agent-podsecuritypolicy.bats
 create mode 100644 charts/consul/test/unit/client-snapshot-agent-role.bats
 create mode 100644 charts/consul/test/unit/client-snapshot-agent-rolebinding.bats
 create mode 100644 charts/consul/test/unit/client-snapshot-agent-serviceaccount.bats
 create mode 100755 charts/consul/test/unit/partition-service.bats
 delete mode 100644 charts/consul/test/unit/terminating-gateways-service.bats
 delete mode 100644 charts/demo/.helmignore
 delete mode 100644 charts/demo/Chart.yaml
 delete mode 100644 charts/demo/templates/frontend.yaml
 delete mode 100644 charts/demo/templates/postgres.yaml
 delete mode 100644 charts/demo/templates/product-api.yaml
 delete mode 100644 charts/demo/templates/public-api.yaml
 delete mode 100644 charts/demo/values.yaml
 delete mode 100644 cli/common/error.go
 delete mode 100644 cli/config/config.go
 create mode 100644 cli/config/presets.go
 delete mode 100644 cli/helm/install.go
 delete mode 100644 cli/helm/install_test.go
 delete mode 100644 cli/helm/mock.go
 delete mode 100644 cli/helm/upgrade.go
 delete mode 100644 cli/helm/upgrade_test.go
 delete mode 100644 cli/preset/cloud_preset.go
 delete mode 100644 cli/preset/cloud_preset_test.go
 delete mode 100644 cli/preset/demo.go
 delete mode 100644 cli/preset/preset.go
 delete mode 100644 cli/preset/preset_test.go
 delete mode 100644 cli/preset/quickstart.go
 delete mode 100644 cli/preset/secure.go
 create mode 100644 control-plane/catalog/to-consul/consul_node_services_client.go
 create mode 100644 control-plane/catalog/to-consul/consul_node_services_client_ent_test.go
 create mode 100644 control-plane/catalog/to-consul/consul_node_services_client_test.go
 delete mode 100644 control-plane/connect-inject/consul_dataplane_sidecar.go
 delete mode 100644 control-plane/connect-inject/consul_dataplane_sidecar_test.go
 create mode 100644 control-plane/connect-inject/consul_sidecar.go
 create mode 100644 control-plane/connect-inject/consul_sidecar_test.go
 delete mode 100644 control-plane/connect-inject/dns.go
 delete mode 100644 control-plane/connect-inject/dns_test.go
 create mode 100644 control-plane/connect-inject/envoy_sidecar.go
 create mode 100644 control-plane/connect-inject/envoy_sidecar_test.go
 delete mode 100644 control-plane/consul/mock_ServerConnectionManager.go
 create mode 100644 control-plane/subcommand/consul-sidecar/command.go
 create mode 100644 control-plane/subcommand/consul-sidecar/command_ent_test.go
 create mode 100644 control-plane/subcommand/consul-sidecar/command_test.go
 delete mode 100644 control-plane/subcommand/flags/consul.go
 delete mode 100644 control-plane/subcommand/flags/consul_test.go
 create mode 100644 control-plane/subcommand/service-address/command.go
 create mode 100644 control-plane/subcommand/service-address/command_test.go

diff --git a/.circleci/config.yml b/.circleci/config.yml
index 2884a491cd..05f6a209e9 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -6,7 +6,7 @@ orbs:
 executors:
   go:
     docker:
-      - image: docker.mirror.hashicorp.services/cimg/go:1.19.2
+      - image: docker.mirror.hashicorp.services/cimg/go:1.18.3
     environment:
       TEST_RESULTS: /tmp/test-results # path to where test results are saved
@@ -21,8 +21,8 @@ gke-terraform-path: &gke-terraform-path charts/consul/test/terraform/gke
 eks-terraform-path: &eks-terraform-path charts/consul/test/terraform/eks
 aks-terraform-path: &aks-terraform-path charts/consul/test/terraform/aks
 openshift-terraform-path: &openshift-terraform-path charts/consul/test/terraform/openshift
-# This image is built from test/docker/Test.dockerfile
-consul-helm-test-image: &consul-helm-test-image docker.mirror.hashicorp.services/hashicorpdev/consul-helm-test:0.15.0
+# This image is built from charts/consul/test/docker/Test.dockerfile and provides the necessary dependencies for running on our cloud targets.
+consul-helm-test-image: &consul-helm-test-image docker.mirror.hashicorp.services/hashicorpdev/consul-helm-test:0.12.3

########################
# COMMANDS
@@ -34,14 +34,14 @@ commands:
       - run:
           name: Install go, gotestsum, kind, kubectl, and helm
           command: |
-            wget https://golang.org/dl/go1.19.2.linux-amd64.tar.gz
-            sudo rm -rf /usr/local/go && sudo tar -C /usr/local -xzf go1.19.2.linux-amd64.tar.gz
-            rm go1.19.2.linux-amd64.tar.gz
+            wget https://golang.org/dl/go1.18.3.linux-amd64.tar.gz
+            sudo rm -rf /usr/local/go && sudo tar -C /usr/local -xzf go1.18.3.linux-amd64.tar.gz
+            rm go1.18.3.linux-amd64.tar.gz
             echo 'export PATH=$PATH:/usr/local/go/bin' >> $BASH_ENV
-            wget https://github.com/gotestyourself/gotestsum/releases/download/v1.8.2/gotestsum_1.8.2_linux_amd64.tar.gz
-            sudo tar -C /usr/local/bin -xzf gotestsum_1.8.2_linux_amd64.tar.gz
-            rm gotestsum_1.8.2_linux_amd64.tar.gz
+            wget https://github.com/gotestyourself/gotestsum/releases/download/v1.6.4/gotestsum_1.6.4_linux_amd64.tar.gz
+            sudo tar -C /usr/local/bin -xzf gotestsum_1.6.4_linux_amd64.tar.gz
+            rm gotestsum_1.6.4_linux_amd64.tar.gz
             curl -Lo ./kind https://kind.sigs.k8s.io/dl/v0.15.0/kind-linux-amd64
             chmod +x ./kind
@@ -51,8 +51,8 @@ commands:
             chmod +x ./kubectl
             sudo mv ./kubectl /usr/local/bin/kubectl
-            wget https://get.helm.sh/helm-v3.9.4-linux-amd64.tar.gz
-            tar -zxvf helm-v3.9.4-linux-amd64.tar.gz
+            wget https://get.helm.sh/helm-v3.7.0-linux-amd64.tar.gz
+            tar -zxvf helm-v3.7.0-linux-amd64.tar.gz
             sudo mv linux-amd64/helm /usr/local/bin/helm
   custom-checkout:
     description: |
@@ -170,8 +170,8 @@ commands:
           do
             if ! gotestsum --no-summary=all --jsonfile=jsonfile-${pkg////-} -- $pkg -p 1 -timeout 2h -failfast \
               << parameters.additional-flags >> \
-              -enable-multi-cluster \
               ${ENABLE_ENTERPRISE:+-enable-enterprise} \
+              -enable-multi-cluster \
              -debug-directory="$TEST_RESULTS/debug" \
              -consul-k8s-image=<< parameters.consul-k8s-image >>
             then
@@ -589,7 +589,7 @@ jobs:
      - run: mkdir -p $TEST_RESULTS
      - run-acceptance-tests:
          failfast: true
-          additional-flags: -use-kind -kubecontext="kind-dc1" -secondary-kubecontext="kind-dc2" -consul-image=docker.mirror.hashicorp.services/hashicorppreview/consul-enterprise:1.14-dev
+          additional-flags: -use-kind -kubecontext="kind-dc1" -secondary-kubecontext="kind-dc2" -consul-image=hashicorppreview/consul-enterprise:1.13-dev
      - store_test_results:
          path: /tmp/test-results
      - store_artifacts:
@@ -622,7 +622,7 @@ jobs:
      - run: mkdir -p $TEST_RESULTS
      - run-acceptance-tests:
          failfast: true
-          additional-flags: -use-kind -kubecontext="kind-dc1" -secondary-kubecontext="kind-dc2" -enable-transparent-proxy -consul-image=docker.mirror.hashicorp.services/hashicorppreview/consul-enterprise:1.14-dev
+          additional-flags: -use-kind -kubecontext="kind-dc1" -secondary-kubecontext="kind-dc2" -enable-transparent-proxy -consul-image=hashicorppreview/consul-enterprise:1.13-dev
      - store_test_results:
          path: /tmp/test-results
      - store_artifacts:
@@ -638,7 +638,7 @@ jobs:
    steps:
      - checkout
      - install-prereqs
-      - create-kind-cni-clusters:
+      - create-kind-clusters:
          version: "v1.24.4"
      - restore_cache:
          keys:
@@ -655,7 +655,7 @@ jobs:
      - run: mkdir -p $TEST_RESULTS
      - run-acceptance-tests:
          failfast: true
-          additional-flags: -use-kind -kubecontext="kind-dc1" -secondary-kubecontext="kind-dc2" -enable-transparent-proxy -enable-cni -consul-image=docker.mirror.hashicorp.services/hashicorppreview/consul-enterprise:1.14-dev
+          additional-flags: -use-kind -kubecontext="kind-dc1" -secondary-kubecontext="kind-dc2" -enable-transparent-proxy -enable-cni -consul-image=hashicorppreview/consul-enterprise:1.13-dev
      - store_test_results:
          path: /tmp/test-results
      - store_artifacts:
@@ -773,7 +773,7 @@ jobs:
      - run: mkdir -p $TEST_RESULTS
      - run-acceptance-tests:
-          additional-flags: -kubeconfig="$primary_kubeconfig" -secondary-kubeconfig="$secondary_kubeconfig" -enable-pod-security-policies -enable-transparent-proxy -consul-image=hashicorppreview/consul-enterprise:1.14-dev
+          additional-flags: -kubeconfig="$primary_kubeconfig" -secondary-kubeconfig="$secondary_kubeconfig" -enable-pod-security-policies -enable-transparent-proxy -consul-image=hashicorppreview/consul-enterprise:1.13-dev
      - store_test_results:
          path: /tmp/test-results
@@ -842,7 +842,7 @@ jobs:
      - run: mkdir -p $TEST_RESULTS
      - run-acceptance-tests:
-          additional-flags: -use-gke -kubeconfig="$primary_kubeconfig" -secondary-kubeconfig="$secondary_kubeconfig" -enable-pod-security-policies -enable-transparent-proxy -enable-cni -consul-image=hashicorppreview/consul-enterprise:1.14-dev
+          additional-flags: -use-gke -kubeconfig="$primary_kubeconfig" -secondary-kubeconfig="$secondary_kubeconfig" -enable-pod-security-policies -enable-transparent-proxy -enable-cni -consul-image=hashicorppreview/consul-enterprise:1.13-dev
      - store_test_results:
          path: /tmp/test-results
@@ -899,7 +899,7 @@ jobs:
      - run: mkdir -p $TEST_RESULTS
      - run-acceptance-tests:
-          additional-flags: -use-aks -kubeconfig="$primary_kubeconfig" -secondary-kubeconfig="$secondary_kubeconfig" -enable-transparent-proxy -consul-image=hashicorppreview/consul-enterprise:1.14-dev
+          additional-flags: -kubeconfig="$primary_kubeconfig" -secondary-kubeconfig="$secondary_kubeconfig" -enable-transparent-proxy -consul-image=hashicorppreview/consul-enterprise:1.13-dev
      - store_test_results:
          path: /tmp/test-results
@@ -956,7 +956,7 @@ jobs:
      - run: mkdir -p $TEST_RESULTS
      - run-acceptance-tests:
-          additional-flags: -use-aks -kubeconfig="$primary_kubeconfig" -secondary-kubeconfig="$secondary_kubeconfig" -enable-transparent-proxy -enable-cni -consul-image=hashicorppreview/consul-enterprise:1.14-dev
+          additional-flags: -kubeconfig="$primary_kubeconfig" -secondary-kubeconfig="$secondary_kubeconfig" -enable-transparent-proxy -enable-cni -consul-image=hashicorppreview/consul-enterprise:1.13-dev
      - store_test_results:
          path: /tmp/test-results
@@ -1018,7 +1018,7 @@ jobs:
      - run: mkdir -p $TEST_RESULTS
      - run-acceptance-tests:
-          additional-flags: -kubeconfig="$primary_kubeconfig" -secondary-kubeconfig="$secondary_kubeconfig" -enable-transparent-proxy -consul-image=hashicorppreview/consul-enterprise:1.14-dev
+          additional-flags: -kubeconfig="$primary_kubeconfig" -secondary-kubeconfig="$secondary_kubeconfig" -enable-transparent-proxy -consul-image=hashicorppreview/consul-enterprise:1.13-dev
      - store_test_results:
          path: /tmp/test-results
@@ -1081,7 +1081,7 @@ jobs:
      - run: mkdir -p $TEST_RESULTS
      - run-acceptance-tests:
-          additional-flags: -kubeconfig="$primary_kubeconfig" -secondary-kubeconfig="$secondary_kubeconfig" -enable-transparent-proxy -enable-cni -consul-image=hashicorppreview/consul-enterprise:1.14-dev
+          additional-flags: -kubeconfig="$primary_kubeconfig" -secondary-kubeconfig="$secondary_kubeconfig" -enable-transparent-proxy -enable-cni -consul-image=hashicorppreview/consul-enterprise:1.13-dev
      - store_test_results:
          path: /tmp/test-results
@@ -1135,7 +1135,7 @@ jobs:
      - run: mkdir -p $TEST_RESULTS
      - run-acceptance-tests:
-          additional-flags: -kubeconfig="$primary_kubeconfig" -secondary-kubeconfig="$secondary_kubeconfig" -enable-openshift -enable-transparent-proxy -consul-image=hashicorppreview/consul-enterprise:1.14-dev
+          additional-flags: -kubeconfig="$primary_kubeconfig" -secondary-kubeconfig="$secondary_kubeconfig" -enable-openshift -enable-transparent-proxy -consul-image=hashicorppreview/consul-enterprise:1.13-dev
      - store_test_results:
          path: /tmp/test-results
@@ -1154,19 +1154,57 @@ jobs:
          fail_only: true
          failure_message: "OpenShift acceptance tests failed. Check the logs at: ${CIRCLE_BUILD_URL}"

+  acceptance-kind-1-23-consul-compat-nightly-1-11:
+    environment:
+      - TEST_RESULTS: /tmp/test-results
+      - CONSUL_IMAGE: "docker.mirror.hashicorp.services/hashicorppreview/consul-enterprise:1.11-dev"
+      - ENVOY_IMAGE: "envoyproxy/envoy:v1.20.2"
+      - CONSUL_K8S_IMAGE: "docker.mirror.hashicorp.services/hashicorp/consul-k8s-control-plane:0.48.0"
+      - HELM_CHART_VERSION: "0.48.0"
+    machine:
+      image: ubuntu-2004:202010-01
+    resource_class: xlarge
+    steps:
+      - checkout
+      - install-prereqs
+      - create-kind-clusters:
+          version: "v1.23.0"
+      - restore_cache:
+          keys:
+            - consul-helm-modcache-v2-{{ checksum "acceptance/go.mod" }}
+      - run:
+          name: go mod download
+          working_directory: *acceptance-mod-path
+          command: go mod download
+      - save_cache:
+          key: consul-helm-modcache-v2-{{ checksum "acceptance/go.mod" }}
+          paths:
+            - ~/.go_workspace/pkg/mod
+      - build-cli
+      - run: mkdir -p $TEST_RESULTS
+      - run-acceptance-tests:
+          additional-flags: -use-kind -kubecontext="kind-dc1" -secondary-kubecontext="kind-dc2" -enable-transparent-proxy -consul-k8s-image=$CONSUL_K8S_IMAGE -consul-image=$CONSUL_IMAGE -consul-version="1.11" -envoy-image=$ENVOY_IMAGE -helm-chart-version=$HELM_CHART_VERSION
+      - store_test_results:
+          path: /tmp/test-results
+      - store_artifacts:
+          path: /tmp/test-results
+      - slack/status:
+          channel: *slack-channel
+          fail_only: true
+          failure_message: "Acceptance tests against Kind with Kubernetes v1.23 with Consul 1.11 nightly failed. Check the logs at: ${CIRCLE_BUILD_URL}"
+
  acceptance-kind-1-23-consul-compat-nightly-1-12:
    environment:
      - TEST_RESULTS: /tmp/test-results
      - CONSUL_IMAGE: "docker.mirror.hashicorp.services/hashicorppreview/consul-enterprise:1.12-dev"
      - ENVOY_IMAGE: "envoyproxy/envoy:v1.22.2"
-      - HELM_CHART_VERSION: "0.49.0"
-      - CONSUL_K8S_IMAGE: "docker.mirror.hashicorp.services/hashicorp/consul-k8s-control-plane:0.49.0"
+      - HELM_CHART_VERSION: "0.48.0"
+      - CONSUL_K8S_IMAGE: "docker.mirror.hashicorp.services/hashicorp/consul-k8s-control-plane:0.48.0"
    machine:
      image: ubuntu-2004:202010-01
    resource_class: xlarge
    steps:
-      - custom-checkout:
-          git-ref: "v$HELM_CHART_VERSION"
+      - checkout
      - install-prereqs
      - create-kind-clusters:
          version: "v1.23.0"
@@ -1184,8 +1222,7 @@ jobs:
      - build-cli
      - run: mkdir -p $TEST_RESULTS
      - run-acceptance-tests:
-          consul-k8s-image: $CONSUL_K8S_IMAGE
-          additional-flags: -use-kind -kubecontext="kind-dc1" -secondary-kubecontext="kind-dc2" -consul-image=$CONSUL_IMAGE -consul-version="1.12" -envoy-image=$ENVOY_IMAGE -helm-chart-version=$HELM_CHART_VERSION -enable-transparent-proxy
+          additional-flags: -use-kind -kubecontext="kind-dc1" -secondary-kubecontext="kind-dc2" -enable-transparent-proxy -consul-k8s-image=$CONSUL_K8S_IMAGE -consul-image=$CONSUL_IMAGE -consul-version="1.12" -envoy-image=$ENVOY_IMAGE -helm-chart-version=$HELM_CHART_VERSION
      - store_test_results:
          path: /tmp/test-results
      - store_artifacts:
@@ -1200,14 +1237,13 @@ jobs:
      - TEST_RESULTS: /tmp/test-results
      - CONSUL_IMAGE: "docker.mirror.hashicorp.services/hashicorppreview/consul-enterprise:1.13-dev"
      - ENVOY_IMAGE: "envoyproxy/envoy:v1.23.1"
-      - CONSUL_K8S_IMAGE: "docker.mirror.hashicorp.services/hashicorp/consul-k8s-control-plane:0.49.0"
-      - HELM_CHART_VERSION: "0.49.0"
+      - CONSUL_K8S_IMAGE: "docker.mirror.hashicorp.services/hashicorp/consul-k8s-control-plane:0.48.0"
+      - HELM_CHART_VERSION: "0.48.0"
    machine:
      image: ubuntu-2004:202010-01
    resource_class: xlarge
    steps:
-      - custom-checkout:
-          git-ref: "v$HELM_CHART_VERSION"
+      - checkout
      - install-prereqs
      - create-kind-clusters:
          version: "v1.23.0"
@@ -1225,8 +1261,7 @@ jobs:
      - build-cli
      - run: mkdir -p $TEST_RESULTS
      - run-acceptance-tests:
-          consul-k8s-image: $CONSUL_K8S_IMAGE
-          additional-flags: -use-kind -kubecontext="kind-dc1" -secondary-kubecontext="kind-dc2" -consul-image=$CONSUL_IMAGE -consul-version="1.13" -envoy-image=$ENVOY_IMAGE -helm-chart-version=$HELM_CHART_VERSION -enable-transparent-proxy
+          additional-flags: -use-kind -kubecontext="kind-dc1" -secondary-kubecontext="kind-dc2" -enable-transparent-proxy -consul-k8s-image=$CONSUL_K8S_IMAGE -consul-image=$CONSUL_IMAGE -consul-version="1.13" -envoy-image=$ENVOY_IMAGE -helm-chart-version=$HELM_CHART_VERSION
      - store_test_results:
          path: /tmp/test-results
      - store_artifacts:
@@ -1272,7 +1307,7 @@ workflows:
      - acceptance-tproxy:
          context: consul-ci
          requires:
-          - dev-upload-docker
+            - dev-upload-docker

  nightly-cleanup:
    triggers:
@@ -1316,18 +1351,6 @@ workflows:
      - acceptance-gke-cni-1-23:
          requires:
            - acceptance-gke-1-23
-      - acceptance-eks-1-21:
-          requires:
-            - dev-upload-docker
-      - acceptance-eks-cni-1-21:
-          requires:
-            - acceptance-eks-1-21
-      - acceptance-aks-1-22:
-          requires:
-            - dev-upload-docker
-      - acceptance-aks-cni-1-22:
-          requires:
-            - acceptance-aks-1-22
      - acceptance-tproxy:
          requires:
            - dev-upload-docker
@@ -1389,5 +1412,6 @@ workflows:
            only:
              - main
    jobs:
+      - acceptance-kind-1-23-consul-compat-nightly-1-11
      - acceptance-kind-1-23-consul-compat-nightly-1-12
      - acceptance-kind-1-23-consul-compat-nightly-1-13
diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml
index e22e28a48a..8082308548 100644
--- a/.github/ISSUE_TEMPLATE/config.yml
+++ b/.github/ISSUE_TEMPLATE/config.yml
@@ -1,8 +1,5 @@
 blank_issues_enabled: false
 contact_links:
-  - name: Consul Community Support
-    url: https://discuss.hashicorp.com/c/consul/29
-    about: If you have a question or are looking for advice on Consul K8s, please post on our Discuss forum! The community loves to chime in to help. Happy Coding!
   - name: Consul on Kubernetes Learn Tutorials
     url: https://learn.hashicorp.com/collections/consul/kubernetes
     about: Please check out our Learn Tutorials. These hands on tutorials deal with many of the tasks common to using Consul on Kubernetes.
diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml
index 9ce46f1704..217741fa8a 100644
--- a/.github/workflows/build.yml
+++ b/.github/workflows/build.yml
@@ -19,7 +19,7 @@ jobs:
    outputs:
      go-version: ${{ steps.get-go-version.outputs.go-version }}
    steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v2
      - name: Determine Go version
        id: get-go-version
        # We use .go-version as our source of truth for current Go
@@ -33,7 +33,7 @@ jobs:
    outputs:
      product-version: ${{ steps.get-product-version.outputs.product-version }}
    steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v2
      - name: get product version
        id: get-product-version
        run: |
@@ -47,7 +47,7 @@ jobs:
      filepath: ${{ steps.generate-metadata-file.outputs.filepath }}
    steps:
      - name: "Checkout directory"
-        uses: actions/checkout@v3
+        uses: actions/checkout@v2
      - name: Generate metadata file
        id: generate-metadata-file
        uses: hashicorp/actions-generate-metadata@v1
@@ -55,7 +55,7 @@ jobs:
          version: ${{ needs.get-product-version.outputs.product-version }}
          product: ${{ env.PKG_NAME }}
          repositoryOwner: "hashicorp"
-      - uses: actions/upload-artifact@v3
+      - uses: actions/upload-artifact@v2
        with:
          name: metadata.json
          path: ${{ steps.generate-metadata-file.outputs.filepath }}
@@ -107,10 +107,10 @@ jobs:
    name: Go ${{ matrix.go }} ${{ matrix.goos }} ${{ matrix.goarch }} ${{ matrix.component }} build
    steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v2

      - name: Setup go
-        uses: actions/setup-go@v3
+        uses: actions/setup-go@v2
        with:
          go-version: ${{ matrix.go }}
@@ -132,7 +132,7 @@ jobs:
          zip -r -j out/${{ matrix.pkg_name }}_${{ needs.get-product-version.outputs.product-version }}_${{ matrix.goos }}_${{ matrix.goarch }}.zip dist/

      - name: Upload built binaries
-        uses: actions/upload-artifact@v3
+        uses: actions/upload-artifact@v2
        with:
          name: ${{ matrix.pkg_name }}_${{ needs.get-product-version.outputs.product-version }}_${{ matrix.goos }}_${{ matrix.goarch }}.zip
          path: ${{ matrix.component}}/out/${{ matrix.pkg_name }}_${{ needs.get-product-version.outputs.product-version }}_${{ matrix.goos }}_${{ matrix.goarch }}.zip
@@ -177,7 +177,7 @@ jobs:
          echo "Test PASSED, expected: ${VERSION}, got: ${CONSUL_K8S_VERSION}"

      - name: Upload rpm package
-        uses: actions/upload-artifact@v3
+        uses: actions/upload-artifact@v2
        if: ${{ matrix.goos == 'linux' && matrix.component == 'cli' && matrix.goarch == 'amd64'}}
        with:
          name: ${{ env.RPM_PACKAGE }}
@@ -202,7 +202,7 @@ jobs:
          echo "Test PASSED, expected: ${VERSION}, got: ${CONSUL_K8S_VERSION}"

      - name: Upload debian packages
-        uses: actions/upload-artifact@v3
+        uses: actions/upload-artifact@v2
        if: ${{ matrix.goos == 'linux' && matrix.component == 'cli' && matrix.goarch == 'amd64'}}
        with:
          name: ${{ env.DEB_PACKAGE }}
@@ -219,7 +219,7 @@ jobs:
      repo: ${{ github.event.repository.name }}
      version: ${{ needs.get-product-version.outputs.product-version }}
    steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v2
      - uses: actions/download-artifact@v3
        with:
          name: consul-cni_${{ needs.get-product-version.outputs.product-version }}_linux_${{ matrix.arch }}.zip
@@ -263,7 +263,7 @@ jobs:
      repo: ${{ github.event.repository.name }}
      version: ${{ needs.get-product-version.outputs.product-version }}
    steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v2
      - uses: actions/download-artifact@v3
        with:
          name: consul-cni_${{ needs.get-product-version.outputs.product-version }}_linux_${{ matrix.arch }}.zip
@@ -274,9 +274,9 @@ jobs:
        run: |
          cd "${ZIP_LOCATION}"
          unzip -j *.zip
-      - name: Copy LICENSE
+      - name: Copy LICENSE.md
        run:
-          cp LICENSE ./control-plane
+          cp LICENSE.md ./control-plane
      - uses: hashicorp/actions-docker-build@v1
        with:
          smoke_test: |
@@ -292,7 +292,7 @@ jobs:
          pkg_name: consul-k8s-control-plane_${{ env.version }}
          bin_name: consul-k8s-control-plane
          workdir: control-plane
-          redhat_tag: quay.io/redhat-isv-containers/611ca2f89a9b407267837100:${{env.version}}-ubi
+          redhat_tag: scan.connect.redhat.com/ospid-611ca2f89a9b407267837100/consul-k8s-control-plane:${{env.version}}-ubi

  build-docker-ubi-dockerhub:
    name: Docker ${{ matrix.arch }} UBI build for DockerHub
@@ -305,7 +305,7 @@ jobs:
      repo: ${{ github.event.repository.name }}
      version: ${{ needs.get-product-version.outputs.product-version }}
    steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v2
      - uses: actions/download-artifact@v3
        with:
          name: consul-cni_${{ needs.get-product-version.outputs.product-version }}_linux_${{ matrix.arch }}.zip
@@ -316,9 +316,9 @@ jobs:
        run: |
          cd ${ZIP_LOCATION}
          unzip -j *.zip
-      - name: Copy LICENSE
+      - name: Copy LICENSE.md
        run:
-          cp LICENSE ./control-plane
+          cp LICENSE.md ./control-plane
      - uses: hashicorp/actions-docker-build@v1
        with:
          smoke_test: |
diff --git a/.github/workflows/reusable-acceptance.yml b/.github/workflows/reusable-acceptance.yml
index 01858cb828..56389bb346 100644
--- a/.github/workflows/reusable-acceptance.yml
+++ b/.github/workflows/reusable-acceptance.yml
@@ -11,8 +11,9 @@ on:
        type: string
        default: ""
      consul-k8s-image:
-        required: false
+        required: false
        type: string
+        default: docker.mirror.hashicorp.services/hashicorpdev/consul-k8s-control-plane:latest
      directory:
        required: true
        type: string
@@ -25,11 +26,7 @@ on:
      kind-version:
        required: false
        type: string
-        default: "v1.24.6"
-      checkout-ref:
-        required: false
-        type: string
-        default: ${{ github.sha }}
+        default: "v1.22.4"
    secrets:
      CONSUL_ENT_LICENSE:
        required: true
@@ -37,38 +34,32 @@ on:
 # Environment variables can only be used at the step level
 env:
  TEST_RESULTS: /tmp/test-results # path to where test results are saved
-  CONSUL_ENT_LICENSE: ${{ secrets.CONSUL_ENT_LICENSE }}
-  CONSUL_K8S_IMAGE: ${{ inputs.consul-k8s-image }}
+  CONSUL_ENT_LICENSE: ${{ secrets.CONSUL_ENT_LICENSE }}

 jobs:
  job:
-    runs-on: [custom, linux, xl]
+    runs-on: ubuntu-latest
    strategy:
      matrix:
-        include:
-          - {runner: "0", test-packages: "basic consul-dns metrics"}
-          - {runner: "1", test-packages: "connect"}
-          - {runner: "2", test-packages: "controller example"}
-          - {runner: "3", test-packages: "ingress-gateway"}
-          - {runner: "4", test-packages: "partitions"}
-          - {runner: "5", test-packages: "peering"}
-          - {runner: "6", test-packages: "snapshot-agent vault wan-federation"}
-          - {runner: "7", test-packages: "cli sync terminating-gateway"}
+        include: # I am really sorry for this but I could not find a way to automatically split our tests into several runners. For now, split manually.
+          - {runner: "0", test-packages: "basic connect consul-dns"}
+          - {runner: "1", test-packages: "controller example ingress-gateway"}
+          - {runner: "2", test-packages: "mesh-gateway metrics"}
+          - {runner: "3", test-packages: "partitions sync terminating-gateway"}
+          - {runner: "4", test-packages: "vault"}

-      fail-fast: false
+      fail-fast: true
    steps:
      - name: Checkout code
-        uses: actions/checkout@v3
-        with:
-          ref: ${{ inputs.checkout-ref }}
+        uses: actions/checkout@v2

      - name: Setup go
-        uses: actions/setup-go@v3
+        uses: actions/setup-go@v2
        with:
          go-version: ${{ inputs.go-version }}

      - name: Setup go mod cache
-        uses: actions/cache@v3
+        uses: actions/cache@v2
        with:
          path: |
            ~/.cache/go-build
@@ -77,23 +68,11 @@ jobs:
          restore-keys: |
            ${{ runner.os }}-go-

-      - name: Install pre-requisites # Install gotestsum, kind, kubectl, and helm
+      - name: Install gotestsum
        run: |
-          wget https://github.com/gotestyourself/gotestsum/releases/download/v1.6.4/gotestsum_1.6.4_linux_amd64.tar.gz
-          sudo tar -C /usr/local/bin -xzf gotestsum_1.6.4_linux_amd64.tar.gz
-          rm gotestsum_1.6.4_linux_amd64.tar.gz
-
-          curl -Lo ./kind https://kind.sigs.k8s.io/dl/v0.15.0/kind-linux-amd64
-          chmod +x ./kind
-          sudo mv ./kind /usr/local/bin/kind
-
-          curl -LO "https://storage.googleapis.com/kubernetes-release/release/$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/linux/amd64/kubectl"
-          chmod +x ./kubectl
-          sudo mv ./kubectl /usr/local/bin/kubectl
-
-          wget https://get.helm.sh/helm-v3.9.4-linux-amd64.tar.gz
-          tar -zxvf helm-v3.9.4-linux-amd64.tar.gz
-          sudo mv linux-amd64/helm /usr/local/bin/helm
+          wget https://github.com/gotestyourself/gotestsum/releases/download/v"${{ inputs.gotestsum-version }}"/gotestsum_"${{ inputs.gotestsum-version }}"_linux_amd64.tar.gz
+          sudo tar -C /usr/local/bin -xzf gotestsum_"${{ inputs.gotestsum-version }}"_linux_amd64.tar.gz
+          rm gotestsum_"${{ inputs.gotestsum-version }}"_linux_amd64.tar.gz

      - run: mkdir -p ${{ env.TEST_RESULTS }}

@@ -106,11 +85,6 @@ jobs:
          kind create cluster --name dc1 --image kindest/node:${{ inputs.kind-version }}
          kind create cluster --name dc2 --image kindest/node:${{ inputs.kind-version }}

-      - name: Build CLI
-        run: |
-          sudo make cli-dev
-          consul-k8s version
-
      # We have to run the tests for each package separately so that we can
      # exit early if any test fails (-failfast only works within a single
      # package).
@@ -141,14 +115,14 @@ jobs:
      - name: Upload tests
        if: always()
-        uses: actions/upload-artifact@v3
+        uses: actions/upload-artifact@v2
        with:
          name: ${{ inputs.name }}-${{ matrix.test-packages }}-gotestsum-report.xml
          path: ${{ env.TEST_RESULTS }}/gotestsum-report.xml

-      - name: Upload debug (on failure)
+      - name: Upload debug (on failure)
        if: failure()
-        uses: actions/upload-artifact@v3
+        uses: actions/upload-artifact@v2
        with:
          name: ${{ inputs.name }}-${{ matrix.test-packages }}-debug-info
          path: ${{ env.TEST_RESULTS }}/debug
diff --git a/.github/workflows/reusable-unit.yml b/.github/workflows/reusable-unit.yml
index fa2b4130f5..9e563e0203 100644
--- a/.github/workflows/reusable-unit.yml
+++ b/.github/workflows/reusable-unit.yml
@@ -13,7 +13,7 @@ on:
 # Environment variables can only be used at the step level
 env:
  TEST_RESULTS: /tmp/test-results # path to where test results are saved
-  GOTESTSUM_VERSION: 1.8.2
+  GOTESTSUM_VERSION: 1.8.1

 jobs:
  job:
@@ -21,15 +21,15 @@ jobs:
    steps:
      - name: Checkout code
-        uses: actions/checkout@v3
+        uses: actions/checkout@v2

      - name: Setup go
-        uses: actions/setup-go@v3
+        uses: actions/setup-go@v2
        with:
          go-version: ${{inputs.go-version}}

      - name: Setup go mod cache
-        uses: actions/cache@v3
+        uses: actions/cache@v2
        with:
          path: |
            ~/.cache/go-build
diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml
index 3bbaf5bfe7..ce1033a540 100644
--- a/.github/workflows/test.yml
+++ b/.github/workflows/test.yml
@@ -4,10 +4,9 @@ on:
 env:
  TEST_RESULTS: /tmp/test-results # path to where test results are saved
-  GOTESTSUM_VERSION: 1.8.2 # You cannot use environment variables with workflows. The gotestsum version is hardcoded in the reusable workflows too.
-  # We use docker images to copy the consul binary for unit tests.
-  CONSUL_OSS_DOCKER_IMAGE: hashicorppreview/consul:1.14-dev # Consul's OSS version to use in tests
-  CONSUL_ENT_DOCKER_IMAGE: hashicorppreview/consul-enterprise:1.14-dev # Consul's enterprise version to use in tests
+  CONSUL_VERSION: 1.13.1 # Consul's OSS version to use in tests
+  CONSUL_ENT_VERSION: 1.13.1+ent # Consul's enterprise version to use in tests
+  GOTESTSUM_VERSION: 1.8.1 # You cannot use environment variables with workflows. The gotestsum version is hardcoded in the reusable workflows too.
 jobs:
  get-go-version:
    runs-on: ubuntu-latest
    outputs:
      go-version: ${{ steps.get-go-version.outputs.go-version }}
    steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v2
      - name: Determine Go version
        id: get-go-version
        # We use .go-version as our source of truth for current Go
@@ -25,33 +24,21 @@ jobs:
          echo "Building with Go $(cat .go-version)"
          echo "::set-output name=go-version::$(cat .go-version)"

-  get-product-version:
-    runs-on: ubuntu-latest
-    outputs:
-      product-version: ${{ steps.get-product-version.outputs.product-version }}
-    steps:
-      - uses: actions/checkout@v3
-      - name: get product version
-        id: get-product-version
-        run: |
-          make version
-          echo "::set-output name=product-version::$(make version)"
-
  validate-helm-gen:
    needs:
      - get-go-version
    runs-on: ubuntu-latest
    steps:
      - name: Checkout code
-        uses: actions/checkout@v3
+        uses: actions/checkout@v2

      - name: Setup go
-        uses: actions/setup-go@v3
+        uses: actions/setup-go@v2
        with:
          go-version: ${{ needs.get-go-version.outputs.go-version }}

      - name: Setup go mod cache
-        uses: actions/cache@v3
+        uses: actions/cache@v2
        with:
          path: |
            ~/.cache/go-build
@@ -87,11 +74,11 @@ jobs:
      - unit-helm-gen
    runs-on: ubuntu-latest
    container:
-      image: docker.mirror.hashicorp.services/hashicorpdev/consul-helm-test:0.15.0
+      image: docker.mirror.hashicorp.services/hashicorpdev/consul-helm-test:0.12.0
      options: --user 1001
    steps:
      - name: Checkout code
-        uses: actions/checkout@v3
+        uses: actions/checkout@v2

      - name: Run Unit Tests
        working-directory: charts/consul
@@ -103,10 +90,10 @@ jobs:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout code
-        uses: actions/checkout@v3
+        uses: actions/checkout@v2

      - name: Setup go
-        uses: actions/setup-go@v3
+        uses: actions/setup-go@v2
        with:
          go-version: ${{ needs.get-go-version.outputs.go-version }}
@@ -129,15 +116,15 @@ jobs:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout code
-        uses: actions/checkout@v3
+        uses: actions/checkout@v2

      - name: Setup go
-        uses: actions/setup-go@v3
+        uses: actions/setup-go@v2
        with:
          go-version: ${{ needs.get-go-version.outputs.go-version }}

      - name: Setup go mod cache
-        uses: actions/cache@v3
+        uses: actions/cache@v2
        with:
          path: |
            ~/.cache/go-build
@@ -159,9 +146,11 @@ jobs:
        working-directory: control-plane
        run: |
          mkdir -p $HOME/bin
-          container_id=$(docker create ${{env.CONSUL_OSS_DOCKER_IMAGE}})
-          docker cp "$container_id:/bin/consul" $HOME/bin/consul
-          docker rm "$container_id"
+          wget https://releases.hashicorp.com/consul/${{env.CONSUL_VERSION}}/consul_${{env.CONSUL_VERSION}}_linux_amd64.zip && \
+          unzip consul_${{env.CONSUL_VERSION}}_linux_amd64.zip -d $HOME/bin && \
+          rm consul_${{env.CONSUL_VERSION}}_linux_amd64.zip
+          chmod +x $HOME/bin/consul
+
      - name: Run go tests
        working-directory: control-plane
        run: |
@@ -176,15 +165,15 @@ jobs:
      CONSUL_LICENSE: ${{secrets.CONSUL_LICENSE}}
    steps:
      - name: Checkout code
-        uses: actions/checkout@v3
+        uses: actions/checkout@v2

      - name: Setup go
-        uses: actions/setup-go@v3
+        uses: actions/setup-go@v2
        with:
          go-version: ${{ needs.get-go-version.outputs.go-version }}

      - name: Setup go mod cache
-        uses: actions/cache@v3
+        uses: actions/cache@v2
        with:
          path: |
            ~/.cache/go-build
@@ -206,9 +195,10 @@ jobs:
        working-directory: control-plane
        run: |
          mkdir -p $HOME/bin
-          container_id=$(docker create ${{env.CONSUL_ENT_DOCKER_IMAGE}})
-          docker cp "$container_id:/bin/consul" $HOME/bin/consul
-          docker rm "$container_id"
+          wget https://releases.hashicorp.com/consul/${{env.CONSUL_ENT_VERSION}}/consul_${{env.CONSUL_ENT_VERSION}}_linux_amd64.zip && \
+          unzip consul_${{env.CONSUL_ENT_VERSION}}_linux_amd64.zip -d $HOME/bin && \
+          rm consul_${{env.CONSUL_ENT_VERSION}}_linux_amd64.zip
+          chmod +x $HOME/bin/consul

      - name: Run go tests
        working-directory: control-plane
        run: |
@@ -217,51 +207,40 @@ jobs:
          gotestsum --junitfile ${{env.TEST_RESULTS}}/gotestsum-report.xml -- -tags=enterprise -p 4 $PACKAGE_NAMES

  build-distros:
-    needs: [get-go-version, get-product-version]
+    needs: [get-go-version, test-control-plane, test-enterprise-control-plane]
    runs-on: ubuntu-latest
    strategy:
      matrix:
        include:
-          # cli
-          - {go: "${{ needs.get-go-version.outputs.go-version }}", goos: "linux", goarch: "amd64", component: "cli", pkg_name: "consul-k8s", "bin_name": "consul-k8s" }
-          # control-plane
-          - {go: "${{ needs.get-go-version.outputs.go-version }}", goos: "linux", goarch: "amd64", component: "control-plane", pkg_name: "consul-k8s-control-plane", "bin_name": "consul-k8s-control-plane" }
-          # consul-cni
-          - {go: "${{ needs.get-go-version.outputs.go-version }}", goos: "linux", goarch: "amd64", component: "control-plane/cni", pkg_name: "consul-cni", "bin_name": "consul-cni" }
-
+          - {go: "${{ needs.get-go-version.outputs.go-version }}", goos: "linux", goarch: "386"}
+          - {go: "${{ needs.get-go-version.outputs.go-version }}", goos: "linux", goarch: "amd64"}
+          - {go: "${{ needs.get-go-version.outputs.go-version }}", goos: "linux", goarch: "arm"}
+          - {go: "${{ needs.get-go-version.outputs.go-version }}", goos: "linux", goarch: "arm64"}
      fail-fast: true
-    name: Go ${{ matrix.go }} ${{ matrix.goos }} ${{ matrix.goarch }} ${{ matrix.component }} build
+    name: Go ${{ matrix.go }} ${{ matrix.goos }} ${{ matrix.goarch }} build
    steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v2

      - name: Setup go
-        uses: actions/setup-go@v3
+        uses: actions/setup-go@v2
        with:
          go-version: ${{ matrix.go }}

      - name: Build
+        working-directory: control-plane
        env:
          GOOS: ${{ matrix.goos }}
          GOARCH: ${{ matrix.goarch }}
          CGO_ENABLED: 0
-        working-directory: ${{ matrix.component }}
        run: |
-          mkdir -p dist out
-
-          export GIT_COMMIT=$(git rev-parse --short HEAD)
-          export GIT_DIRTY=$(test -n "$(git status --porcelain)" && echo "+CHANGES")
-          export GIT_IMPORT=github.com/hashicorp/consul-k8s/${{ matrix.component }}/version
-          export GOLDFLAGS="-X ${GIT_IMPORT}.GitCommit=${GIT_COMMIT}${GIT_DIRTY} -X ${GIT_IMPORT}.GitDescribe=${{ needs.get-product-version.outputs.product-version }}"
-
-          CGO_ENABLED=0 go build -o dist/${{ matrix.bin_name }} -ldflags "${GOLDFLAGS}" .
-          zip -r -j out/${{ matrix.pkg_name }}_${{ needs.get-product-version.outputs.product-version }}_${{ matrix.goos }}_${{ matrix.goarch }}.zip dist/
+          XC_OS=${{ matrix.goos }} XC_ARCH=${{ matrix.goarch }} ./build-support/scripts/build-local.sh
+          zip -r -j consul-k8s_${{ matrix.goos }}_${{ matrix.goarch }}.zip bin

-      - name: Upload built binaries
-        uses: actions/upload-artifact@v3
+      - uses: actions/upload-artifact@v2
        with:
-          name: ${{ matrix.pkg_name }}_${{ needs.get-product-version.outputs.product-version }}_${{ matrix.goos }}_${{ matrix.goarch }}.zip
-          path: ${{ matrix.component}}/out/${{ matrix.pkg_name }}_${{ needs.get-product-version.outputs.product-version }}_${{ matrix.goos }}_${{ matrix.goarch }}.zip
+          name: consul-k8s_${{ matrix.goos }}_${{ matrix.goarch }}.zip
+          path: control-plane/consul-k8s_${{ matrix.goos }}_${{ matrix.goarch }}.zip

  golangci-lint-acceptance:
    needs:
@@ -293,92 +272,56 @@ jobs:
      directory: cli
      go-version: ${{ needs.get-go-version.outputs.go-version }}

-  # upload dev docker image
-  dev-upload-docker:
-    if: github.repository_owner == 'hashicorp' # Do not run on forks as this requires secrets
-    needs: [ get-product-version, build-distros ]
-    runs-on: ubuntu-latest
-    strategy:
-      matrix:
-        arch: ["amd64"]
-    env:
-      repo: ${{ github.event.repository.name }}
-      version: ${{ needs.get-product-version.outputs.product-version }}
-    steps:
-      - uses: actions/checkout@v3
-      - uses: actions/download-artifact@v3
-        with:
-          name: consul-cni_${{ needs.get-product-version.outputs.product-version }}_linux_${{ matrix.arch }}.zip
-          path: control-plane/dist/cni/linux/${{ matrix.arch }}
-      - uses: actions/download-artifact@v3
-        with:
-          name: consul-k8s-control-plane_${{ needs.get-product-version.outputs.product-version }}_linux_${{ matrix.arch }}.zip
-          path: control-plane/dist/linux/${{ matrix.arch }}
-      - name: extract consul-cni zip
-        env:
-          ZIP_LOCATION: control-plane/dist/cni/linux/${{ matrix.arch }}
-        run: |
-          cd "${ZIP_LOCATION}"
-          unzip -j *.zip
-      - name: extract control-plane zip
-        env:
-          ZIP_LOCATION: control-plane/dist/linux/${{ matrix.arch }}
-        run: |
-          cd "${ZIP_LOCATION}"
-          unzip -j *.zip
-      - name: Set up Docker Buildx
-        uses: docker/setup-buildx-action@v2
-      - name: Login to Docker Hub
-        uses: docker/login-action@v2
-        with:
-          username: ${{ secrets.DOCKER_USER }}
-          password: ${{ secrets.DOCKER_PASS }}
-      - name: Docker Build (Action)
-        uses: docker/build-push-action@v3
-        with:
-          push: true
-          context: control-plane
-          platforms: ${{ matrix.arch }}
-          target: release-default
-          tags: docker.io/hashicorppreview/${{ env.repo }}-control-plane:${{ env.version }}-pr-${{ github.sha }}
-
-# Disable GHA acceptance tests until GHA formally supported
-# acceptance:
-#   needs: [ get-product-version, dev-upload-docker, get-go-version ]
-#   uses: hashicorp/consul-k8s/.github/workflows/reusable-acceptance.yml@main
-#   with:
-#     name: acceptance
-#     directory: acceptance/tests
-#     go-version: ${{ needs.get-go-version.outputs.go-version }}
-#     additional-flags: "-use-kind -kubecontext=kind-dc1 -secondary-kubecontext=kind-dc2 -consul-image=docker.mirror.hashicorp.services/hashicorppreview/consul-enterprise:1.14-dev"
-#     gotestsum-version: 1.8.2
-#     consul-k8s-image: docker.io/hashicorppreview/${{ github.event.repository.name }}-control-plane:${{ needs.get-product-version.outputs.product-version }}-pr-${{ github.sha }}
-#   secrets:
-#     CONSUL_ENT_LICENSE: ${{ secrets.CONSUL_ENT_LICENSE }}
+# Disabling for now until we get faster VMs to run acceptance tests. Faster VMs for Github Actions are supposed
+# to be available in the summer of 2022. For now, run the dev-upload docker and acceptance tests in CircleCI
+# dev-upload-docker:
+#   if: github.repository_owner == 'hashicorp' # Do not run on forks as this requires secrets
+#   needs: build-distros
+#   runs-on: ubuntu-latest
+#
+#   env:
+#     GITHUB_PULL_REQUEST: ${{github.event.pull_request.number}}
+#     DOCKER_USER: ${{secrets.DOCKER_USER}}
+#     DOCKER_PASS: ${{secrets.DOCKER_PASS}}
+#   steps:
+#     - uses: actions/checkout@v2
+#
+#     - run: mkdir -p control-plane/pkg/bin/linux_amd64
+#
+#     - uses: actions/download-artifact@v3
+#       with:
+#         name: consul-k8s_linux_amd64.zip
+#         path: control-plane
+#
+#     - name: Docker build
+#       working-directory: control-plane
+#       run: |
+#         unzip consul-k8s_linux_amd64.zip -d ./pkg/bin/linux_amd64
+#         make ci.dev-docker-github
 #
 # acceptance-tproxy:
-#   needs: [ get-product-version, dev-upload-docker, get-go-version ]
+#   needs: [get-go-version, unit-cli, dev-upload-docker, unit-acceptance-framework, unit-test-helm-templates]
+#   needs: dev-upload-docker
 #   uses: hashicorp/consul-k8s/.github/workflows/reusable-acceptance.yml@main
 #   with:
 #     name: acceptance-tproxy
 #     directory: acceptance/tests
 #     go-version: ${{ needs.get-go-version.outputs.go-version }}
-#     additional-flags: "-use-kind -kubecontext=kind-dc1 -secondary-kubecontext=kind-dc2 -enable-transparent-proxy -consul-image=docker.mirror.hashicorp.services/hashicorppreview/consul-enterprise:1.14-dev"
-#     gotestsum-version: 1.8.2
-#     consul-k8s-image: docker.io/hashicorppreview/${{ github.event.repository.name }}-control-plane:${{ needs.get-product-version.outputs.product-version }}-pr-${{ github.sha }}
+#     additional-flags: "-use-kind -kubecontext=kind-dc1 -secondary-kubecontext=kind-dc2 -enable-transparent-proxy"
+#     gotestsum-version: 1.6.4
 #   secrets:
 #     CONSUL_ENT_LICENSE: ${{ secrets.CONSUL_ENT_LICENSE }}
 #
-# acceptance-cni:
-#   needs: [ get-product-version, dev-upload-docker, get-go-version ]
+# acceptance:
+#   #needs: [get-go-version, unit-cli, dev-upload-docker, unit-acceptance-framework, unit-test-helm-templates]
+#   needs: dev-upload-docker
 #   uses: hashicorp/consul-k8s/.github/workflows/reusable-acceptance.yml@main
 #   with:
 #     name: acceptance
 #     directory: acceptance/tests
 #     go-version: ${{ needs.get-go-version.outputs.go-version }}
-#     additional-flags: "-use-kind -kubecontext=kind-dc1 -secondary-kubecontext=kind-dc2 -enable-transparent-proxy -enable-cni -consul-image=docker.mirror.hashicorp.services/hashicorppreview/consul-enterprise:1.14-dev"
-#     gotestsum-version: 1.8.2
-#     consul-k8s-image: docker.io/hashicorppreview/${{ github.event.repository.name }}-control-plane:${{ needs.get-product-version.outputs.product-version }}-pr-${{ github.sha }}
+#     additional-flags: "-use-kind -kubecontext=kind-dc1 -secondary-kubecontext=kind-dc2"
+#     gotestsum-version: 1.6.4
 #   secrets:
 #     CONSUL_ENT_LICENSE: ${{ secrets.CONSUL_ENT_LICENSE }}
diff --git a/.gitignore b/.gitignore
index ecc38e82e0..20ab4d997b 100644
--- a/.gitignore
+++ b/.gitignore
@@ -9,4 +9,3 @@ bin/
 pkg/
 .idea/
 .vscode
-.bob/
diff --git a/.go-version b/.go-version
index 836ae4eda2..b9fb27ab4f 100644
--- a/.go-version
+++ b/.go-version
@@ -1 +1 @@
-1.19.2
+1.18.3
diff --git a/.release/release-metadata.hcl b/.release/release-metadata.hcl
index c053fdac2f..0695fea61f 100644
--- a/.release/release-metadata.hcl
+++ b/.release/release-metadata.hcl
@@ -1,4 +1,4 @@
 url_docker_registry_dockerhub = "https://hub.docker.com/r/hashicorp/consul-k8s-control-plane"
-url_license = "https://github.com/hashicorp/consul-k8s/blob/main/LICENSE"
"https://github.com/hashicorp/consul-k8s/blob/main/LICENSE" +url_license = "https://github.com/hashicorp/consul-k8s/blob/main/LICENSE.md" url_project_website = "https://www.consul.io/docs/k8s" url_source_repository = "https://github.com/hashicorp/consul-k8s" diff --git a/CHANGELOG.md b/CHANGELOG.md index 67c5552f6f..4ce8d8aca5 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,109 +1,14 @@ ## UNRELEASED -BREAKING_CHANGES: -* Helm: - * Remove `global.consulSidecarContainer` from values file as there is no longer a consul sidecar. [[GH-1635](https://github.com/hashicorp/consul-k8s/pull/1635)] - * Consul snapshot-agent now runs as a sidecar with Consul servers. [[GH-1620](https://github.com/hashicorp/consul-k8s/pull/1620)] - This results in the following changes to Helm values: - * Move `client.snapshotAgent` values to `server.snapshotAgent`, with the exception of the following values: - * `client.snaphostAgent.replicas` - * `client.snaphostAgent.serviceAccount` - * Remove `global.secretsBackend.vault.consulSnapshotAgentRole` value. You should now use the `global.secretsBackend.vault.consulServerRole` for access to any Vault secrets. -* Peering: - * Remove support for customizing the server addresses in peering token generation. Instead, mesh gateways should be used - to establish peering connections if the server pods are not directly reachable. [[GH-1610](https://github.com/hashicorp/consul-k8s/pull/1610)] - * Require `global.tls.enabled` when peering is enabled. [[GH-1610](https://github.com/hashicorp/consul-k8s/pull/1610)] - * Require `meshGateway.enabled` when peering is enabled. [[GH-1683](https://github.com/hashicorp/consul-k8s/pull/1683)] - -FEATURES: -* Consul-dataplane: - * Support merged metrics with consul-dataplane. [[GH-1635](https://github.com/hashicorp/consul-k8s/pull/1635)] - * Support transparent proxying when using consul-dataplane. [[GH-1625](https://github.com/hashicorp/consul-k8s/pull/1478),[GH-1632](https://github.com/hashicorp/consul-k8s/pull/1632)] - * Enable sync-catalog to only talk to Consul servers. [[GH-1659](https://github.com/hashicorp/consul-k8s/pull/1659)] - IMPROVEMENTS: -* CLI - * Update minimum go version for project to 1.19 [[GH-1633](https://github.com/hashicorp/consul-k8s/pull/1633)] - * Enable `consul-k8s uninstall` to delete custom resources when uninstalling Consul. This is done by default. [[GH-1623](https://github.com/hashicorp/consul-k8s/pull/1623)] -* Control Plane - * Update minimum go version for project to 1.19 [[GH-1633](https://github.com/hashicorp/consul-k8s/pull/1633)] - * Remove unneeded `agent:read` ACL permissions from mesh gateway policy. [[GH-1255](https://github.com/hashicorp/consul-k8s/pull/1255)] * Helm: - * Remove deprecated annotation `service.alpha.kubernetes.io/tolerate-unready-endpoints: "true"` in the `server-service` template. [[GH-1619](https://github.com/hashicorp/consul-k8s/pull/1619)] - * Support `minAvailable` on connect injector `PodDisruptionBudget`. [[GH-1557](https://github.com/hashicorp/consul-k8s/pull/1557)] * Add `tolerations` and `nodeSelector` to Server ACL init jobs and `nodeSelector` to Webhook cert manager. [[GH-1581](https://github.com/hashicorp/consul-k8s/pull/1581)] + * API Gateway: Allow controller to read MeshServices for use as a route backend. 
[[GH-1574](https://github.com/hashicorp/consul-k8s/pull/1574)] * API Gateway: Add `tolerations` to `apiGateway.managedGatewayClass` and `apiGateway.controller` [[GH-1650](https://github.com/hashicorp/consul-k8s/pull/1650)] * API Gateway: Create PodSecurityPolicy for controller when `global.enablePodSecurityPolicies=true`. [[GH-1656](https://github.com/hashicorp/consul-k8s/pull/1656)] * API Gateway: Create PodSecurityPolicy and allow controller to bind it to ServiceAccounts that it creates for Gateway Deployments when `global.enablePodSecurityPolicies=true`. [[GH-1672](https://github.com/hashicorp/consul-k8s/pull/1672)] - * Deploy `expose-servers` service only when Admin Partitions(ENT) is enabled. [[GH-1683](https://github.com/hashicorp/consul-k8s/pull/1683)] -## 1.0.0-beta4 (October 28, 2022) - -IMPROVEMENTS: - -CLI: - -* Update demo charts and CLI command to not presume tproxy when using HCP preset. Also, use the most recent version of hashicups. [[GH-1657](https://github.com/hashicorp/consul-k8s/pull/1657)] - -## 1.0.0-beta3 (October 12, 2022) - -FEATURES: -* Peering: - * Add support for `PeerThroughMeshGateways` in Mesh CRD. [[GH-1478](https://github.com/hashicorp/consul-k8s/pull/1478)] - -BREAKING CHANGES: -* Helm: - * `syncCatalog.consulNamespaces.mirroringK8S` now defaults to `true`. [[GH-1601](https://github.com/hashicorp/consul-k8s/pull/1601)] - * `connectInject.consulNamespaces.mirroringK8S` now defaults to `true`. [[GH-1601](https://github.com/hashicorp/consul-k8s/pull/1601)] - -IMPROVEMENTS: -* Helm: - * API Gateway: Allow controller to read MeshServices for use as a route backend. [[GH-1574](https://github.com/hashicorp/consul-k8s/pull/1574)] -* CLI: - * `consul-k8s status` command will only show status of servers if they are expected to be present in the Kubernetes cluster. [[GH-1603](https://github.com/hashicorp/consul-k8s/pull/1603)] - -## 1.0.0-beta2 (October 7, 2022) -BREAKING CHANGES: -* Peering: - * Rename `PeerName` to `Peer` in ExportedServices CRD. [[GH-1596](https://github.com/hashicorp/consul-k8s/pull/1596)] -* Helm - * `server.replicas` now defaults to `1`. Formerly, this defaulted to `3`. [[GH-1551](https://github.com/hashicorp/consul-k8s/pull/1551)] - * `connectInject.enabled` now defaults to `true`. [[GH-1551](https://github.com/hashicorp/consul-k8s/pull/1551)] - * `controller.enabled` now defaults to `true`. [[GH-1551](https://github.com/hashicorp/consul-k8s/pull/1551)] - -BUG FIXES: -* CLI - * Allow optional environment variables for use in the cloud preset to the CLI for cluster bootstrapping. [[GH-1608](https://github.com/hashicorp/consul-k8s/pull/1608)] - * Configure `-tls-server-name` when `global.cloud.enabled=true` so that it matches the server certificate created via HCP [[GH-1591](https://github.com/hashicorp/consul-k8s/pull/1591)] - * Do not query clients in the status command since clients no longer exist. [[GH-1573](https://github.com/hashicorp/consul-k8s/pull/1573)] - -## 1.0.0-beta1 (October 4, 2022) -FEATURES: -* CLI: - * Add the ability to install HCP self-managed clusters. [[GH-1540](https://github.com/hashicorp/consul-k8s/pull/1540)] - * Add the ability to install the HashiCups demo application via the -demo flag. [[GH-1540](https://github.com/hashicorp/consul-k8s/pull/1540)] - -BREAKING CHANGES: -* Admin Partitions **(Consul Enterprise only)**: Remove the partition service. When configuring Admin Partitions, the expose-servers service should be used instead. 
-* Consul client agents are no longer deployed by default, and Consul service mesh no longer uses Consul clients to operate. This change affects several main areas listed below. [[GH-1552](https://github.com/hashicorp/consul-k8s/pull/1552)]
-  * Control plane:
-    * A new component `consul-dataplane` is now injected as a sidecar-proxy instead of plain Envoy. `consul-dataplane` manages the Envoy proxy process and proxies xDS requests from Envoy to Consul servers.
-    * All services on the service mesh are now registered directly with the central catalog in Consul servers.
-    * All service-mesh consul-k8s components are configured to talk directly to Consul servers.
-    * Mesh, ingress, and terminating gateways are now registered centrally by the endpoints controller, similar to how service-mesh services are registered.
-  * Helm:
-    * `client.enabled` now defaults to `false`. Setting it to `true` will deploy client agents, however, none of the consul-k8s components will use clients for their operation.
-    * `global.imageEnvoy` is no longer used for sidecar proxies, as well as mesh, terminating, and ingress gateways.
-    * `externalServers.grpcPort` default is now `8502` instead of `8503`.
-    * `meshGateway.service.enabled` value is removed. Mesh gateways now will always have a Kubernetes service as this is required to register them as a service with Consul.
-    * `meshGateway.initCopyConsulContainer`, `ingressGateways.initCopyConsulContainer`, `terminatingGateways.initCopyConsulContainer` values are removed.
-  * Known `beta` limitations:
-    * Transparent proxy is not yet supported.
-    * Metrics and observability is not yet supported.
-    * API gateway is not yet supported.
-    * Executables in the form of `exec=` are not yet supported when using external servers and ACLs.
-
-## 0.49.0 (September 29, 2022)
+## 0.49.0 (September 30, 2022)

FEATURES:
* CLI:
@@ -137,9 +42,6 @@ FEATURES:
* Kubernetes 1.24 Support
  * Add support for Kubernetes 1.24 where ServiceAccounts no longer have long-term JWT tokens. [[GH-1431](https://github.com/hashicorp/consul-k8s/pull/1431)]
  * Upgrade kubeVersion in helm chart to support Kubernetes 1.21+.
-* Cluster Peering:
-  * Add support for setting failover `Targets` on the Service Resolver CRD. [[GH-1284](https://github.com/hashicorp/consul-k8s/pull/1284)]
-  * Add support for redirecting to cluster peers on the Service Resolver CRD. [[GH-1284](https://github.com/hashicorp/consul-k8s/pull/1284)]

BREAKING CHANGES:
* Kubernetes 1.24 Support
@@ -1702,3 +1604,4 @@ Features:
## 0.1.0 (September 26, 2018)

* Initial release
+
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 3550faef8f..14875e1d55 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -632,7 +632,7 @@ You can run other tests by enabling them by passing appropriate flags to `go tes
For example, to run mesh gateway tests, which require two Kubernetes clusters, you may use the following command:

-    go test ./... -p 1 -timeout 20m \
+    go test ./charts/consul/... -p 1 -timeout 20m \
      -enable-multi-cluster \
      -kubecontext= \
      -secondary-kubecontext=

@@ -879,7 +879,7 @@ func TestExample(t *testing.T) {
}
```

-Please see [wan federation tests](acceptance/tests/wan-federation/wan_federation_test.go)
+Please see [mesh gateway tests](acceptance/tests/mesh-gateway/mesh_gateway_test.go)
for an example of how to write a test that uses multiple contexts.
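As a companion sketch, a multi-cluster test can guard itself on that flag at runtime. The suite wiring below mirrors the `TestMain` pattern used by the acceptance tests; the `EnableMultiCluster` field name is an assumption inferred from the `-enable-multi-cluster` flag, not verified API:

```go
package example

import (
	"os"
	"testing"

	testsuite "github.com/hashicorp/consul-k8s/acceptance/framework/suite"
)

var suite testsuite.Suite

func TestMain(m *testing.M) {
	suite = testsuite.NewSuite(m)
	os.Exit(suite.Run())
}

func TestNeedsTwoClusters(t *testing.T) {
	cfg := suite.Config()
	// Assumed field backing -enable-multi-cluster: skip when a second
	// kube context was not supplied on the command line.
	if !cfg.EnableMultiCluster {
		t.Skip("skipping because -enable-multi-cluster is not set")
	}
	// The primary cluster corresponds to -kubecontext.
	ctx := suite.Environment().DefaultContext(t)
	_ = ctx
}
```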
#### Writing Assertions diff --git a/LICENSE b/LICENSE.md similarity index 99% rename from LICENSE rename to LICENSE.md index 74f38c0103..82b4de97c7 100644 --- a/LICENSE +++ b/LICENSE.md @@ -1,5 +1,3 @@ -Copyright (c) 2018 HashiCorp, Inc. - Mozilla Public License, version 2.0 1. Definitions diff --git a/README.md b/README.md index bc672a70e4..35b3c4b762 100644 --- a/README.md +++ b/README.md @@ -66,7 +66,7 @@ use Consul with Kubernetes, please see the [Consul and Kubernetes documentation](https://www.consul.io/docs/platform/k8s/index.html). ### Prerequisites - * **Helm 3.6+** + * **Helm 3.2+** (Helm 2 is not supported) * **Kubernetes 1.21-1.24** - This is the earliest version of Kubernetes tested. It is possible that this chart works with earlier versions, but it is untested. diff --git a/acceptance/framework/config/config.go b/acceptance/framework/config/config.go index 8e131c9c59..11a04f1622 100644 --- a/acceptance/framework/config/config.go +++ b/acceptance/framework/config/config.go @@ -52,9 +52,8 @@ type TestConfig struct { NoCleanupOnFailure bool DebugDirectory string - UseAKS bool - UseGKE bool UseKind bool + UseGKE bool helmChartPath string } @@ -153,7 +152,7 @@ func (t *TestConfig) entImage() (string, error) { preRelease = fmt.Sprintf("-%s", split[1]) } - return fmt.Sprintf("hashicorp/consul-enterprise:%s%s-ent", consulImageVersion, preRelease), nil + return fmt.Sprintf("hashicorp/consul-enterprise:%s-ent%s", consulImageVersion, preRelease), nil } // setIfNotEmpty sets key to val in map m if value is not empty. diff --git a/acceptance/framework/config/config_test.go b/acceptance/framework/config/config_test.go index 7733d815db..28fc48b810 100644 --- a/acceptance/framework/config/config_test.go +++ b/acceptance/framework/config/config_test.go @@ -138,11 +138,11 @@ func TestConfig_HelmValuesFromConfig_EntImage(t *testing.T) { }, { consulImage: "hashicorp/consul:1.8.5-rc1", - expImage: "hashicorp/consul-enterprise:1.8.5-rc1-ent", + expImage: "hashicorp/consul-enterprise:1.8.5-ent-rc1", }, { consulImage: "hashicorp/consul:1.7.0-beta3", - expImage: "hashicorp/consul-enterprise:1.7.0-beta3-ent", + expImage: "hashicorp/consul-enterprise:1.7.0-ent-beta3", }, { consulImage: "invalid", @@ -173,7 +173,7 @@ func TestConfig_HelmValuesFromConfig_EntImage(t *testing.T) { require.EqualError(t, err, tt.expErr) } else { require.NoError(t, err) - require.Equal(t, tt.expImage, values["global.image"]) + require.Contains(t, values["global.image"], tt.expImage) } }) } diff --git a/acceptance/framework/consul/helm_cluster.go b/acceptance/framework/consul/helm_cluster.go index a4008702e6..6ae2016eaf 100644 --- a/acceptance/framework/consul/helm_cluster.go +++ b/acceptance/framework/consul/helm_cluster.go @@ -35,10 +35,6 @@ type HelmCluster struct { // a bootstrap token from a Kubernetes secret stored in the cluster. ACLToken string - // SkipCheckForPreviousInstallations is a toggle for skipping the check - // if there are any previous installations of this Helm chart in the cluster. - SkipCheckForPreviousInstallations bool - ctx environment.TestContext helmOptions *helm.Options releaseName string @@ -113,9 +109,7 @@ func (h *HelmCluster) Create(t *testing.T) { }) // Fail if there are any existing installations of the Helm chart. 
- if !h.SkipCheckForPreviousInstallations { - helpers.CheckForPriorInstallations(t, h.kubernetesClient, h.helmOptions, "consul-helm", "chart=consul-helm") - } + helpers.CheckForPriorInstallations(t, h.kubernetesClient, h.helmOptions, "consul-helm", "chart=consul-helm") chartName := config.HelmChartPath if h.helmOptions.Version != config.HelmChartPath { @@ -140,11 +134,7 @@ func (h *HelmCluster) Destroy(t *testing.T) { // Ignore the error returned by the helm delete here so that we can // always idempotently clean up resources in the cluster. - h.helmOptions.ExtraArgs = map[string][]string{ - "--wait": nil, - } - err := helm.DeleteE(t, h.helmOptions, h.releaseName, false) - require.NoError(t, err) + _ = helm.DeleteE(t, h.helmOptions, h.releaseName, false) // Retry because sometimes certain resources (like PVC) take time to delete // in cloud providers. @@ -575,8 +565,9 @@ func configureSCCs(t *testing.T, client kubernetes.Interface, cfg *config.TestCo func defaultValues() map[string]string { values := map[string]string{ - "global.logLevel": "debug", - "server.replicas": "1", + "server.replicas": "1", + "connectInject.envoyExtraArgs": "--log-level debug", + "connectInject.logLevel": "debug", // Disable DNS since enabling it changes the policy for the anonymous token, // which could result in tests passing due to that token having privileges to read services // (false positive). diff --git a/acceptance/framework/consul/helm_cluster_test.go b/acceptance/framework/consul/helm_cluster_test.go index af70812f9a..552aed065c 100644 --- a/acceptance/framework/consul/helm_cluster_test.go +++ b/acceptance/framework/consul/helm_cluster_test.go @@ -23,38 +23,41 @@ func TestNewHelmCluster(t *testing.T) { name: "defaults are used when no helmValues are set", helmValues: map[string]string{}, want: map[string]string{ - "global.image": "test-config-image", - "global.logLevel": "debug", - "server.replicas": "1", + "global.image": "test-config-image", + "server.replicas": "1", + "connectInject.envoyExtraArgs": "--log-level debug", + "connectInject.logLevel": "debug", "connectInject.transparentProxy.defaultEnabled": "false", - "dns.enabled": "false", - "server.extraConfig": `"{\"log_level\": \"TRACE\"}"`, - "client.extraConfig": `"{\"log_level\": \"TRACE\"}"`, + "dns.enabled": "false", + "server.extraConfig": `"{\"log_level\": \"TRACE\"}"`, + "client.extraConfig": `"{\"log_level\": \"TRACE\"}"`, }, }, { name: "when using helmValues, defaults are overridden", helmValues: map[string]string{ - "global.image": "test-image", - "global.logLevel": "debug", - "server.bootstrapExpect": "3", - "server.replicas": "3", + "global.image": "test-image", + "server.bootstrapExpect": "3", + "server.replicas": "3", + "connectInject.envoyExtraArgs": "--foo", + "connectInject.logLevel": "debug", "connectInject.transparentProxy.defaultEnabled": "true", - "dns.enabled": "true", - "server.extraConfig": `"{\"foo\": \"bar\"}"`, - "client.extraConfig": `"{\"foo\": \"bar\"}"`, - "feature.enabled": "true", + "dns.enabled": "true", + "server.extraConfig": `"{\"foo\": \"bar\"}"`, + "client.extraConfig": `"{\"foo\": \"bar\"}"`, + "feature.enabled": "true", }, want: map[string]string{ - "global.image": "test-image", - "global.logLevel": "debug", - "server.bootstrapExpect": "3", - "server.replicas": "3", + "global.image": "test-image", + "server.bootstrapExpect": "3", + "server.replicas": "3", + "connectInject.envoyExtraArgs": "--foo", + "connectInject.logLevel": "debug", "connectInject.transparentProxy.defaultEnabled": "true", - "dns.enabled": 
"true", - "server.extraConfig": `"{\"foo\": \"bar\"}"`, - "client.extraConfig": `"{\"foo\": \"bar\"}"`, - "feature.enabled": "true", + "dns.enabled": "true", + "server.extraConfig": `"{\"foo\": \"bar\"}"`, + "client.extraConfig": `"{\"foo\": \"bar\"}"`, + "feature.enabled": "true", }, }, } diff --git a/acceptance/framework/flags/flags.go b/acceptance/framework/flags/flags.go index 5d90d74f9e..81f8131efb 100644 --- a/acceptance/framework/flags/flags.go +++ b/acceptance/framework/flags/flags.go @@ -41,9 +41,8 @@ type TestFlags struct { flagDebugDirectory string - flagUseAKS bool - flagUseGKE bool flagUseKind bool + flagUseGKE bool flagDisablePeering bool @@ -106,12 +105,10 @@ func (t *TestFlags) init() { flag.StringVar(&t.flagDebugDirectory, "debug-directory", "", "The directory where to write debug information about failed test runs, "+ "such as logs and pod definitions. If not provided, a temporary directory will be created by the tests.") - flag.BoolVar(&t.flagUseAKS, "use-aks", false, - "If true, the tests will assume they are running against an AKS cluster(s).") - flag.BoolVar(&t.flagUseGKE, "use-gke", false, - "If true, the tests will assume they are running against a GKE cluster(s).") flag.BoolVar(&t.flagUseKind, "use-kind", false, "If true, the tests will assume they are running against a local kind cluster(s).") + flag.BoolVar(&t.flagUseGKE, "use-gke", false, + "If true, the tests will assume they are running against a GKE cluster(s).") flag.BoolVar(&t.flagDisablePeering, "disable-peering", false, "If true, the peering tests will not run.") @@ -171,8 +168,7 @@ func (t *TestFlags) TestConfigFromFlags() *config.TestConfig { NoCleanupOnFailure: t.flagNoCleanupOnFailure, DebugDirectory: tempDir, - UseAKS: t.flagUseAKS, - UseGKE: t.flagUseGKE, UseKind: t.flagUseKind, + UseGKE: t.flagUseGKE, } } diff --git a/acceptance/framework/k8s/deploy.go b/acceptance/framework/k8s/deploy.go index 869ebdd804..2a258dcd96 100644 --- a/acceptance/framework/k8s/deploy.go +++ b/acceptance/framework/k8s/deploy.go @@ -96,7 +96,7 @@ func CheckStaticServerConnectionMultipleFailureMessages(t *testing.T, options *k expectedOutput = expectedSuccessOutput } - retrier := &retry.Timer{Timeout: 320 * time.Second, Wait: 2 * time.Second} + retrier := &retry.Timer{Timeout: 160 * time.Second, Wait: 2 * time.Second} args := []string{"exec", "deploy/" + sourceApp, "-c", sourceApp, "--", "curl", "-vvvsSf"} args = append(args, curlArgs...) 
diff --git a/acceptance/go.mod b/acceptance/go.mod index 5c8893cac5..b7b11bc27c 100644 --- a/acceptance/go.mod +++ b/acceptance/go.mod @@ -1,6 +1,6 @@ module github.com/hashicorp/consul-k8s/acceptance -go 1.19 +go 1.18 require ( github.com/gruntwork-io/terratest v0.31.2 diff --git a/acceptance/go.sum b/acceptance/go.sum index 4097158e83..8bb13efef3 100644 --- a/acceptance/go.sum +++ b/acceptance/go.sum @@ -184,8 +184,6 @@ github.com/fatih/color v1.12.0/go.mod h1:ELkj/draVOlAH/xkhN6mQ50Qd0MPOk5AAr3maGE github.com/fatih/structs v1.1.0 h1:Q7juDM0QtcnhCpeyLGQKyg4TOIghuNXrkL32pHAUMxo= github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M= github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= -github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= -github.com/form3tech-oss/jwt-go v3.2.3+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= github.com/form3tech-oss/jwt-go v3.2.3+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= github.com/frankban/quicktest v1.10.0/go.mod h1:ui7WezCLWMWxVWr1GETZY3smRy0G4KWq9vcPtJmFl7Y= github.com/frankban/quicktest v1.13.0 h1:yNZif1OkDfNoDfb9zZa9aXIpejNR4F23Wely0c+Qdqk= @@ -264,10 +262,7 @@ github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvq github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= -github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= -github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= @@ -281,7 +276,6 @@ github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMyw github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.7 h1:81/ik6ipDQS2aGcBfIN5dHDB36BwrStyeAQquSYCV4o= github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= github.com/google/go-containerregistry v0.0.0-20200110202235-f4fb41bf00a3/go.mod h1:2wIuQute9+hhWqvL3vEI7YB0EKluF4WcPzI1eAliazk= @@ -388,8 +382,6 @@ github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= github.com/hashicorp/mdns v1.0.4/go.mod h1:mtBihi+LeNXGtG8L9dX59gAEa12BDtBQSp4v/YAJqrc= -github.com/hashicorp/mdns v1.0.4/go.mod h1:mtBihi+LeNXGtG8L9dX59gAEa12BDtBQSp4v/YAJqrc= -github.com/hashicorp/memberlist v0.3.0/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE= 
github.com/hashicorp/memberlist v0.3.0/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE= github.com/hashicorp/memberlist v0.3.1 h1:MXgUXLqva1QvpVEDQW1IQLG0wivQAtmFlHRQ+1vWZfM= github.com/hashicorp/memberlist v0.3.1/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE= @@ -490,8 +482,6 @@ github.com/mitchellh/mapstructure v1.4.2/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RR github.com/mitchellh/reflectwalk v1.0.0 h1:9D+8oIskB4VJBN5SFlmc27fSlIBZaov1Wpk/IfikLNY= github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= github.com/moby/spdystream v0.2.0 h1:cjW1zVyyoiM0T7b6UoySUFqzXMoqRckQtXwGPiBhOM8= -github.com/moby/spdystream v0.2.0 h1:cjW1zVyyoiM0T7b6UoySUFqzXMoqRckQtXwGPiBhOM8= -github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c= github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= @@ -506,10 +496,7 @@ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8m github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs= -github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= -github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= -github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= @@ -523,7 +510,6 @@ github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+ github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= -github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= github.com/onsi/ginkgo v1.16.4 h1:29JGrr5oVBm5ulCWet69zQkzWipVXIol6ygQUe/EzNc= github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= @@ -531,7 +517,6 @@ github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1Cpa github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= -github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= github.com/onsi/gomega v1.15.0 h1:WjP/FQ/sk43MRmnEcT+MlDw2TFvkrXlprrPST/IudjU= github.com/onsi/gomega v1.15.0/go.mod 
h1:cIuvLEne0aoVhAgh/O6ac0Op8WWw9H6eYCriF+tEHG0= github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= @@ -627,8 +612,6 @@ github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UV github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= -github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= -github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= @@ -741,14 +724,10 @@ golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/ golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200602114024-627f9648deb9/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20210410081132-afb366fc7cd1/go.mod h1:9tjilg8BloeKEkVJvy7fQ90B1CfIiPueXVOjqfkSzI8= -golang.org/x/net v0.0.0-20210410081132-afb366fc7cd1/go.mod h1:9tjilg8BloeKEkVJvy7fQ90B1CfIiPueXVOjqfkSzI8= golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= golang.org/x/net v0.0.0-20210520170846-37e1c6afe023/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211216030914-fe4d6282115f h1:hEYJvxw1lSnWIl8X9ofsYMklzaDs90JI2az5YMd4fPM= @@ -815,21 +794,14 @@ golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200602225109-6fdc65e7d980/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200622214017-ed371f2e16b4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200622214017-ed371f2e16b4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys 
v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220412211240-33da011f77ad h1:ntjMns5wyP/fN65tdBD4g8J5w8n015+iIIs9rtjXkY0= golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= @@ -896,10 +868,8 @@ golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapK golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -946,8 +916,6 @@ google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfG google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c 
h1:wtujag7C+4D6KMoulW9YauvK2lgdvCMS260jsqqBXr0= google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= @@ -977,11 +945,8 @@ google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpAD google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= -google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= -google.golang.org/protobuf v1.26.0 h1:bxAC2xTBsZGibn2RTntX0oH50xLsqy1OxA9tTL3p/lk= google.golang.org/protobuf v1.26.0 h1:bxAC2xTBsZGibn2RTntX0oH50xLsqy1OxA9tTL3p/lk= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -1028,13 +993,9 @@ honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= k8s.io/api v0.17.0/go.mod h1:npsyOePkeP0CPwyGfXDHxvypiYMJxBWAMpQxCaJ4ZxI= k8s.io/api v0.19.3/go.mod h1:VF+5FT1B74Pw3KxMdKyinLo+zynBaMBiAfGMuldcNDs= -k8s.io/api v0.19.3/go.mod h1:VF+5FT1B74Pw3KxMdKyinLo+zynBaMBiAfGMuldcNDs= k8s.io/api v0.22.2 h1:M8ZzAD0V6725Fjg53fKeTJxGsJvRbk4TEm/fexHMtfw= -k8s.io/api v0.22.2 h1:M8ZzAD0V6725Fjg53fKeTJxGsJvRbk4TEm/fexHMtfw= -k8s.io/api v0.22.2/go.mod h1:y3ydYpLJAaDI+BbSe2xmGcqxiWHmWjkEeIbiwHvnPR8= k8s.io/api v0.22.2/go.mod h1:y3ydYpLJAaDI+BbSe2xmGcqxiWHmWjkEeIbiwHvnPR8= k8s.io/apimachinery v0.17.0/go.mod h1:b9qmWdKlLuU9EBh+06BtLcSf/Mu89rWL33naRxs1uZg= -k8s.io/apimachinery v0.17.0/go.mod h1:b9qmWdKlLuU9EBh+06BtLcSf/Mu89rWL33naRxs1uZg= k8s.io/apimachinery v0.19.3/go.mod h1:DnPGDnARWFvYa3pMHgSxtbZb7gpzzAZ1pTfaUNDVlmA= k8s.io/apimachinery v0.22.2 h1:ejz6y/zNma8clPVfNDLnPbleBo6MpoFy/HBiBqCouVk= k8s.io/apimachinery v0.22.2/go.mod h1:O3oNtNadZdeOMxHFVxOreoznohCpy0z6mocxbZr7oJ0= @@ -1075,8 +1036,6 @@ rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8 rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= sigs.k8s.io/structured-merge-diff v0.0.0-20190525122527-15d366b2352e/go.mod h1:wWxsB5ozmmv/SG7nM11ayaAW51xMvak/t1r0CSlcokI= -sigs.k8s.io/structured-merge-diff v0.0.0-20190525122527-15d366b2352e/go.mod h1:wWxsB5ozmmv/SG7nM11ayaAW51xMvak/t1r0CSlcokI= -sigs.k8s.io/structured-merge-diff v1.0.1-0.20191108220359-b1b620dd3f06/go.mod h1:/ULNhyfzRopfcjskuui0cTITekDduZ7ycKN3oUT9R18= sigs.k8s.io/structured-merge-diff v1.0.1-0.20191108220359-b1b620dd3f06/go.mod h1:/ULNhyfzRopfcjskuui0cTITekDduZ7ycKN3oUT9R18= sigs.k8s.io/structured-merge-diff/v4 v4.0.1/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= diff --git a/acceptance/tests/basic/basic_test.go b/acceptance/tests/basic/basic_test.go index 91047711a1..0edab960d9 100644 --- a/acceptance/tests/basic/basic_test.go +++ 
b/acceptance/tests/basic/basic_test.go @@ -50,7 +50,6 @@ func TestBasicInstallation(t *testing.T) { "global.tls.enabled": strconv.FormatBool(c.secure), "global.gossipEncryption.autoGenerate": strconv.FormatBool(c.secure), "global.tls.enableAutoEncrypt": strconv.FormatBool(c.autoEncrypt), - "client.enabled": "true", } consulCluster := consul.NewHelmCluster(t, helmValues, suite.Environment().DefaultContext(t), suite.Config(), releaseName) diff --git a/acceptance/tests/cli/cli_install_test.go b/acceptance/tests/cli/cli_install_test.go deleted file mode 100644 index f8c40d3194..0000000000 --- a/acceptance/tests/cli/cli_install_test.go +++ /dev/null @@ -1,109 +0,0 @@ -package cli - -import ( - "fmt" - "strings" - "testing" - "time" - - "github.com/hashicorp/consul-k8s/acceptance/framework/cli" - "github.com/hashicorp/consul-k8s/acceptance/framework/connhelper" - "github.com/hashicorp/consul-k8s/acceptance/framework/consul" - "github.com/hashicorp/consul-k8s/acceptance/framework/logger" - "github.com/hashicorp/consul/sdk/testutil/retry" - "github.com/stretchr/testify/require" -) - -const ipv4RegEx = "(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)" - -// TestInstall tests that we can install consul service mesh with the CLI -// and see that services can connect. -func TestInstall(t *testing.T) { - cases := map[string]struct { - secure bool - }{ - "not-secure": {secure: false}, - "secure": {secure: true}, - } - - for name, c := range cases { - t.Run(name, func(t *testing.T) { - cli, err := cli.NewCLI() - require.NoError(t, err) - - cfg := suite.Config() - ctx := suite.Environment().DefaultContext(t) - - connHelper := connhelper.ConnectHelper{ - ClusterKind: consul.CLI, - Secure: c.secure, - ReleaseName: consul.CLIReleaseName, - Ctx: ctx, - Cfg: cfg, - } - - connHelper.Setup(t) - - connHelper.Install(t) - connHelper.DeployClientAndServer(t) - if c.secure { - connHelper.TestConnectionFailureWithoutIntention(t) - connHelper.CreateIntention(t) - } - - // Run proxy list and get the two results. - listOut, err := cli.Run(t, ctx.KubectlOptions(t), "proxy", "list") - require.NoError(t, err) - logger.Log(t, string(listOut)) - list := translateListOutput(listOut) - require.Equal(t, 2, len(list)) - for _, proxyType := range list { - require.Equal(t, "Sidecar", proxyType) - } - - // Run proxy read and check that the connection is present in the output. - retrier := &retry.Timer{Timeout: 160 * time.Second, Wait: 2 * time.Second} - retry.RunWith(retrier, t, func(r *retry.R) { - for podName := range list { - out, err := cli.Run(t, ctx.KubectlOptions(t), "proxy", "read", podName) - require.NoError(t, err) - - output := string(out) - logger.Log(t, output) - - // Both proxies must see their own local agent and app as clusters. - require.Regexp(r, "consul-dataplane.*STATIC", output) - require.Regexp(r, "local_app.*STATIC", output) - - // Static Client must have Static Server as a cluster and endpoint. - if strings.Contains(podName, "static-client") { - require.Regexp(r, "static-server.*static-server\\.default\\.dc1\\.internal.*EDS", output) - require.Regexp(r, ipv4RegEx+".*static-server", output) - } - } - }) - - connHelper.TestConnectionSuccess(t) - connHelper.TestConnectionFailureWhenUnhealthy(t) - }) - } -} - -// translateListOutput takes the raw output from the proxy list command and -// translates the table into a map. 
-func translateListOutput(raw []byte) map[string]string { - formatted := make(map[string]string) - for _, pod := range strings.Split(strings.TrimSpace(string(raw)), "\n")[3:] { - row := strings.Split(strings.TrimSpace(pod), "\t") - - var name string - if len(row) == 3 { // Handle the case where namespace is present - name = fmt.Sprintf("%s/%s", strings.TrimSpace(row[0]), strings.TrimSpace(row[1])) - } else if len(row) == 2 { - name = strings.TrimSpace(row[0]) - } - formatted[name] = row[len(row)-1] - } - - return formatted -} diff --git a/acceptance/tests/cli/cli_upgrade_test.go b/acceptance/tests/cli/cli_upgrade_test.go deleted file mode 100644 index 6fcf82f738..0000000000 --- a/acceptance/tests/cli/cli_upgrade_test.go +++ /dev/null @@ -1,43 +0,0 @@ -package cli - -import ( - "context" - "testing" - - "github.com/hashicorp/consul-k8s/acceptance/framework/connhelper" - "github.com/hashicorp/consul-k8s/acceptance/framework/consul" - "github.com/stretchr/testify/require" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// TestConnectInjectOnUpgrade tests that Connect works before and after an -// upgrade is performed on the cluster. -func TestUpgrade(t *testing.T) { - cfg := suite.Config() - ctx := suite.Environment().DefaultContext(t) - - connHelper := connhelper.ConnectHelper{ - ClusterKind: consul.CLI, - ReleaseName: consul.CLIReleaseName, - Ctx: ctx, - Cfg: cfg, - } - - connHelper.Setup(t) - - connHelper.Install(t) - - // Change a value on the connect-injector to force an update. - connHelper.HelmValues = map[string]string{ - "ingressGateways.enabled": "true", - "ingressGateways.defaults.replicas": "1", - } - - connHelper.Upgrade(t) - - t.Log("checking that the ingress gateway was install as a result of the upgrade") - k8sClient := ctx.KubernetesClient(t) - igwPods, err := k8sClient.CoreV1().Pods("").List(context.Background(), metav1.ListOptions{LabelSelector: "component=ingress-gateway"}) - require.NoError(t, err) - require.Len(t, igwPods.Items, 1) -} diff --git a/acceptance/tests/cli/main_test.go b/acceptance/tests/cli/main_test.go deleted file mode 100644 index 85cef25abe..0000000000 --- a/acceptance/tests/cli/main_test.go +++ /dev/null @@ -1,15 +0,0 @@ -package cli - -import ( - "os" - "testing" - - testsuite "github.com/hashicorp/consul-k8s/acceptance/framework/suite" -) - -var suite testsuite.Suite - -func TestMain(m *testing.M) { - suite = testsuite.NewSuite(m) - os.Exit(suite.Run()) -} diff --git a/acceptance/tests/connect/connect_external_servers_test.go b/acceptance/tests/connect/connect_external_servers_test.go deleted file mode 100644 index e1f4c10f19..0000000000 --- a/acceptance/tests/connect/connect_external_servers_test.go +++ /dev/null @@ -1,142 +0,0 @@ -package connect - -import ( - "context" - "fmt" - "strconv" - "testing" - - "github.com/hashicorp/consul-k8s/acceptance/framework/connhelper" - "github.com/hashicorp/consul-k8s/acceptance/framework/consul" - "github.com/hashicorp/consul-k8s/acceptance/framework/helpers" - "github.com/hashicorp/consul-k8s/acceptance/framework/k8s" - "github.com/hashicorp/consul-k8s/acceptance/framework/logger" - "github.com/hashicorp/consul/api" - "github.com/stretchr/testify/require" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// TestConnectInject_ExternalServers tests that connect works when using external servers. -// It sets up an external Consul server in the same cluster but a different Helm installation -// and then treats this server as external. 
-func TestConnectInject_ExternalServers(t *testing.T) { - for _, secure := range []bool{ - false, - true, - } { - caseName := fmt.Sprintf("secure: %t", secure) - t.Run(caseName, func(t *testing.T) { - cfg := suite.Config() - ctx := suite.Environment().DefaultContext(t) - - serverHelmValues := map[string]string{ - "global.acls.manageSystemACLs": strconv.FormatBool(secure), - "global.tls.enabled": strconv.FormatBool(secure), - - // Don't install injector, controller and cni on this cluster so that it's not installed twice. - "controller.enabled": "false", - "connectInject.enabled": "false", - "connectInject.cni.enabled": "false", - } - serverReleaseName := helpers.RandomName() - consulServerCluster := consul.NewHelmCluster(t, serverHelmValues, ctx, cfg, serverReleaseName) - - consulServerCluster.Create(t) - - helmValues := map[string]string{ - "server.enabled": "false", - "global.acls.manageSystemACLs": strconv.FormatBool(secure), - - "global.tls.enabled": strconv.FormatBool(secure), - - "connectInject.enabled": "true", - - "externalServers.enabled": "true", - "externalServers.hosts[0]": fmt.Sprintf("%s-consul-server", serverReleaseName), - "externalServers.httpsPort": "8500", - } - - if secure { - helmValues["global.tls.caCert.secretName"] = fmt.Sprintf("%s-consul-ca-cert", serverReleaseName) - helmValues["global.tls.caCert.secretKey"] = "tls.crt" - helmValues["global.acls.bootstrapToken.secretName"] = fmt.Sprintf("%s-consul-bootstrap-acl-token", serverReleaseName) - helmValues["global.acls.bootstrapToken.secretKey"] = "token" - helmValues["externalServers.httpsPort"] = "8501" - } - - releaseName := helpers.RandomName() - consulCluster := consul.NewHelmCluster(t, helmValues, ctx, cfg, releaseName) - consulCluster.SkipCheckForPreviousInstallations = true - - consulCluster.Create(t) - - logger.Log(t, "creating static-server and static-client deployments") - k8s.DeployKustomize(t, ctx.KubectlOptions(t), cfg.NoCleanupOnFailure, cfg.DebugDirectory, "../fixtures/cases/static-server-inject") - if cfg.EnableTransparentProxy { - k8s.DeployKustomize(t, ctx.KubectlOptions(t), cfg.NoCleanupOnFailure, cfg.DebugDirectory, "../fixtures/cases/static-client-tproxy") - } else { - k8s.DeployKustomize(t, ctx.KubectlOptions(t), cfg.NoCleanupOnFailure, cfg.DebugDirectory, "../fixtures/cases/static-client-inject") - } - - // Check that both static-server and static-client have been injected and now have 2 containers. 
- for _, labelSelector := range []string{"app=static-server", "app=static-client"} { - podList, err := ctx.KubernetesClient(t).CoreV1().Pods(metav1.NamespaceAll).List(context.Background(), metav1.ListOptions{ - LabelSelector: labelSelector, - }) - require.NoError(t, err) - require.Len(t, podList.Items, 1) - require.Len(t, podList.Items[0].Spec.Containers, 2) - } - - if secure { - consulClient, _ := consulServerCluster.SetupConsulClient(t, true) - - logger.Log(t, "checking that the connection is not successful because there's no intention") - if cfg.EnableTransparentProxy { - k8s.CheckStaticServerConnectionFailing(t, ctx.KubectlOptions(t), connhelper.StaticClientName, "http://static-server") - } else { - k8s.CheckStaticServerConnectionFailing(t, ctx.KubectlOptions(t), connhelper.StaticClientName, "http://localhost:1234") - } - - intention := &api.ServiceIntentionsConfigEntry{ - Kind: api.ServiceIntentions, - Name: connhelper.StaticServerName, - Sources: []*api.SourceIntention{ - { - Name: connhelper.StaticClientName, - Action: api.IntentionActionAllow, - }, - }, - } - - logger.Log(t, "creating intention") - _, _, err := consulClient.ConfigEntries().Set(intention, nil) - require.NoError(t, err) - } - - logger.Log(t, "checking that connection is successful") - if cfg.EnableTransparentProxy { - k8s.CheckStaticServerConnectionSuccessful(t, ctx.KubectlOptions(t), connhelper.StaticClientName, "http://static-server") - } else { - k8s.CheckStaticServerConnectionSuccessful(t, ctx.KubectlOptions(t), connhelper.StaticClientName, "http://localhost:1234") - } - - // Test that kubernetes readiness status is synced to Consul. - // Create the file so that the readiness probe of the static-server pod fails. - logger.Log(t, "testing k8s -> consul health checks sync by making the static-server unhealthy") - k8s.RunKubectl(t, ctx.KubectlOptions(t), "exec", "deploy/"+connhelper.StaticServerName, "--", "touch", "/tmp/unhealthy") - - // The readiness probe should take a moment to be reflected in Consul, CheckStaticServerConnection will retry - // until Consul marks the service instance unavailable for mesh traffic, causing the connection to fail. - // We are expecting a "connection reset by peer" error because in a case of health checks, - // there will be no healthy proxy host to connect to. That's why we can't assert that we receive an empty reply - // from server, which is the case when a connection is unsuccessful due to intentions in other tests. 
- logger.Log(t, "checking that connection is unsuccessful") - if cfg.EnableTransparentProxy { - k8s.CheckStaticServerConnectionMultipleFailureMessages(t, ctx.KubectlOptions(t), connhelper.StaticClientName, false, []string{"curl: (56) Recv failure: Connection reset by peer", "curl: (52) Empty reply from server", "curl: (7) Failed to connect to static-server port 80: Connection refused"}, "", "http://static-server") - } else { - k8s.CheckStaticServerConnectionMultipleFailureMessages(t, ctx.KubectlOptions(t), connhelper.StaticClientName, false, []string{"curl: (56) Recv failure: Connection reset by peer", "curl: (52) Empty reply from server"}, "", "http://localhost:1234") - } - }) - } -} diff --git a/acceptance/framework/connhelper/connect_helper.go b/acceptance/tests/connect/connect_helper.go similarity index 92% rename from acceptance/framework/connhelper/connect_helper.go rename to acceptance/tests/connect/connect_helper.go index c5d677ba6f..473c2ab562 100644 --- a/acceptance/framework/connhelper/connect_helper.go +++ b/acceptance/tests/connect/connect_helper.go @@ -1,10 +1,9 @@ -package connhelper +package connect import ( "context" "strconv" "testing" - "time" "github.com/hashicorp/consul-k8s/acceptance/framework/config" "github.com/hashicorp/consul-k8s/acceptance/framework/consul" @@ -20,7 +19,7 @@ import ( const ( StaticClientName = "static-client" - StaticServerName = "static-server" + staticServerName = "static-server" ) // ConnectHelper configures a Consul cluster for connect injection tests. @@ -32,13 +31,16 @@ type ConnectHelper struct { // Secure configures the Helm chart for the test to use ACL tokens. Secure bool + // AutoEncrypt configures the Helm chart for the test to use AutoEncrypt. + AutoEncrypt bool + // HelmValues are the additional helm values to use when installing or // upgrading the cluster beyond connectInject.enabled, global.tls.enabled, - // global.tls.enableAutoEncrypt, global.acls.manageSystemACLs which are + // global.tls.enableAutoEncrypt, global.acls.mangageSystemACLs which are // set by the Secure and AutoEncrypt fields. HelmValues map[string]string - // ReleaseName is the name of the Consul cluster. + // RelaseName is the name of the Consul cluster. ReleaseName string Ctx environment.TestContext @@ -91,12 +93,11 @@ func (c *ConnectHelper) DeployClientAndServer(t *testing.T) { // deployments because golang will execute them in reverse order // (i.e. the last registered cleanup function will be executed first). 
t.Cleanup(func() { - retrier := &retry.Timer{Timeout: 30 * time.Second, Wait: 100 * time.Millisecond} - retry.RunWith(retrier, t, func(r *retry.R) { + retry.Run(t, func(r *retry.R) { tokens, _, err := c.consulClient.ACL().TokenList(nil) require.NoError(r, err) for _, token := range tokens { - require.NotContains(r, token.Description, StaticServerName) + require.NotContains(r, token.Description, staticServerName) require.NotContains(r, token.Description, StaticClientName) } }) @@ -141,7 +142,7 @@ func (c *ConnectHelper) CreateIntention(t *testing.T) { logger.Log(t, "creating intention") _, _, err := c.consulClient.ConfigEntries().Set(&api.ServiceIntentionsConfigEntry{ Kind: api.ServiceIntentions, - Name: StaticServerName, + Name: staticServerName, Sources: []*api.SourceIntention{ { Name: StaticClientName, @@ -152,7 +153,7 @@ func (c *ConnectHelper) CreateIntention(t *testing.T) { require.NoError(t, err) } -// TestConnectionSuccess ensures the static-server pod can connect to the +// TestConnectionSuccessful ensures the static-server pod can connect to the // static-client pod once the intention is set. func (c *ConnectHelper) TestConnectionSuccess(t *testing.T) { logger.Log(t, "checking that connection is successful") @@ -172,7 +173,7 @@ func (c *ConnectHelper) TestConnectionFailureWhenUnhealthy(t *testing.T) { // Create a file called "unhealthy" at "/tmp/" so that the readiness probe // of the static-server pod fails. logger.Log(t, "testing k8s -> consul health checks sync by making the static-server unhealthy") - k8s.RunKubectl(t, c.Ctx.KubectlOptions(t), "exec", "deploy/"+StaticServerName, "--", "touch", "/tmp/unhealthy") + k8s.RunKubectl(t, c.Ctx.KubectlOptions(t), "exec", "deploy/"+staticServerName, "--", "touch", "/tmp/unhealthy") // The readiness probe should take a moment to be reflected in Consul, // CheckStaticServerConnection will retry until Consul marks the service @@ -197,7 +198,7 @@ func (c *ConnectHelper) TestConnectionFailureWhenUnhealthy(t *testing.T) { } // Return the static-server to a "healthy state". 
- k8s.RunKubectl(t, c.Ctx.KubectlOptions(t), "exec", "deploy/"+StaticServerName, "--", "rm", "/tmp/unhealthy") + k8s.RunKubectl(t, c.Ctx.KubectlOptions(t), "exec", "deploy/"+staticServerName, "--", "rm", "/tmp/unhealthy") } // helmValues uses the Secure and AutoEncrypt fields to set values for the Helm @@ -207,9 +208,8 @@ func (c *ConnectHelper) helmValues() map[string]string { helmValues := map[string]string{ "connectInject.enabled": "true", "global.tls.enabled": strconv.FormatBool(c.Secure), + "global.tls.enableAutoEncrypt": strconv.FormatBool(c.AutoEncrypt), "global.acls.manageSystemACLs": strconv.FormatBool(c.Secure), - "dns.enabled": "true", - "dns.enableRedirection": "true", } helpers.MergeMaps(helmValues, c.HelmValues) diff --git a/acceptance/tests/connect/connect_inject_namespaces_test.go b/acceptance/tests/connect/connect_inject_namespaces_test.go index db48465bda..22fcc19c7a 100644 --- a/acceptance/tests/connect/connect_inject_namespaces_test.go +++ b/acceptance/tests/connect/connect_inject_namespaces_test.go @@ -8,7 +8,6 @@ import ( "testing" terratestk8s "github.com/gruntwork-io/terratest/modules/k8s" - "github.com/hashicorp/consul-k8s/acceptance/framework/connhelper" "github.com/hashicorp/consul-k8s/acceptance/framework/consul" "github.com/hashicorp/consul-k8s/acceptance/framework/helpers" "github.com/hashicorp/consul-k8s/acceptance/framework/k8s" @@ -67,6 +66,7 @@ func TestConnectInjectNamespaces(t *testing.T) { for _, c := range cases { t.Run(c.name, func(t *testing.T) { ctx := suite.Environment().DefaultContext(t) + cfg := suite.Config() helmValues := map[string]string{ "global.enableConsulNamespaces": "true", @@ -129,13 +129,13 @@ func TestConnectInjectNamespaces(t *testing.T) { tokens, _, err := consulClient.ACL().TokenList(serverQueryOpts) require.NoError(r, err) for _, token := range tokens { - require.NotContains(r, token.Description, connhelper.StaticServerName) + require.NotContains(r, token.Description, staticServerName) } tokens, _, err = consulClient.ACL().TokenList(clientQueryOpts) require.NoError(r, err) for _, token := range tokens { - require.NotContains(r, token.Description, connhelper.StaticClientName) + require.NotContains(r, token.Description, StaticClientName) } }) } @@ -166,29 +166,29 @@ func TestConnectInjectNamespaces(t *testing.T) { // Kubernetes namespace. // If a single destination namespace is set, we expect all services // to be registered in that destination Consul namespace. 
- services, _, err := consulClient.Catalog().Service(connhelper.StaticServerName, "", serverQueryOpts) + services, _, err := consulClient.Catalog().Service(staticServerName, "", serverQueryOpts) require.NoError(t, err) require.Len(t, services, 1) - services, _, err = consulClient.Catalog().Service(connhelper.StaticClientName, "", clientQueryOpts) + services, _, err = consulClient.Catalog().Service(StaticClientName, "", clientQueryOpts) require.NoError(t, err) require.Len(t, services, 1) if c.secure { logger.Log(t, "checking that the connection is not successful because there's no intention") if cfg.EnableTransparentProxy { - k8s.CheckStaticServerConnectionFailing(t, staticClientOpts, connhelper.StaticClientName, fmt.Sprintf("http://static-server.%s", staticServerNamespace)) + k8s.CheckStaticServerConnectionFailing(t, staticClientOpts, StaticClientName, fmt.Sprintf("http://static-server.%s", staticServerNamespace)) } else { - k8s.CheckStaticServerConnectionFailing(t, staticClientOpts, connhelper.StaticClientName, "http://localhost:1234") + k8s.CheckStaticServerConnectionFailing(t, staticClientOpts, StaticClientName, "http://localhost:1234") } intention := &api.ServiceIntentionsConfigEntry{ Kind: api.ServiceIntentions, - Name: connhelper.StaticServerName, + Name: staticServerName, Namespace: staticServerNamespace, Sources: []*api.SourceIntention{ { - Name: connhelper.StaticClientName, + Name: StaticClientName, Namespace: StaticClientNamespace, Action: api.IntentionActionAllow, }, @@ -209,15 +209,15 @@ func TestConnectInjectNamespaces(t *testing.T) { logger.Log(t, "checking that connection is successful") if cfg.EnableTransparentProxy { - k8s.CheckStaticServerConnectionSuccessful(t, staticClientOpts, connhelper.StaticClientName, fmt.Sprintf("http://static-server.%s", staticServerNamespace)) + k8s.CheckStaticServerConnectionSuccessful(t, staticClientOpts, StaticClientName, fmt.Sprintf("http://static-server.%s", staticServerNamespace)) } else { - k8s.CheckStaticServerConnectionSuccessful(t, staticClientOpts, connhelper.StaticClientName, "http://localhost:1234") + k8s.CheckStaticServerConnectionSuccessful(t, staticClientOpts, StaticClientName, "http://localhost:1234") } // Test that kubernetes readiness status is synced to Consul. // Create the file so that the readiness probe of the static-server pod fails. logger.Log(t, "testing k8s -> consul health checks sync by making the static-server unhealthy") - k8s.RunKubectl(t, staticServerOpts, "exec", "deploy/"+connhelper.StaticServerName, "--", "touch", "/tmp/unhealthy") + k8s.RunKubectl(t, staticServerOpts, "exec", "deploy/"+staticServerName, "--", "touch", "/tmp/unhealthy") // The readiness probe should take a moment to be reflected in Consul, CheckStaticServerConnection will retry // until Consul marks the service instance unavailable for mesh traffic, causing the connection to fail. @@ -226,9 +226,9 @@ func TestConnectInjectNamespaces(t *testing.T) { // from server, which is the case when a connection is unsuccessful due to intentions in other tests. 
logger.Log(t, "checking that connection is unsuccessful") if cfg.EnableTransparentProxy { - k8s.CheckStaticServerConnectionMultipleFailureMessages(t, staticClientOpts, connhelper.StaticClientName, false, []string{"curl: (56) Recv failure: Connection reset by peer", "curl: (52) Empty reply from server", "curl: (7) Failed to connect to static-server.ns1 port 80: Connection refused"}, "", fmt.Sprintf("http://static-server.%s", staticServerNamespace)) + k8s.CheckStaticServerConnectionMultipleFailureMessages(t, staticClientOpts, StaticClientName, false, []string{"curl: (56) Recv failure: Connection reset by peer", "curl: (52) Empty reply from server", "curl: (7) Failed to connect to static-server.ns1 port 80: Connection refused"}, "", fmt.Sprintf("http://static-server.%s", staticServerNamespace)) } else { - k8s.CheckStaticServerConnectionMultipleFailureMessages(t, staticClientOpts, connhelper.StaticClientName, false, []string{"curl: (56) Recv failure: Connection reset by peer", "curl: (52) Empty reply from server"}, "", "http://localhost:1234") + k8s.CheckStaticServerConnectionMultipleFailureMessages(t, staticClientOpts, StaticClientName, false, []string{"curl: (56) Recv failure: Connection reset by peer", "curl: (52) Empty reply from server"}, "", "http://localhost:1234") } }) } diff --git a/acceptance/tests/connect/connect_inject_test.go b/acceptance/tests/connect/connect_inject_test.go index 2e75846884..ec694b8a95 100644 --- a/acceptance/tests/connect/connect_inject_test.go +++ b/acceptance/tests/connect/connect_inject_test.go @@ -8,7 +8,7 @@ import ( "testing" "time" - "github.com/hashicorp/consul-k8s/acceptance/framework/connhelper" + "github.com/hashicorp/consul-k8s/acceptance/framework/cli" "github.com/hashicorp/consul-k8s/acceptance/framework/consul" "github.com/hashicorp/consul-k8s/acceptance/framework/helpers" "github.com/hashicorp/consul-k8s/acceptance/framework/k8s" @@ -19,25 +19,61 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// TestConnectInject tests that Connect works in a default and a secure installation using Helm CLI. +const ipv4RegEx = "(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)" + +// TestConnectInject tests that Connect works in a default and a secure installation. 
func TestConnectInject(t *testing.T) { cases := map[string]struct { - secure bool + clusterKind consul.ClusterKind + releaseName string + secure bool + autoEncrypt bool }{ - "not-secure": {secure: false}, - "secure": {secure: true}, + "Helm install without secure or auto-encrypt": { + clusterKind: consul.Helm, + releaseName: helpers.RandomName(), + }, + "Helm install with secure": { + clusterKind: consul.Helm, + releaseName: helpers.RandomName(), + secure: true, + }, + "Helm install with secure and auto-encrypt": { + clusterKind: consul.Helm, + releaseName: helpers.RandomName(), + secure: true, + autoEncrypt: true, + }, + "CLI install without secure or auto-encrypt": { + clusterKind: consul.CLI, + releaseName: consul.CLIReleaseName, + }, + "CLI install with secure": { + clusterKind: consul.CLI, + releaseName: consul.CLIReleaseName, + secure: true, + }, + "CLI install with secure and auto-encrypt": { + clusterKind: consul.CLI, + releaseName: consul.CLIReleaseName, + secure: true, + autoEncrypt: true, + }, } for name, c := range cases { t.Run(name, func(t *testing.T) { + cli, err := cli.NewCLI() + require.NoError(t, err) + cfg := suite.Config() ctx := suite.Environment().DefaultContext(t) - releaseName := helpers.RandomName() - connHelper := connhelper.ConnectHelper{ - ClusterKind: consul.Helm, + connHelper := ConnectHelper{ + ClusterKind: c.clusterKind, Secure: c.secure, - ReleaseName: releaseName, + AutoEncrypt: c.autoEncrypt, + ReleaseName: c.releaseName, Ctx: ctx, Cfg: cfg, } @@ -51,6 +87,99 @@ func TestConnectInject(t *testing.T) { connHelper.CreateIntention(t) } + // Run proxy list and get the two results. + listOut, err := cli.Run(t, ctx.KubectlOptions(t), "proxy", "list") + require.NoError(t, err) + logger.Log(t, string(listOut)) + list := translateListOutput(listOut) + require.Equal(t, 2, len(list)) + for _, proxyType := range list { + require.Equal(t, "Sidecar", proxyType) + } + + // Run proxy read and check that the connection is present in the output. + retrier := &retry.Timer{Timeout: 160 * time.Second, Wait: 2 * time.Second} + retry.RunWith(retrier, t, func(r *retry.R) { + for podName := range list { + out, err := cli.Run(t, ctx.KubectlOptions(t), "proxy", "read", podName) + require.NoError(t, err) + + output := string(out) + logger.Log(t, output) + + // Both proxies must see their own local agent and app as clusters. + require.Regexp(r, "local_agent.*STATIC", output) + require.Regexp(r, "local_app.*STATIC", output) + + // Static Client must have Static Server as a cluster and endpoint. + if strings.Contains(podName, "static-client") { + require.Regexp(r, "static-server.*static-server\\.default\\.dc1\\.internal.*EDS", output) + require.Regexp(r, ipv4RegEx+".*static-server", output) + } + + } + }) + + connHelper.TestConnectionSuccess(t) + connHelper.TestConnectionFailureWhenUnhealthy(t) + }) + } +} + +// TestConnectInjectOnUpgrade tests that Connect works before and after an +// upgrade is performed on the cluster. 
+func TestConnectInjectOnUpgrade(t *testing.T) { + cases := map[string]struct { + clusterKind consul.ClusterKind + releaseName string + initial, upgrade map[string]string + }{ + "CLI upgrade changes nothing": { + clusterKind: consul.CLI, + releaseName: consul.CLIReleaseName, + }, + "CLI upgrade to enable ingressGateway": { + clusterKind: consul.CLI, + releaseName: consul.CLIReleaseName, + initial: map[string]string{}, + upgrade: map[string]string{ + "ingressGateways.enabled": "true", + "ingressGateways.defaults.replicas": "1", + }, + }, + "CLI upgrade to enable UI": { + clusterKind: consul.CLI, + releaseName: consul.CLIReleaseName, + initial: map[string]string{}, + upgrade: map[string]string{ + "ui.enabled": "true", + }, + }, + } + + for name, c := range cases { + t.Run(name, func(t *testing.T) { + cfg := suite.Config() + ctx := suite.Environment().DefaultContext(t) + + connHelper := ConnectHelper{ + ClusterKind: c.clusterKind, + HelmValues: c.initial, + ReleaseName: c.releaseName, + Ctx: ctx, + Cfg: cfg, + } + + connHelper.Setup(t) + + connHelper.Install(t) + connHelper.DeployClientAndServer(t) + connHelper.TestConnectionSuccess(t) + connHelper.TestConnectionFailureWhenUnhealthy(t) + + connHelper.HelmValues = c.upgrade + + connHelper.Upgrade(t) connHelper.TestConnectionSuccess(t) connHelper.TestConnectionFailureWhenUnhealthy(t) }) @@ -59,16 +188,26 @@ func TestConnectInject(t *testing.T) { // Test the endpoints controller cleans up force-killed pods. func TestConnectInject_CleanupKilledPods(t *testing.T) { - for _, secure := range []bool{false, true} { - name := fmt.Sprintf("secure: %t", secure) + cases := []struct { + secure bool + autoEncrypt bool + }{ + {false, false}, + {true, false}, + {true, true}, + } + + for _, c := range cases { + name := fmt.Sprintf("secure: %t; auto-encrypt: %t", c.secure, c.autoEncrypt) t.Run(name, func(t *testing.T) { cfg := suite.Config() ctx := suite.Environment().DefaultContext(t) helmValues := map[string]string{ "connectInject.enabled": "true", - "global.tls.enabled": strconv.FormatBool(secure), - "global.acls.manageSystemACLs": strconv.FormatBool(secure), + "global.tls.enabled": strconv.FormatBool(c.secure), + "global.tls.enableAutoEncrypt": strconv.FormatBool(c.autoEncrypt), + "global.acls.manageSystemACLs": strconv.FormatBool(c.secure), } releaseName := helpers.RandomName() @@ -80,7 +219,7 @@ func TestConnectInject_CleanupKilledPods(t *testing.T) { k8s.DeployKustomize(t, ctx.KubectlOptions(t), cfg.NoCleanupOnFailure, cfg.DebugDirectory, "../fixtures/cases/static-client-inject") logger.Log(t, "waiting for static-client to be registered with Consul") - consulClient, _ := consulCluster.SetupConsulClient(t, secure) + consulClient, _ := consulCluster.SetupConsulClient(t, c.secure) retry.Run(t, func(r *retry.R) { for _, name := range []string{"static-client", "static-client-sidecar-proxy"} { instances, _, err := consulClient.Catalog().Service(name, "", nil) @@ -120,6 +259,48 @@ func TestConnectInject_CleanupKilledPods(t *testing.T) { } } +// Test that when Consul clients are restarted and lose all their registrations, +// the services get re-registered and can continue to talk to each other. 
+func TestConnectInject_RestartConsulClients(t *testing.T) { + cfg := suite.Config() + ctx := suite.Environment().DefaultContext(t) + + helmValues := map[string]string{ + "connectInject.enabled": "true", + } + + releaseName := helpers.RandomName() + consulCluster := consul.NewHelmCluster(t, helmValues, ctx, cfg, releaseName) + + consulCluster.Create(t) + + logger.Log(t, "creating static-server and static-client deployments") + k8s.DeployKustomize(t, ctx.KubectlOptions(t), cfg.NoCleanupOnFailure, cfg.DebugDirectory, "../fixtures/cases/static-server-inject") + if cfg.EnableTransparentProxy { + k8s.DeployKustomize(t, ctx.KubectlOptions(t), cfg.NoCleanupOnFailure, cfg.DebugDirectory, "../fixtures/cases/static-client-tproxy") + } else { + k8s.DeployKustomize(t, ctx.KubectlOptions(t), cfg.NoCleanupOnFailure, cfg.DebugDirectory, "../fixtures/cases/static-client-inject") + } + + logger.Log(t, "checking that connection is successful") + if cfg.EnableTransparentProxy { + k8s.CheckStaticServerConnectionSuccessful(t, ctx.KubectlOptions(t), StaticClientName, "http://static-server") + } else { + k8s.CheckStaticServerConnectionSuccessful(t, ctx.KubectlOptions(t), StaticClientName, "http://localhost:1234") + } + + logger.Log(t, "restarting Consul client daemonset") + k8s.RunKubectl(t, ctx.KubectlOptions(t), "rollout", "restart", fmt.Sprintf("ds/%s-consul-client", releaseName)) + k8s.RunKubectl(t, ctx.KubectlOptions(t), "rollout", "status", fmt.Sprintf("ds/%s-consul-client", releaseName)) + + logger.Log(t, "checking that connection is still successful") + if cfg.EnableTransparentProxy { + k8s.CheckStaticServerConnectionSuccessful(t, ctx.KubectlOptions(t), StaticClientName, "http://static-server") + } else { + k8s.CheckStaticServerConnectionSuccessful(t, ctx.KubectlOptions(t), StaticClientName, "http://localhost:1234") + } +} + const multiport = "multiport" const multiportAdmin = "multiport-admin" @@ -127,8 +308,17 @@ const multiportAdmin = "multiport-admin" // two ports. This tests inbound connections to each port of the multiport app, and outbound connections from the // multiport app to static-server. func TestConnectInject_MultiportServices(t *testing.T) { - for _, secure := range []bool{false, true} { - name := fmt.Sprintf("secure: %t", secure) + cases := []struct { + secure bool + autoEncrypt bool + }{ + {false, false}, + {true, false}, + {true, true}, + } + + for _, c := range cases { + name := fmt.Sprintf("secure: %t; auto-encrypt: %t", c.secure, c.autoEncrypt) t.Run(name, func(t *testing.T) { cfg := suite.Config() ctx := suite.Environment().DefaultContext(t) @@ -141,8 +331,9 @@ func TestConnectInject_MultiportServices(t *testing.T) { helmValues := map[string]string{ "connectInject.enabled": "true", - "global.tls.enabled": strconv.FormatBool(secure), - "global.acls.manageSystemACLs": strconv.FormatBool(secure), + "global.tls.enabled": strconv.FormatBool(c.secure), + "global.tls.enableAutoEncrypt": strconv.FormatBool(c.autoEncrypt), + "global.acls.manageSystemACLs": strconv.FormatBool(c.secure), } releaseName := helpers.RandomName() @@ -150,10 +341,10 @@ func TestConnectInject_MultiportServices(t *testing.T) { consulCluster.Create(t) - consulClient, _ := consulCluster.SetupConsulClient(t, secure) + consulClient, _ := consulCluster.SetupConsulClient(t, c.secure) // Check that the ACL token is deleted. - if secure { + if c.secure { // We need to register the cleanup function before we create the deployments // because golang will execute them in reverse order i.e. 
the last registered // cleanup function will be executed first. @@ -165,8 +356,8 @@ func TestConnectInject_MultiportServices(t *testing.T) { for _, token := range tokens { require.NotContains(r, token.Description, multiport) require.NotContains(r, token.Description, multiportAdmin) - require.NotContains(r, token.Description, connhelper.StaticClientName) - require.NotContains(r, token.Description, connhelper.StaticServerName) + require.NotContains(r, token.Description, StaticClientName) + require.NotContains(r, token.Description, staticServerName) } }) }) @@ -192,10 +383,10 @@ func TestConnectInject_MultiportServices(t *testing.T) { require.Len(t, podList.Items, 1) require.Len(t, podList.Items[0].Spec.Containers, 4) - if secure { + if c.secure { logger.Log(t, "checking that the connection is not successful because there's no intention") - k8s.CheckStaticServerConnectionFailing(t, ctx.KubectlOptions(t), connhelper.StaticClientName, "http://localhost:1234") - k8s.CheckStaticServerConnectionFailing(t, ctx.KubectlOptions(t), connhelper.StaticClientName, "http://localhost:2234") + k8s.CheckStaticServerConnectionFailing(t, ctx.KubectlOptions(t), StaticClientName, "http://localhost:1234") + k8s.CheckStaticServerConnectionFailing(t, ctx.KubectlOptions(t), StaticClientName, "http://localhost:2234") logger.Log(t, fmt.Sprintf("creating intention for %s", multiport)) _, _, err := consulClient.ConfigEntries().Set(&api.ServiceIntentionsConfigEntry{ @@ -203,7 +394,7 @@ func TestConnectInject_MultiportServices(t *testing.T) { Name: multiport, Sources: []*api.SourceIntention{ { - Name: connhelper.StaticClientName, + Name: StaticClientName, Action: api.IntentionActionAllow, }, }, @@ -215,7 +406,7 @@ func TestConnectInject_MultiportServices(t *testing.T) { Name: multiportAdmin, Sources: []*api.SourceIntention{ { - Name: connhelper.StaticClientName, + Name: StaticClientName, Action: api.IntentionActionAllow, }, }, @@ -224,10 +415,10 @@ func TestConnectInject_MultiportServices(t *testing.T) { } // Check connection from static-client to multiport. - k8s.CheckStaticServerConnectionSuccessful(t, ctx.KubectlOptions(t), connhelper.StaticClientName, "http://localhost:1234") + k8s.CheckStaticServerConnectionSuccessful(t, ctx.KubectlOptions(t), StaticClientName, "http://localhost:1234") // Check connection from static-client to multiport-admin. - k8s.CheckStaticServerConnectionSuccessfulWithMessage(t, ctx.KubectlOptions(t), connhelper.StaticClientName, "hello world from 9090 admin", "http://localhost:2234") + k8s.CheckStaticServerConnectionSuccessfulWithMessage(t, ctx.KubectlOptions(t), StaticClientName, "hello world from 9090 admin", "http://localhost:2234") // Now that we've checked inbound connections to a multi port pod, check outbound connection from multi port // pod to static-server. @@ -237,15 +428,15 @@ func TestConnectInject_MultiportServices(t *testing.T) { // For outbound connections from the multi port pod, only intentions from the first service in the multiport // pod need to be created, since all upstream connections are made through the first service's envoy proxy. 
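The cleanup-ordering remark earlier in this test (cleanups run in reverse registration order, so the last registered runs first) is simply testing.T.Cleanup's LIFO guarantee. A minimal self-contained illustration:

package cleanupsketch // illustration only

import "testing"

// TestCleanupOrder demonstrates the ordering rule the test above relies on:
// testing.T.Cleanup runs registered functions last-in, first-out, so a cleanup
// registered before the deployments is guaranteed to run after theirs.
func TestCleanupOrder(t *testing.T) {
	t.Cleanup(func() { t.Log("runs second: registered first") })
	t.Cleanup(func() { t.Log("runs first: registered last") })
}

With the token-assertion cleanup safely registered first, the test proceeds to create the outbound intention described above.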
- if secure { + if c.secure { logger.Log(t, "checking that the connection is not successful because there's no intention") k8s.CheckStaticServerConnectionFailing(t, ctx.KubectlOptions(t), multiport, "http://localhost:3234") - logger.Log(t, fmt.Sprintf("creating intention for %s", connhelper.StaticServerName)) + logger.Log(t, fmt.Sprintf("creating intention for %s", staticServerName)) _, _, err := consulClient.ConfigEntries().Set(&api.ServiceIntentionsConfigEntry{ Kind: api.ServiceIntentions, - Name: connhelper.StaticServerName, + Name: staticServerName, Sources: []*api.SourceIntention{ { Name: multiport, @@ -272,8 +463,27 @@ func TestConnectInject_MultiportServices(t *testing.T) { // We are expecting a "connection reset by peer" error because in a case of health checks, // there will be no healthy proxy host to connect to. That's why we can't assert that we receive an empty reply // from server, which is the case when a connection is unsuccessful due to intentions in other tests. - k8s.CheckStaticServerConnectionMultipleFailureMessages(t, ctx.KubectlOptions(t), connhelper.StaticClientName, false, []string{"curl: (56) Recv failure: Connection reset by peer", "curl: (52) Empty reply from server"}, "", "http://localhost:1234") - k8s.CheckStaticServerConnectionMultipleFailureMessages(t, ctx.KubectlOptions(t), connhelper.StaticClientName, false, []string{"curl: (56) Recv failure: Connection reset by peer", "curl: (52) Empty reply from server"}, "", "http://localhost:2234") + k8s.CheckStaticServerConnectionMultipleFailureMessages(t, ctx.KubectlOptions(t), StaticClientName, false, []string{"curl: (56) Recv failure: Connection reset by peer", "curl: (52) Empty reply from server"}, "", "http://localhost:1234") + k8s.CheckStaticServerConnectionMultipleFailureMessages(t, ctx.KubectlOptions(t), StaticClientName, false, []string{"curl: (56) Recv failure: Connection reset by peer", "curl: (52) Empty reply from server"}, "", "http://localhost:2234") }) } } + +// translateListOutput takes the raw output from the proxy list command and +// translates the table into a map. 
+func translateListOutput(raw []byte) map[string]string { + formatted := make(map[string]string) + for _, pod := range strings.Split(strings.TrimSpace(string(raw)), "\n")[3:] { + row := strings.Split(strings.TrimSpace(pod), "\t") + + var name string + if len(row) == 3 { // Handle the case where namespace is present + name = fmt.Sprintf("%s/%s", strings.TrimSpace(row[0]), strings.TrimSpace(row[1])) + } else if len(row) == 2 { + name = strings.TrimSpace(row[0]) + } + formatted[name] = row[len(row)-1] + } + + return formatted +} diff --git a/acceptance/tests/consul-dns/consul_dns_test.go b/acceptance/tests/consul-dns/consul_dns_test.go index 47cfb4af07..e6973459cc 100644 --- a/acceptance/tests/consul-dns/consul_dns_test.go +++ b/acceptance/tests/consul-dns/consul_dns_test.go @@ -14,16 +14,13 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) +const podName = "dns-pod" + func TestConsulDNS(t *testing.T) { cfg := suite.Config() if cfg.EnableCNI { t.Skipf("skipping because -enable-cni is set") } - - if cfg.UseAKS { - t.Skipf("skipping because -use-aks is set") - } - for _, secure := range []bool{false, true} { name := fmt.Sprintf("secure: %t", secure) t.Run(name, func(t *testing.T) { @@ -57,9 +54,8 @@ func TestConsulDNS(t *testing.T) { serverIPs = append(serverIPs, serverPod.Status.PodIP) } - dnsPodName := fmt.Sprintf("%s-dns-pod", releaseName) dnsTestPodArgs := []string{ - "run", "-i", dnsPodName, "--restart", "Never", "--image", "anubhavmishra/tiny-tools", "--", "dig", fmt.Sprintf("@%s-consul-dns", releaseName), "consul.service.consul", + "run", "-i", podName, "--restart", "Never", "--image", "anubhavmishra/tiny-tools", "--", "dig", fmt.Sprintf("@%s-consul-dns", releaseName), "consul.service.consul", } helpers.Cleanup(t, suite.Config().NoCleanupOnFailure, func() { @@ -67,7 +63,7 @@ func TestConsulDNS(t *testing.T) { // This shouldn't cause any test pollution because the underlying // objects are deployments, and so when other tests create these // they should have different pod names. 
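The translateListOutput helper added above in connect_inject_test.go can be exercised directly with a hand-written table. A hedged sketch of such a test, assuming it lives in the same package as the helper; the exact header layout of the proxy list output is an assumption, chosen to match the three lines the parser skips:

package connect // assumes access to translateListOutput; illustration only

import (
	"testing"

	"github.com/stretchr/testify/require"
)

// TestTranslateListOutput_sketch feeds the parser three header lines
// (skipped via the [3:] slice) followed by tab-separated rows, and checks
// that each pod name maps to its proxy type.
func TestTranslateListOutput_sketch(t *testing.T) {
	raw := []byte("Namespace: default\n\nName\tType\n" +
		"static-client-7d5d7dcc4f-x2fqs\tSidecar\n" +
		"static-server-6c8c8d6b8b-9mzlr\tSidecar\n")
	require.Equal(t, map[string]string{
		"static-client-7d5d7dcc4f-x2fqs": "Sidecar",
		"static-server-6c8c8d6b8b-9mzlr": "Sidecar",
	}, translateListOutput(raw))
}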
- k8s.RunKubectl(t, ctx.KubectlOptions(t), "delete", "pod", dnsPodName) + k8s.RunKubectl(t, ctx.KubectlOptions(t), "delete", "pod", podName) }) retry.Run(t, func(r *retry.R) { diff --git a/acceptance/tests/controller/controller_test.go b/acceptance/tests/controller/controller_test.go index 9514a71309..9451140aa0 100644 --- a/acceptance/tests/controller/controller_test.go +++ b/acceptance/tests/controller/controller_test.go @@ -32,13 +32,18 @@ func TestController(t *testing.T) { t.Skipf("skipping because -enable-cni is set and controller is already tested with regular tproxy") } cases := []struct { - secure bool - useVault bool + secure bool + autoEncrypt bool + useVault bool }{ - {false, false}, - {true, false}, - {false, true}, - {true, true}, + {false, false, false}, + {true, false, false}, + {true, true, false}, + {true, true, true}, + {false, false, true}, + // Vault with TLS requires autoEncrypt set to true as well, so the below + // is not valid + // {true, false, true}, } // The name of a service intention in consul is @@ -47,7 +52,7 @@ func TestController(t *testing.T) { const IntentionName = "svc1" for _, c := range cases { - name := fmt.Sprintf("secure: %t, vault: %t", c.secure, c.useVault) + name := fmt.Sprintf("secure: %t; auto-encrypt: %t", c.secure, c.autoEncrypt) t.Run(name, func(t *testing.T) { ctx := suite.Environment().DefaultContext(t) @@ -55,6 +60,7 @@ func TestController(t *testing.T) { "controller.enabled": "true", "connectInject.enabled": "true", "global.tls.enabled": strconv.FormatBool(c.secure), + "global.tls.enableAutoEncrypt": strconv.FormatBool(c.autoEncrypt), "global.acls.manageSystemACLs": strconv.FormatBool(c.secure), } diff --git a/acceptance/tests/fixtures/bases/mesh-peering/kustomization.yaml b/acceptance/tests/fixtures/bases/mesh-peering/kustomization.yaml deleted file mode 100644 index b48237763e..0000000000 --- a/acceptance/tests/fixtures/bases/mesh-peering/kustomization.yaml +++ /dev/null @@ -1,2 +0,0 @@ -resources: - - meshpeering.yaml diff --git a/acceptance/tests/fixtures/bases/mesh-peering/meshpeering.yaml b/acceptance/tests/fixtures/bases/mesh-peering/meshpeering.yaml deleted file mode 100644 index de84382d3e..0000000000 --- a/acceptance/tests/fixtures/bases/mesh-peering/meshpeering.yaml +++ /dev/null @@ -1,7 +0,0 @@ -apiVersion: consul.hashicorp.com/v1alpha1 -kind: Mesh -metadata: - name: mesh -spec: - peering: - peerThroughMeshGateways: true diff --git a/acceptance/tests/fixtures/bases/static-metrics-app/deployment.yaml b/acceptance/tests/fixtures/bases/static-metrics-app/deployment.yaml index a3020ddb47..9283a8aae6 100644 --- a/acceptance/tests/fixtures/bases/static-metrics-app/deployment.yaml +++ b/acceptance/tests/fixtures/bases/static-metrics-app/deployment.yaml @@ -24,4 +24,4 @@ spec: - name: METRICS_ENABLE_PROMETHEUS value: "true" ports: - - containerPort: 9090 + - containerPort: 9090 \ No newline at end of file diff --git a/acceptance/tests/ingress-gateway/ingress_gateway_namespaces_test.go b/acceptance/tests/ingress-gateway/ingress_gateway_namespaces_test.go index b713620f1e..d1e5757f91 100644 --- a/acceptance/tests/ingress-gateway/ingress_gateway_namespaces_test.go +++ b/acceptance/tests/ingress-gateway/ingress_gateway_namespaces_test.go @@ -31,10 +31,10 @@ func TestIngressGatewaySingleNamespace(t *testing.T) { secure bool }{ { - secure: false, + false, }, { - secure: true, + true, }, } for _, c := range cases { @@ -42,7 +42,8 @@ func TestIngressGatewaySingleNamespace(t *testing.T) { t.Run(name, func(t *testing.T) { ctx := 
suite.Environment().DefaultContext(t) - igName := "ingress-gateway" + // Install the Helm chart without the ingress gateway first + // so that we can create the namespace for it. helmValues := map[string]string{ "connectInject.enabled": "true", "connectInject.consulNamespaces.consulDestinationNamespace": testNamespace, @@ -50,11 +51,6 @@ func TestIngressGatewaySingleNamespace(t *testing.T) { "global.enableConsulNamespaces": "true", "global.acls.manageSystemACLs": strconv.FormatBool(c.secure), "global.tls.enabled": strconv.FormatBool(c.secure), - - "ingressGateways.enabled": "true", - "ingressGateways.gateways[0].name": igName, - "ingressGateways.gateways[0].replicas": "1", - "ingressGateways.gateways[0].consulNamespace": testNamespace, } releaseName := helpers.RandomName() @@ -64,6 +60,25 @@ func TestIngressGatewaySingleNamespace(t *testing.T) { consulClient, _ := consulCluster.SetupConsulClient(t, c.secure) + // Create the destination namespace in the non-secure case. + // In the secure installation, this namespace is created by the server-acl-init job. + if !c.secure { + logger.Logf(t, "creating the %s namespace in Consul", testNamespace) + _, _, err := consulClient.Namespaces().Create(&api.Namespace{ + Name: testNamespace, + }, nil) + require.NoError(t, err) + } + + igName := "ingress-gateway" + logger.Log(t, "upgrading with ingress gateways enabled") + consulCluster.Upgrade(t, map[string]string{ + "ingressGateways.enabled": "true", + "ingressGateways.gateways[0].name": igName, + "ingressGateways.gateways[0].replicas": "1", + "ingressGateways.gateways[0].consulNamespace": testNamespace, + }) + logger.Logf(t, "creating Kubernetes namespace %s", testNamespace) k8s.RunKubectl(t, ctx.KubectlOptions(t), "create", "ns", testNamespace) helpers.Cleanup(t, cfg.NoCleanupOnFailure, func() { @@ -143,6 +158,9 @@ func TestIngressGatewaySingleNamespace(t *testing.T) { // Test we can connect through the ingress gateway when both // the ingress gateway and the connect service are in different namespaces. +// These tests currently only test non-secure and secure without auto-encrypt installations +// because in the case of namespaces there isn't a significant distinction in code between auto-encrypt +// and non-auto-encrypt secure installations, so testing just one is enough. func TestIngressGatewayNamespaceMirroring(t *testing.T) { cfg := suite.Config() if !cfg.EnableEnterprise { @@ -153,10 +171,10 @@ func TestIngressGatewayNamespaceMirroring(t *testing.T) { secure bool }{ { - secure: false, + false, }, { - secure: true, + true, }, } for _, c := range cases { @@ -165,6 +183,8 @@ func TestIngressGatewayNamespaceMirroring(t *testing.T) { ctx := suite.Environment().DefaultContext(t) igName := "ingress" + // Install the Helm chart without the ingress gateway first + // so that we can create the namespace for it. helmValues := map[string]string{ "connectInject.enabled": "true", "connectInject.consulNamespaces.mirroringK8S": "true", diff --git a/acceptance/tests/ingress-gateway/ingress_gateway_test.go b/acceptance/tests/ingress-gateway/ingress_gateway_test.go index b6535439c1..a913d2c024 100644 --- a/acceptance/tests/ingress-gateway/ingress_gateway_test.go +++ b/acceptance/tests/ingress-gateway/ingress_gateway_test.go @@ -18,17 +18,24 @@ const StaticClientName = "static-client" // Test that ingress gateways work in a default installation and a secure installation. 
func TestIngressGateway(t *testing.T) { cases := []struct { - secure bool + secure bool + autoEncrypt bool }{ { - secure: false, + false, + false, }, { - secure: true, + true, + false, + }, + { + true, + true, }, } for _, c := range cases { - name := fmt.Sprintf("secure: %t", c.secure) + name := fmt.Sprintf("secure: %t; auto-encrypt: %t", c.secure, c.autoEncrypt) t.Run(name, func(t *testing.T) { ctx := suite.Environment().DefaultContext(t) cfg := suite.Config() @@ -41,6 +48,7 @@ func TestIngressGateway(t *testing.T) { "global.acls.manageSystemACLs": strconv.FormatBool(c.secure), "global.tls.enabled": strconv.FormatBool(c.secure), + "global.tls.enableAutoEncrypt": strconv.FormatBool(c.autoEncrypt), } releaseName := helpers.RandomName() diff --git a/acceptance/tests/wan-federation/main_test.go b/acceptance/tests/mesh-gateway/main_test.go similarity index 72% rename from acceptance/tests/wan-federation/main_test.go rename to acceptance/tests/mesh-gateway/main_test.go index 197a3181e8..fb8935441e 100644 --- a/acceptance/tests/wan-federation/main_test.go +++ b/acceptance/tests/mesh-gateway/main_test.go @@ -1,4 +1,4 @@ -package wanfederation +package meshgateway import ( "fmt" @@ -16,7 +16,7 @@ func TestMain(m *testing.M) { if suite.Config().EnableMultiCluster { os.Exit(suite.Run()) } else { - fmt.Println("Skipping wan federation tests because -enable-multi-cluster is not set") + fmt.Println("Skipping mesh gateway tests because -enable-multi-cluster is not set") os.Exit(0) } } diff --git a/acceptance/tests/mesh-gateway/mesh_gateway_test.go b/acceptance/tests/mesh-gateway/mesh_gateway_test.go new file mode 100644 index 0000000000..557f5befc1 --- /dev/null +++ b/acceptance/tests/mesh-gateway/mesh_gateway_test.go @@ -0,0 +1,301 @@ +package meshgateway + +import ( + "context" + "fmt" + "testing" + + "github.com/hashicorp/consul-k8s/acceptance/framework/consul" + "github.com/hashicorp/consul-k8s/acceptance/framework/environment" + "github.com/hashicorp/consul-k8s/acceptance/framework/helpers" + "github.com/hashicorp/consul-k8s/acceptance/framework/k8s" + "github.com/hashicorp/consul-k8s/acceptance/framework/logger" + "github.com/hashicorp/consul/api" + "github.com/stretchr/testify/require" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +const StaticClientName = "static-client" + +// Test that Connect and wan federation over mesh gateways work in a default installation, +// i.e. without ACLs because TLS is required for WAN federation over mesh gateways.
+func TestMeshGatewayDefault(t *testing.T) { + env := suite.Environment() + cfg := suite.Config() + + primaryContext := env.DefaultContext(t) + secondaryContext := env.Context(t, environment.SecondaryContextName) + + primaryHelmValues := map[string]string{ + "global.datacenter": "dc1", + "global.tls.enabled": "true", + "global.tls.httpsOnly": "false", + "global.federation.enabled": "true", + "global.federation.createFederationSecret": "true", + + "connectInject.enabled": "true", + "connectInject.replicas": "1", + "controller.enabled": "true", + + "meshGateway.enabled": "true", + "meshGateway.replicas": "1", + } + + if cfg.UseKind { + primaryHelmValues["meshGateway.service.type"] = "NodePort" + primaryHelmValues["meshGateway.service.nodePort"] = "30000" + } + + releaseName := helpers.RandomName() + + // Install the primary consul cluster in the default kubernetes context + primaryConsulCluster := consul.NewHelmCluster(t, primaryHelmValues, primaryContext, cfg, releaseName) + primaryConsulCluster.Create(t) + + // Get the federation secret from the primary cluster and apply it to secondary cluster + federationSecretName := fmt.Sprintf("%s-consul-federation", releaseName) + logger.Logf(t, "retrieving federation secret %s from the primary cluster and applying to the secondary", federationSecretName) + federationSecret, err := primaryContext.KubernetesClient(t).CoreV1().Secrets(primaryContext.KubectlOptions(t).Namespace).Get(context.Background(), federationSecretName, metav1.GetOptions{}) + require.NoError(t, err) + federationSecret.ResourceVersion = "" + _, err = secondaryContext.KubernetesClient(t).CoreV1().Secrets(secondaryContext.KubectlOptions(t).Namespace).Create(context.Background(), federationSecret, metav1.CreateOptions{}) + require.NoError(t, err) + + // Create secondary cluster + secondaryHelmValues := map[string]string{ + "global.datacenter": "dc2", + + "global.tls.enabled": "true", + "global.tls.httpsOnly": "false", + "global.tls.caCert.secretName": federationSecretName, + "global.tls.caCert.secretKey": "caCert", + "global.tls.caKey.secretName": federationSecretName, + "global.tls.caKey.secretKey": "caKey", + + "global.federation.enabled": "true", + + "server.extraVolumes[0].type": "secret", + "server.extraVolumes[0].name": federationSecretName, + "server.extraVolumes[0].load": "true", + "server.extraVolumes[0].items[0].key": "serverConfigJSON", + "server.extraVolumes[0].items[0].path": "config.json", + + "connectInject.enabled": "true", + "connectInject.replicas": "1", + "controller.enabled": "true", + + "meshGateway.enabled": "true", + "meshGateway.replicas": "1", + } + + if cfg.UseKind { + secondaryHelmValues["meshGateway.service.type"] = "NodePort" + secondaryHelmValues["meshGateway.service.nodePort"] = "30000" + } + + // Install the secondary consul cluster in the secondary kubernetes context + secondaryConsulCluster := consul.NewHelmCluster(t, secondaryHelmValues, secondaryContext, cfg, releaseName) + secondaryConsulCluster.Create(t) + + if cfg.UseKind { + // This is a temporary workaround that seems to fix mesh gateway tests on kind 1.22.x. + // TODO (ishustava): we need to investigate this further and remove once we've found the issue.
+ k8s.RunKubectl(t, primaryContext.KubectlOptions(t), "rollout", "restart", fmt.Sprintf("sts/%s-consul-server", releaseName)) + k8s.RunKubectl(t, primaryContext.KubectlOptions(t), "rollout", "status", fmt.Sprintf("sts/%s-consul-server", releaseName)) + } + + primaryClient, _ := primaryConsulCluster.SetupConsulClient(t, false) + secondaryClient, _ := secondaryConsulCluster.SetupConsulClient(t, false) + + // Verify federation between servers + logger.Log(t, "verifying federation was successful") + helpers.VerifyFederation(t, primaryClient, secondaryClient, releaseName, false) + + // Create a ProxyDefaults resource to configure services to use the mesh + // gateways. + logger.Log(t, "creating proxy-defaults config") + kustomizeDir := "../fixtures/bases/mesh-gateway" + k8s.KubectlApplyK(t, primaryContext.KubectlOptions(t), kustomizeDir) + helpers.Cleanup(t, cfg.NoCleanupOnFailure, func() { + k8s.KubectlDeleteK(t, primaryContext.KubectlOptions(t), kustomizeDir) + }) + + // Check that we can connect services over the mesh gateways + logger.Log(t, "creating static-server in dc2") + k8s.DeployKustomize(t, secondaryContext.KubectlOptions(t), cfg.NoCleanupOnFailure, cfg.DebugDirectory, "../fixtures/cases/static-server-inject") + + logger.Log(t, "creating static-client in dc1") + k8s.DeployKustomize(t, primaryContext.KubectlOptions(t), cfg.NoCleanupOnFailure, cfg.DebugDirectory, "../fixtures/cases/static-client-multi-dc") + + logger.Log(t, "checking that connection is successful") + k8s.CheckStaticServerConnectionSuccessful(t, primaryContext.KubectlOptions(t), StaticClientName, "http://localhost:1234") +} + +// Test that Connect and wan federation over mesh gateways work in a secure installation, +// with ACLs and TLS with and without auto-encrypt enabled. +func TestMeshGatewaySecure(t *testing.T) { + cases := []struct { + name string + enableAutoEncrypt string + }{ + { + "with ACLs and TLS without auto-encrypt", + "false", + }, + { + "with ACLs and auto-encrypt", + "true", + }, + } + + for _, c := range cases { + t.Run(c.name, func(t *testing.T) { + env := suite.Environment() + cfg := suite.Config() + + primaryContext := env.DefaultContext(t) + secondaryContext := env.Context(t, environment.SecondaryContextName) + + primaryHelmValues := map[string]string{ + "global.datacenter": "dc1", + "global.tls.enabled": "true", + "global.tls.enableAutoEncrypt": c.enableAutoEncrypt, + + "global.acls.manageSystemACLs": "true", + "global.acls.createReplicationToken": "true", + + "global.federation.enabled": "true", + "global.federation.createFederationSecret": "true", + + "connectInject.enabled": "true", + "connectInject.replicas": "1", + "controller.enabled": "true", + + "meshGateway.enabled": "true", + "meshGateway.replicas": "1", + } + + if cfg.UseKind { + primaryHelmValues["meshGateway.service.type"] = "NodePort" + primaryHelmValues["meshGateway.service.nodePort"] = "30000" + } + + releaseName := helpers.RandomName() + + // Install the primary consul cluster in the default kubernetes context + primaryConsulCluster := consul.NewHelmCluster(t, primaryHelmValues, primaryContext, cfg, releaseName) + primaryConsulCluster.Create(t) + + // Get the federation secret from the primary cluster and apply it to secondary cluster + federationSecretName := fmt.Sprintf("%s-consul-federation", releaseName) + logger.Logf(t, "retrieving federation secret %s from the primary cluster and applying to the secondary", federationSecretName) + federationSecret, err := 
primaryContext.KubernetesClient(t).CoreV1().Secrets(primaryContext.KubectlOptions(t).Namespace).Get(context.Background(), federationSecretName, metav1.GetOptions{}) + require.NoError(t, err) + federationSecret.ResourceVersion = "" + _, err = secondaryContext.KubernetesClient(t).CoreV1().Secrets(secondaryContext.KubectlOptions(t).Namespace).Create(context.Background(), federationSecret, metav1.CreateOptions{}) + require.NoError(t, err) + + var k8sAuthMethodHost string + // When running on kind, the kube API address in kubeconfig will have a localhost address + // which will not work from inside the container. That's why we need to use the endpoints address instead + // which will point the node IP. + if cfg.UseKind { + // The Kubernetes AuthMethod host is read from the endpoints for the Kubernetes service. + kubernetesEndpoint, err := secondaryContext.KubernetesClient(t).CoreV1().Endpoints("default").Get(context.Background(), "kubernetes", metav1.GetOptions{}) + require.NoError(t, err) + k8sAuthMethodHost = fmt.Sprintf("%s:%d", kubernetesEndpoint.Subsets[0].Addresses[0].IP, kubernetesEndpoint.Subsets[0].Ports[0].Port) + } else { + k8sAuthMethodHost = k8s.KubernetesAPIServerHostFromOptions(t, secondaryContext.KubectlOptions(t)) + } + + // Create secondary cluster + secondaryHelmValues := map[string]string{ + "global.datacenter": "dc2", + + "global.tls.enabled": "true", + "global.tls.httpsOnly": "false", + "global.tls.enableAutoEncrypt": c.enableAutoEncrypt, + "global.tls.caCert.secretName": federationSecretName, + "global.tls.caCert.secretKey": "caCert", + "global.tls.caKey.secretName": federationSecretName, + "global.tls.caKey.secretKey": "caKey", + + "global.acls.manageSystemACLs": "true", + "global.acls.replicationToken.secretName": federationSecretName, + "global.acls.replicationToken.secretKey": "replicationToken", + + "global.federation.enabled": "true", + "global.federation.k8sAuthMethodHost": k8sAuthMethodHost, + "global.federation.primaryDatacenter": "dc1", + + "server.extraVolumes[0].type": "secret", + "server.extraVolumes[0].name": federationSecretName, + "server.extraVolumes[0].load": "true", + "server.extraVolumes[0].items[0].key": "serverConfigJSON", + "server.extraVolumes[0].items[0].path": "config.json", + + "connectInject.enabled": "true", + "connectInject.replicas": "1", + "controller.enabled": "true", + + "meshGateway.enabled": "true", + "meshGateway.replicas": "1", + } + + if cfg.UseKind { + secondaryHelmValues["meshGateway.service.type"] = "NodePort" + secondaryHelmValues["meshGateway.service.nodePort"] = "30000" + } + + // Install the secondary consul cluster in the secondary kubernetes context + secondaryConsulCluster := consul.NewHelmCluster(t, secondaryHelmValues, secondaryContext, cfg, releaseName) + secondaryConsulCluster.Create(t) + + if cfg.UseKind { + // This is a temporary workaround that seems to fix mesh gateway tests on kind 1.22.x. + // TODO (ishustava): we need to investigate this further and remove once we've found the issue. 
+ k8s.RunKubectl(t, primaryContext.KubectlOptions(t), "rollout", "restart", fmt.Sprintf("sts/%s-consul-server", releaseName)) + k8s.RunKubectl(t, primaryContext.KubectlOptions(t), "rollout", "status", fmt.Sprintf("sts/%s-consul-server", releaseName)) + } + + primaryClient, _ := primaryConsulCluster.SetupConsulClient(t, true) + secondaryClient, _ := secondaryConsulCluster.SetupConsulClient(t, true) + + // Verify federation between servers + logger.Log(t, "verifying federation was successful") + helpers.VerifyFederation(t, primaryClient, secondaryClient, releaseName, true) + + // Create a ProxyDefaults resource to configure services to use the mesh + // gateways. + logger.Log(t, "creating proxy-defaults config") + kustomizeDir := "../fixtures/bases/mesh-gateway" + k8s.KubectlApplyK(t, secondaryContext.KubectlOptions(t), kustomizeDir) + helpers.Cleanup(t, cfg.NoCleanupOnFailure, func() { + k8s.KubectlDeleteK(t, secondaryContext.KubectlOptions(t), kustomizeDir) + }) + + // Check that we can connect services over the mesh gateways + logger.Log(t, "creating static-server in dc2") + k8s.DeployKustomize(t, secondaryContext.KubectlOptions(t), cfg.NoCleanupOnFailure, cfg.DebugDirectory, "../fixtures/cases/static-server-inject") + + logger.Log(t, "creating static-client in dc1") + k8s.DeployKustomize(t, primaryContext.KubectlOptions(t), cfg.NoCleanupOnFailure, cfg.DebugDirectory, "../fixtures/cases/static-client-multi-dc") + + logger.Log(t, "creating intention") + _, _, err = primaryClient.ConfigEntries().Set(&api.ServiceIntentionsConfigEntry{ + Kind: api.ServiceIntentions, + Name: "static-server", + Sources: []*api.SourceIntention{ + { + Name: StaticClientName, + Action: api.IntentionActionAllow, + }, + }, + }, nil) + require.NoError(t, err) + + logger.Log(t, "checking that connection is successful") + k8s.CheckStaticServerConnectionSuccessful(t, primaryContext.KubectlOptions(t), StaticClientName, "http://localhost:1234") + }) + } +} diff --git a/acceptance/tests/metrics/metrics_test.go b/acceptance/tests/metrics/metrics_test.go index 26d79dfd7c..16bc101125 100644 --- a/acceptance/tests/metrics/metrics_test.go +++ b/acceptance/tests/metrics/metrics_test.go @@ -31,10 +31,6 @@ func TestComponentMetrics(t *testing.T) { "global.datacenter": "dc1", "global.metrics.enabled": "true", "global.metrics.enableAgentMetrics": "true", - // Agents have been removed but there could potentially be customers that are still running them. We - // are using client.enabled to cover that scenario and to make sure agent metrics still works with - // consul-dataplane. 
- "client.enabled": "true", "connectInject.enabled": "true", "controller.enabled": "true", @@ -81,13 +77,13 @@ func TestComponentMetrics(t *testing.T) { require.NoError(t, err) require.Contains(t, metricsOutput, `consul_acl_ResolveToken{quantile="0.5"}`) - logger.Log(t, "ingress gateway metrics") + // Ingress Gateway Metrics assertGatewayMetricsEnabled(t, ctx, ns, "ingress-gateway", `envoy_cluster_assignment_stale{local_cluster="ingress-gateway",consul_source_service="ingress-gateway"`) - logger.Log(t, "terminating gateway metrics") + // Terminating Gateway Metrics assertGatewayMetricsEnabled(t, ctx, ns, "terminating-gateway", `envoy_cluster_assignment_stale{local_cluster="terminating-gateway",consul_source_service="terminating-gateway"`) - logger.Log(t, "mesh gateway metrics") + // Mesh Gateway Metrics assertGatewayMetricsEnabled(t, ctx, ns, "mesh-gateway", `envoy_cluster_assignment_stale{local_cluster="mesh-gateway",consul_source_service="mesh-gateway"`) } @@ -103,9 +99,6 @@ func TestAppMetrics(t *testing.T) { "global.datacenter": "dc1", "global.metrics.enabled": "true", - // todo (agentless): remove once we have consul-dataplane image with these changes. - "global.imageConsulDataplane": "hashicorppreview/consul-dataplane:1.0-dev", - "connectInject.enabled": "true", "connectInject.metrics.defaultEnableMerging": "true", } diff --git a/acceptance/tests/partitions/partitions_connect_test.go b/acceptance/tests/partitions/partitions_connect_test.go index 1a3c6c901d..e1a43850d6 100644 --- a/acceptance/tests/partitions/partitions_connect_test.go +++ b/acceptance/tests/partitions/partitions_connect_test.go @@ -23,7 +23,7 @@ const staticServerName = "static-server" const staticServerNamespace = "ns1" const StaticClientNamespace = "ns2" -// Test that Connect works in a default and ACLsEnabled installations for X-Partition and in-partition networking. +// Test that Connect works in a default and ACLsAndAutoEncryptEnabled installations for X-Partition and in-partition networking. 
func TestPartitions_Connect(t *testing.T) { env := suite.Environment() cfg := suite.Config() @@ -36,10 +36,10 @@ func TestPartitions_Connect(t *testing.T) { const secondaryPartition = "secondary" const defaultNamespace = "default" cases := []struct { - name string - destinationNamespace string - mirrorK8S bool - ACLsEnabled bool + name string + destinationNamespace string + mirrorK8S bool + ACLsAndAutoEncryptEnabled bool }{ { "default destination namespace", @@ -48,7 +48,7 @@ func TestPartitions_Connect(t *testing.T) { false, }, { - "default destination namespace; ACLs enabled", + "default destination namespace; ACLs and auto-encrypt enabled", defaultNamespace, false, true, @@ -60,7 +60,7 @@ func TestPartitions_Connect(t *testing.T) { false, }, { - "single destination namespace; ACLs enabled", + "single destination namespace; ACLs and auto-encrypt enabled", staticServerNamespace, false, true, @@ -72,7 +72,7 @@ func TestPartitions_Connect(t *testing.T) { false, }, { - "mirror k8s namespaces; ACLs enabled", + "mirror k8s namespaces; ACLs and auto-encrypt enabled", staticServerNamespace, true, true, @@ -81,18 +81,21 @@ func TestPartitions_Connect(t *testing.T) { for _, c := range cases { t.Run(c.name, func(t *testing.T) { - defaultPartitionClusterContext := env.DefaultContext(t) - secondaryPartitionClusterContext := env.Context(t, environment.SecondaryContextName) + serverClusterContext := env.DefaultContext(t) + clientClusterContext := env.Context(t, environment.SecondaryContextName) + + ctx := context.Background() commonHelmValues := map[string]string{ "global.adminPartitions.enabled": "true", - "global.enableConsulNamespaces": "true", - "global.logLevel": "debug", - "global.tls.enabled": "true", - "global.tls.httpsOnly": strconv.FormatBool(c.ACLsEnabled), + "global.enableConsulNamespaces": "true", + + "global.tls.enabled": "true", + "global.tls.httpsOnly": strconv.FormatBool(c.ACLsAndAutoEncryptEnabled), + "global.tls.enableAutoEncrypt": strconv.FormatBool(c.ACLsAndAutoEncryptEnabled), - "global.acls.manageSystemACLs": strconv.FormatBool(c.ACLsEnabled), + "global.acls.manageSystemACLs": strconv.FormatBool(c.ACLsAndAutoEncryptEnabled), "connectInject.enabled": "true", // When mirroringK8S is set, this setting is ignored. @@ -108,46 +111,56 @@ func TestPartitions_Connect(t *testing.T) { "dns.enableRedirection": strconv.FormatBool(cfg.EnableTransparentProxy), } - defaultPartitionHelmValues := make(map[string]string) + serverHelmValues := map[string]string{ + "server.exposeGossipAndRPCPorts": "true", + } // On Kind, there are no load balancers but since all clusters // share the same node network (docker bridge), we can use // a NodePort service so that we can access node(s) in a different Kind cluster. if cfg.UseKind { - defaultPartitionHelmValues["meshGateway.service.type"] = "NodePort" - defaultPartitionHelmValues["meshGateway.service.nodePort"] = "30200" // todo: do we need to set this port? 
- defaultPartitionHelmValues["server.exposeService.type"] = "NodePort" - defaultPartitionHelmValues["server.exposeService.nodePort.https"] = "30000" - defaultPartitionHelmValues["server.exposeService.nodePort.grpc"] = "30100" + serverHelmValues["global.adminPartitions.service.type"] = "NodePort" + serverHelmValues["global.adminPartitions.service.nodePort.https"] = "30000" + serverHelmValues["meshGateway.service.type"] = "NodePort" + serverHelmValues["meshGateway.service.nodePort"] = "30100" + serverHelmValues["server.exposeService.type"] = "NodePort" } releaseName := helpers.RandomName() - helpers.MergeMaps(defaultPartitionHelmValues, commonHelmValues) + helpers.MergeMaps(serverHelmValues, commonHelmValues) // Install the consul cluster with servers in the default kubernetes context. - serverConsulCluster := consul.NewHelmCluster(t, defaultPartitionHelmValues, defaultPartitionClusterContext, cfg, releaseName) + serverConsulCluster := consul.NewHelmCluster(t, serverHelmValues, serverClusterContext, cfg, releaseName) serverConsulCluster.Create(t) // Get the TLS CA certificate and key secret from the server cluster and apply it to the client cluster. caCertSecretName := fmt.Sprintf("%s-consul-ca-cert", releaseName) + caKeySecretName := fmt.Sprintf("%s-consul-ca-key", releaseName) logger.Logf(t, "retrieving ca cert secret %s from the server cluster and applying to the client cluster", caCertSecretName) - k8s.CopySecret(t, defaultPartitionClusterContext, secondaryPartitionClusterContext, caCertSecretName) + k8s.CopySecret(t, serverClusterContext, clientClusterContext, caCertSecretName) + + if !c.ACLsAndAutoEncryptEnabled { + // When auto-encrypt is disabled, we need both + // the CA cert and CA key to be available in the clients cluster to generate client certificates and keys. + logger.Logf(t, "retrieving ca key secret %s from the server cluster and applying to the client cluster", caKeySecretName) + k8s.CopySecret(t, serverClusterContext, clientClusterContext, caKeySecretName) + } partitionToken := fmt.Sprintf("%s-consul-partitions-acl-token", releaseName) - if c.ACLsEnabled { + if c.ACLsAndAutoEncryptEnabled { logger.Logf(t, "retrieving partition token secret %s from the server cluster and applying to the client cluster", partitionToken) - k8s.CopySecret(t, defaultPartitionClusterContext, secondaryPartitionClusterContext, partitionToken) + k8s.CopySecret(t, serverClusterContext, clientClusterContext, partitionToken) } - partitionServiceName := fmt.Sprintf("%s-consul-expose-servers", releaseName) - partitionSvcAddress := k8s.ServiceHost(t, cfg, defaultPartitionClusterContext, partitionServiceName) + partitionServiceName := fmt.Sprintf("%s-consul-partition", releaseName) + partitionSvcAddress := k8s.ServiceHost(t, cfg, serverClusterContext, partitionServiceName) - k8sAuthMethodHost := k8s.KubernetesAPIServerHost(t, cfg, secondaryPartitionClusterContext) + k8sAuthMethodHost := k8s.KubernetesAPIServerHost(t, cfg, clientClusterContext) // Create client cluster. 
- secondaryPartitionHelmValues := map[string]string{ + clientHelmValues := map[string]string{ "global.enabled": "false", "global.adminPartitions.name": secondaryPartition, @@ -158,64 +171,80 @@ func TestPartitions_Connect(t *testing.T) { "externalServers.enabled": "true", "externalServers.hosts[0]": partitionSvcAddress, "externalServers.tlsServerName": "server.dc1.consul", + + "client.enabled": "true", + "client.exposeGossipPorts": "true", + "client.join[0]": partitionSvcAddress, } - if c.ACLsEnabled { + if c.ACLsAndAutoEncryptEnabled { // Setup partition token and auth method host if ACLs enabled. - secondaryPartitionHelmValues["global.acls.bootstrapToken.secretName"] = partitionToken - secondaryPartitionHelmValues["global.acls.bootstrapToken.secretKey"] = "token" - secondaryPartitionHelmValues["externalServers.k8sAuthMethodHost"] = k8sAuthMethodHost + clientHelmValues["global.acls.bootstrapToken.secretName"] = partitionToken + clientHelmValues["global.acls.bootstrapToken.secretKey"] = "token" + clientHelmValues["externalServers.k8sAuthMethodHost"] = k8sAuthMethodHost + } else { + // Provide CA key when auto-encrypt is disabled. + clientHelmValues["global.tls.caKey.secretName"] = caKeySecretName + clientHelmValues["global.tls.caKey.secretKey"] = "tls.key" } if cfg.UseKind { - secondaryPartitionHelmValues["externalServers.httpsPort"] = "30000" - secondaryPartitionHelmValues["externalServers.grpcPort"] = "30100" - secondaryPartitionHelmValues["meshGateway.service.type"] = "NodePort" - secondaryPartitionHelmValues["meshGateway.service.nodePort"] = "30200" + clientHelmValues["externalServers.httpsPort"] = "30000" + clientHelmValues["meshGateway.service.type"] = "NodePort" + clientHelmValues["meshGateway.service.nodePort"] = "30100" } - helpers.MergeMaps(secondaryPartitionHelmValues, commonHelmValues) + helpers.MergeMaps(clientHelmValues, commonHelmValues) // Install the consul cluster without servers in the client cluster kubernetes context. - clientConsulCluster := consul.NewHelmCluster(t, secondaryPartitionHelmValues, secondaryPartitionClusterContext, cfg, releaseName) + clientConsulCluster := consul.NewHelmCluster(t, clientHelmValues, clientClusterContext, cfg, releaseName) clientConsulCluster.Create(t) - defaultPartitionClusterStaticServerOpts := &terratestk8s.KubectlOptions{ - ContextName: defaultPartitionClusterContext.KubectlOptions(t).ContextName, - ConfigPath: defaultPartitionClusterContext.KubectlOptions(t).ConfigPath, + // Ensure consul clients are created. 
+ agentPodList, err := clientClusterContext.KubernetesClient(t).CoreV1().Pods(clientClusterContext.KubectlOptions(t).Namespace).List(ctx, metav1.ListOptions{LabelSelector: "app=consul,component=client"}) + require.NoError(t, err) + require.NotEmpty(t, agentPodList.Items) + + output, err := k8s.RunKubectlAndGetOutputE(t, clientClusterContext.KubectlOptions(t), "logs", agentPodList.Items[0].Name, "-n", clientClusterContext.KubectlOptions(t).Namespace) + require.NoError(t, err) + require.Contains(t, output, "Partition: 'secondary'") + + serverClusterStaticServerOpts := &terratestk8s.KubectlOptions{ + ContextName: serverClusterContext.KubectlOptions(t).ContextName, + ConfigPath: serverClusterContext.KubectlOptions(t).ConfigPath, Namespace: staticServerNamespace, } - defaultPartitionClusterStaticClientOpts := &terratestk8s.KubectlOptions{ - ContextName: defaultPartitionClusterContext.KubectlOptions(t).ContextName, - ConfigPath: defaultPartitionClusterContext.KubectlOptions(t).ConfigPath, + serverClusterStaticClientOpts := &terratestk8s.KubectlOptions{ + ContextName: serverClusterContext.KubectlOptions(t).ContextName, + ConfigPath: serverClusterContext.KubectlOptions(t).ConfigPath, Namespace: StaticClientNamespace, } - secondaryPartitionClusterStaticServerOpts := &terratestk8s.KubectlOptions{ - ContextName: secondaryPartitionClusterContext.KubectlOptions(t).ContextName, - ConfigPath: secondaryPartitionClusterContext.KubectlOptions(t).ConfigPath, + clientClusterStaticServerOpts := &terratestk8s.KubectlOptions{ + ContextName: clientClusterContext.KubectlOptions(t).ContextName, + ConfigPath: clientClusterContext.KubectlOptions(t).ConfigPath, Namespace: staticServerNamespace, } - secondaryPartitionClusterStaticClientOpts := &terratestk8s.KubectlOptions{ - ContextName: secondaryPartitionClusterContext.KubectlOptions(t).ContextName, - ConfigPath: secondaryPartitionClusterContext.KubectlOptions(t).ConfigPath, + clientClusterStaticClientOpts := &terratestk8s.KubectlOptions{ + ContextName: clientClusterContext.KubectlOptions(t).ContextName, + ConfigPath: clientClusterContext.KubectlOptions(t).ConfigPath, Namespace: StaticClientNamespace, } logger.Logf(t, "creating namespaces %s and %s in servers cluster", staticServerNamespace, StaticClientNamespace) - k8s.RunKubectl(t, defaultPartitionClusterContext.KubectlOptions(t), "create", "ns", staticServerNamespace) - k8s.RunKubectl(t, defaultPartitionClusterContext.KubectlOptions(t), "create", "ns", StaticClientNamespace) + k8s.RunKubectl(t, serverClusterContext.KubectlOptions(t), "create", "ns", staticServerNamespace) + k8s.RunKubectl(t, serverClusterContext.KubectlOptions(t), "create", "ns", StaticClientNamespace) helpers.Cleanup(t, cfg.NoCleanupOnFailure, func() { - k8s.RunKubectl(t, defaultPartitionClusterContext.KubectlOptions(t), "delete", "ns", staticServerNamespace, StaticClientNamespace) + k8s.RunKubectl(t, serverClusterContext.KubectlOptions(t), "delete", "ns", staticServerNamespace, StaticClientNamespace) }) logger.Logf(t, "creating namespaces %s and %s in clients cluster", staticServerNamespace, StaticClientNamespace) - k8s.RunKubectl(t, secondaryPartitionClusterContext.KubectlOptions(t), "create", "ns", staticServerNamespace) - k8s.RunKubectl(t, secondaryPartitionClusterContext.KubectlOptions(t), "create", "ns", StaticClientNamespace) + k8s.RunKubectl(t, clientClusterContext.KubectlOptions(t), "create", "ns", staticServerNamespace) + k8s.RunKubectl(t, clientClusterContext.KubectlOptions(t), "create", "ns", StaticClientNamespace) helpers.Cleanup(t, 
cfg.NoCleanupOnFailure, func() { - k8s.RunKubectl(t, secondaryPartitionClusterContext.KubectlOptions(t), "delete", "ns", staticServerNamespace, StaticClientNamespace) + k8s.RunKubectl(t, clientClusterContext.KubectlOptions(t), "delete", "ns", staticServerNamespace, StaticClientNamespace) }) - consulClient, _ := serverConsulCluster.SetupConsulClient(t, c.ACLsEnabled) + consulClient, _ := serverConsulCluster.SetupConsulClient(t, c.ACLsAndAutoEncryptEnabled) serverQueryServerOpts := &api.QueryOptions{Namespace: staticServerNamespace, Partition: defaultPartition} clientQueryServerOpts := &api.QueryOptions{Namespace: StaticClientNamespace, Partition: defaultPartition} @@ -231,12 +260,12 @@ func TestPartitions_Connect(t *testing.T) { } // Check that the ACL token is deleted. - if c.ACLsEnabled { + if c.ACLsAndAutoEncryptEnabled { // We need to register the cleanup function before we create the deployments // because golang will execute them in reverse order i.e. the last registered // cleanup function will be executed first. t.Cleanup(func() { - if c.ACLsEnabled { + if c.ACLsAndAutoEncryptEnabled { retry.Run(t, func(r *retry.R) { tokens, _, err := consulClient.ACL().TokenList(serverQueryServerOpts) require.NoError(r, err) @@ -270,43 +299,43 @@ func TestPartitions_Connect(t *testing.T) { logger.Log(t, "creating proxy-defaults config") kustomizeDir := "../fixtures/bases/mesh-gateway" - k8s.KubectlApplyK(t, defaultPartitionClusterContext.KubectlOptions(t), kustomizeDir) + k8s.KubectlApplyK(t, serverClusterContext.KubectlOptions(t), kustomizeDir) helpers.Cleanup(t, cfg.NoCleanupOnFailure, func() { - k8s.KubectlDeleteK(t, defaultPartitionClusterContext.KubectlOptions(t), kustomizeDir) + k8s.KubectlDeleteK(t, serverClusterContext.KubectlOptions(t), kustomizeDir) }) - k8s.KubectlApplyK(t, secondaryPartitionClusterContext.KubectlOptions(t), kustomizeDir) + k8s.KubectlApplyK(t, clientClusterContext.KubectlOptions(t), kustomizeDir) helpers.Cleanup(t, cfg.NoCleanupOnFailure, func() { - k8s.KubectlDeleteK(t, secondaryPartitionClusterContext.KubectlOptions(t), kustomizeDir) + k8s.KubectlDeleteK(t, clientClusterContext.KubectlOptions(t), kustomizeDir) }) // This section of the tests runs the in-partition networking tests. 
t.Run("in-partition", func(t *testing.T) { logger.Log(t, "test in-partition networking") logger.Log(t, "creating static-server and static-client deployments in server cluster") - k8s.DeployKustomize(t, defaultPartitionClusterStaticServerOpts, cfg.NoCleanupOnFailure, cfg.DebugDirectory, "../fixtures/cases/static-server-inject") + k8s.DeployKustomize(t, serverClusterStaticServerOpts, cfg.NoCleanupOnFailure, cfg.DebugDirectory, "../fixtures/cases/static-server-inject") if cfg.EnableTransparentProxy { - k8s.DeployKustomize(t, defaultPartitionClusterStaticClientOpts, cfg.NoCleanupOnFailure, cfg.DebugDirectory, "../fixtures/cases/static-client-tproxy") + k8s.DeployKustomize(t, serverClusterStaticClientOpts, cfg.NoCleanupOnFailure, cfg.DebugDirectory, "../fixtures/cases/static-client-tproxy") } else { if c.destinationNamespace == defaultNamespace { - k8s.DeployKustomize(t, defaultPartitionClusterStaticClientOpts, cfg.NoCleanupOnFailure, cfg.DebugDirectory, "../fixtures/cases/static-client-inject") + k8s.DeployKustomize(t, serverClusterStaticClientOpts, cfg.NoCleanupOnFailure, cfg.DebugDirectory, "../fixtures/cases/static-client-inject") } else { - k8s.DeployKustomize(t, defaultPartitionClusterStaticClientOpts, cfg.NoCleanupOnFailure, cfg.DebugDirectory, "../fixtures/cases/static-client-namespaces") + k8s.DeployKustomize(t, serverClusterStaticClientOpts, cfg.NoCleanupOnFailure, cfg.DebugDirectory, "../fixtures/cases/static-client-namespaces") } } logger.Log(t, "creating static-server and static-client deployments in client cluster") - k8s.DeployKustomize(t, secondaryPartitionClusterStaticServerOpts, cfg.NoCleanupOnFailure, cfg.DebugDirectory, "../fixtures/cases/static-server-inject") + k8s.DeployKustomize(t, clientClusterStaticServerOpts, cfg.NoCleanupOnFailure, cfg.DebugDirectory, "../fixtures/cases/static-server-inject") if cfg.EnableTransparentProxy { - k8s.DeployKustomize(t, secondaryPartitionClusterStaticClientOpts, cfg.NoCleanupOnFailure, cfg.DebugDirectory, "../fixtures/cases/static-client-tproxy") + k8s.DeployKustomize(t, clientClusterStaticClientOpts, cfg.NoCleanupOnFailure, cfg.DebugDirectory, "../fixtures/cases/static-client-tproxy") } else { if c.destinationNamespace == defaultNamespace { - k8s.DeployKustomize(t, secondaryPartitionClusterStaticClientOpts, cfg.NoCleanupOnFailure, cfg.DebugDirectory, "../fixtures/cases/static-client-inject") + k8s.DeployKustomize(t, clientClusterStaticClientOpts, cfg.NoCleanupOnFailure, cfg.DebugDirectory, "../fixtures/cases/static-client-inject") } else { - k8s.DeployKustomize(t, secondaryPartitionClusterStaticClientOpts, cfg.NoCleanupOnFailure, cfg.DebugDirectory, "../fixtures/cases/static-client-namespaces") + k8s.DeployKustomize(t, clientClusterStaticClientOpts, cfg.NoCleanupOnFailure, cfg.DebugDirectory, "../fixtures/cases/static-client-namespaces") } } // Check that both static-server and static-client have been injected and now have 2 containers in server cluster. 
for _, labelSelector := range []string{"app=static-server", "app=static-client"} { - podList, err := defaultPartitionClusterContext.KubernetesClient(t).CoreV1().Pods(metav1.NamespaceAll).List(context.Background(), metav1.ListOptions{ + podList, err := serverClusterContext.KubernetesClient(t).CoreV1().Pods(metav1.NamespaceAll).List(context.Background(), metav1.ListOptions{ LabelSelector: labelSelector, }) require.NoError(t, err) @@ -316,7 +345,7 @@ func TestPartitions_Connect(t *testing.T) { // Check that both static-server and static-client have been injected and now have 2 containers in client cluster. for _, labelSelector := range []string{"app=static-server", "app=static-client"} { - podList, err := secondaryPartitionClusterContext.KubernetesClient(t).CoreV1().Pods(metav1.NamespaceAll).List(context.Background(), metav1.ListOptions{ + podList, err := clientClusterContext.KubernetesClient(t).CoreV1().Pods(metav1.NamespaceAll).List(context.Background(), metav1.ListOptions{ LabelSelector: labelSelector, }) require.NoError(t, err) @@ -348,14 +377,14 @@ func TestPartitions_Connect(t *testing.T) { require.NoError(t, err) require.Len(t, services, 1) - if c.ACLsEnabled { + if c.ACLsAndAutoEncryptEnabled { logger.Log(t, "checking that the connection is not successful because there's no intention") if cfg.EnableTransparentProxy { - k8s.CheckStaticServerConnectionFailing(t, defaultPartitionClusterStaticClientOpts, StaticClientName, fmt.Sprintf("http://static-server.%s", staticServerNamespace)) - k8s.CheckStaticServerConnectionFailing(t, secondaryPartitionClusterStaticClientOpts, StaticClientName, fmt.Sprintf("http://static-server.%s", staticServerNamespace)) + k8s.CheckStaticServerConnectionFailing(t, serverClusterStaticClientOpts, StaticClientName, fmt.Sprintf("http://static-server.%s", staticServerNamespace)) + k8s.CheckStaticServerConnectionFailing(t, clientClusterStaticClientOpts, StaticClientName, fmt.Sprintf("http://static-server.%s", staticServerNamespace)) } else { - k8s.CheckStaticServerConnectionFailing(t, defaultPartitionClusterStaticClientOpts, StaticClientName, "http://localhost:1234") - k8s.CheckStaticServerConnectionFailing(t, secondaryPartitionClusterStaticClientOpts, StaticClientName, "http://localhost:1234") + k8s.CheckStaticServerConnectionFailing(t, serverClusterStaticClientOpts, StaticClientName, "http://localhost:1234") + k8s.CheckStaticServerConnectionFailing(t, clientClusterStaticClientOpts, StaticClientName, "http://localhost:1234") } intention := &api.ServiceIntentionsConfigEntry{ @@ -393,18 +422,18 @@ func TestPartitions_Connect(t *testing.T) { logger.Log(t, "checking that connection is successful") if cfg.EnableTransparentProxy { - k8s.CheckStaticServerConnectionSuccessful(t, defaultPartitionClusterStaticClientOpts, StaticClientName, fmt.Sprintf("http://static-server.%s", staticServerNamespace)) - k8s.CheckStaticServerConnectionSuccessful(t, secondaryPartitionClusterStaticClientOpts, StaticClientName, fmt.Sprintf("http://static-server.%s", staticServerNamespace)) + k8s.CheckStaticServerConnectionSuccessful(t, serverClusterStaticClientOpts, StaticClientName, fmt.Sprintf("http://static-server.%s", staticServerNamespace)) + k8s.CheckStaticServerConnectionSuccessful(t, clientClusterStaticClientOpts, StaticClientName, fmt.Sprintf("http://static-server.%s", staticServerNamespace)) } else { - k8s.CheckStaticServerConnectionSuccessful(t, defaultPartitionClusterStaticClientOpts, StaticClientName, "http://localhost:1234") - k8s.CheckStaticServerConnectionSuccessful(t, 
secondaryPartitionClusterStaticClientOpts, StaticClientName, "http://localhost:1234") + k8s.CheckStaticServerConnectionSuccessful(t, serverClusterStaticClientOpts, StaticClientName, "http://localhost:1234") + k8s.CheckStaticServerConnectionSuccessful(t, clientClusterStaticClientOpts, StaticClientName, "http://localhost:1234") } // Test that kubernetes readiness status is synced to Consul. // Create the file so that the readiness probe of the static-server pod fails. logger.Log(t, "testing k8s -> consul health checks sync by making the static-server unhealthy") - k8s.RunKubectl(t, defaultPartitionClusterStaticServerOpts, "exec", "deploy/"+staticServerName, "--", "touch", "/tmp/unhealthy") - k8s.RunKubectl(t, secondaryPartitionClusterStaticServerOpts, "exec", "deploy/"+staticServerName, "--", "touch", "/tmp/unhealthy") + k8s.RunKubectl(t, serverClusterStaticServerOpts, "exec", "deploy/"+staticServerName, "--", "touch", "/tmp/unhealthy") + k8s.RunKubectl(t, clientClusterStaticServerOpts, "exec", "deploy/"+staticServerName, "--", "touch", "/tmp/unhealthy") // The readiness probe should take a moment to be reflected in Consul, CheckStaticServerConnection will retry // until Consul marks the service instance unavailable for mesh traffic, causing the connection to fail. @@ -413,41 +442,41 @@ func TestPartitions_Connect(t *testing.T) { // from server, which is the case when a connection is unsuccessful due to intentions in other tests. logger.Log(t, "checking that connection is unsuccessful") if cfg.EnableTransparentProxy { - k8s.CheckStaticServerConnectionMultipleFailureMessages(t, defaultPartitionClusterStaticClientOpts, StaticClientName, false, []string{"curl: (56) Recv failure: Connection reset by peer", "curl: (52) Empty reply from server", "curl: (7) Failed to connect to static-server.ns1 port 80: Connection refused"}, "", fmt.Sprintf("http://static-server.%s", staticServerNamespace)) - k8s.CheckStaticServerConnectionMultipleFailureMessages(t, secondaryPartitionClusterStaticClientOpts, StaticClientName, false, []string{"curl: (56) Recv failure: Connection reset by peer", "curl: (52) Empty reply from server", "curl: (7) Failed to connect to static-server.ns1 port 80: Connection refused"}, "", fmt.Sprintf("http://static-server.%s", staticServerNamespace)) + k8s.CheckStaticServerConnectionMultipleFailureMessages(t, serverClusterStaticClientOpts, StaticClientName, false, []string{"curl: (56) Recv failure: Connection reset by peer", "curl: (52) Empty reply from server", "curl: (7) Failed to connect to static-server.ns1 port 80: Connection refused"}, "", fmt.Sprintf("http://static-server.%s", staticServerNamespace)) + k8s.CheckStaticServerConnectionMultipleFailureMessages(t, clientClusterStaticClientOpts, StaticClientName, false, []string{"curl: (56) Recv failure: Connection reset by peer", "curl: (52) Empty reply from server", "curl: (7) Failed to connect to static-server.ns1 port 80: Connection refused"}, "", fmt.Sprintf("http://static-server.%s", staticServerNamespace)) } else { - k8s.CheckStaticServerConnectionMultipleFailureMessages(t, defaultPartitionClusterStaticClientOpts, StaticClientName, false, []string{"curl: (56) Recv failure: Connection reset by peer", "curl: (52) Empty reply from server"}, "", "http://localhost:1234") - k8s.CheckStaticServerConnectionMultipleFailureMessages(t, secondaryPartitionClusterStaticClientOpts, StaticClientName, false, []string{"curl: (56) Recv failure: Connection reset by peer", "curl: (52) Empty reply from server"}, "", "http://localhost:1234") + 
k8s.CheckStaticServerConnectionMultipleFailureMessages(t, serverClusterStaticClientOpts, StaticClientName, false, []string{"curl: (56) Recv failure: Connection reset by peer", "curl: (52) Empty reply from server"}, "", "http://localhost:1234") + k8s.CheckStaticServerConnectionMultipleFailureMessages(t, clientClusterStaticClientOpts, StaticClientName, false, []string{"curl: (56) Recv failure: Connection reset by peer", "curl: (52) Empty reply from server"}, "", "http://localhost:1234") } }) // This section of the tests runs the cross-partition networking tests. t.Run("cross-partition", func(t *testing.T) { logger.Log(t, "test cross-partition networking") logger.Log(t, "creating static-server and static-client deployments in server cluster") - k8s.DeployKustomize(t, defaultPartitionClusterStaticServerOpts, cfg.NoCleanupOnFailure, cfg.DebugDirectory, "../fixtures/cases/static-server-inject") + k8s.DeployKustomize(t, serverClusterStaticServerOpts, cfg.NoCleanupOnFailure, cfg.DebugDirectory, "../fixtures/cases/static-server-inject") if cfg.EnableTransparentProxy { - k8s.DeployKustomize(t, defaultPartitionClusterStaticClientOpts, cfg.NoCleanupOnFailure, cfg.DebugDirectory, "../fixtures/cases/static-client-tproxy") + k8s.DeployKustomize(t, serverClusterStaticClientOpts, cfg.NoCleanupOnFailure, cfg.DebugDirectory, "../fixtures/cases/static-client-tproxy") } else { if c.destinationNamespace == defaultNamespace { - k8s.DeployKustomize(t, defaultPartitionClusterStaticClientOpts, cfg.NoCleanupOnFailure, cfg.DebugDirectory, "../fixtures/cases/static-client-partitions/default-ns-partition") + k8s.DeployKustomize(t, serverClusterStaticClientOpts, cfg.NoCleanupOnFailure, cfg.DebugDirectory, "../fixtures/cases/static-client-partitions/default-ns-partition") } else { - k8s.DeployKustomize(t, defaultPartitionClusterStaticClientOpts, cfg.NoCleanupOnFailure, cfg.DebugDirectory, "../fixtures/cases/static-client-partitions/ns-partition") + k8s.DeployKustomize(t, serverClusterStaticClientOpts, cfg.NoCleanupOnFailure, cfg.DebugDirectory, "../fixtures/cases/static-client-partitions/ns-partition") } } logger.Log(t, "creating static-server and static-client deployments in client cluster") - k8s.DeployKustomize(t, secondaryPartitionClusterStaticServerOpts, cfg.NoCleanupOnFailure, cfg.DebugDirectory, "../fixtures/cases/static-server-inject") + k8s.DeployKustomize(t, clientClusterStaticServerOpts, cfg.NoCleanupOnFailure, cfg.DebugDirectory, "../fixtures/cases/static-server-inject") if cfg.EnableTransparentProxy { - k8s.DeployKustomize(t, secondaryPartitionClusterStaticClientOpts, cfg.NoCleanupOnFailure, cfg.DebugDirectory, "../fixtures/cases/static-client-tproxy") + k8s.DeployKustomize(t, clientClusterStaticClientOpts, cfg.NoCleanupOnFailure, cfg.DebugDirectory, "../fixtures/cases/static-client-tproxy") } else { if c.destinationNamespace == defaultNamespace { - k8s.DeployKustomize(t, secondaryPartitionClusterStaticClientOpts, cfg.NoCleanupOnFailure, cfg.DebugDirectory, "../fixtures/cases/static-client-partitions/default-ns-default-partition") + k8s.DeployKustomize(t, clientClusterStaticClientOpts, cfg.NoCleanupOnFailure, cfg.DebugDirectory, "../fixtures/cases/static-client-partitions/default-ns-default-partition") } else { - k8s.DeployKustomize(t, secondaryPartitionClusterStaticClientOpts, cfg.NoCleanupOnFailure, cfg.DebugDirectory, "../fixtures/cases/static-client-partitions/ns-default-partition") + k8s.DeployKustomize(t, clientClusterStaticClientOpts, cfg.NoCleanupOnFailure, cfg.DebugDirectory, 
"../fixtures/cases/static-client-partitions/ns-default-partition") } } // Check that both static-server and static-client have been injected and now have 2 containers in server cluster. for _, labelSelector := range []string{"app=static-server", "app=static-client"} { - podList, err := defaultPartitionClusterContext.KubernetesClient(t).CoreV1().Pods(metav1.NamespaceAll).List(context.Background(), metav1.ListOptions{ + podList, err := serverClusterContext.KubernetesClient(t).CoreV1().Pods(metav1.NamespaceAll).List(context.Background(), metav1.ListOptions{ LabelSelector: labelSelector, }) require.NoError(t, err) @@ -457,7 +486,7 @@ func TestPartitions_Connect(t *testing.T) { // Check that both static-server and static-client have been injected and now have 2 containers in client cluster. for _, labelSelector := range []string{"app=static-server", "app=static-client"} { - podList, err := secondaryPartitionClusterContext.KubernetesClient(t).CoreV1().Pods(metav1.NamespaceAll).List(context.Background(), metav1.ListOptions{ + podList, err := clientClusterContext.KubernetesClient(t).CoreV1().Pods(metav1.NamespaceAll).List(context.Background(), metav1.ListOptions{ LabelSelector: labelSelector, }) require.NoError(t, err) @@ -494,34 +523,34 @@ func TestPartitions_Connect(t *testing.T) { logger.Log(t, "creating exported services") if c.destinationNamespace == defaultNamespace { - k8s.KubectlApplyK(t, defaultPartitionClusterContext.KubectlOptions(t), "../fixtures/cases/crd-partitions/default-partition-default") - k8s.KubectlApplyK(t, secondaryPartitionClusterContext.KubectlOptions(t), "../fixtures/cases/crd-partitions/secondary-partition-default") + k8s.KubectlApplyK(t, serverClusterContext.KubectlOptions(t), "../fixtures/cases/crd-partitions/default-partition-default") + k8s.KubectlApplyK(t, clientClusterContext.KubectlOptions(t), "../fixtures/cases/crd-partitions/secondary-partition-default") helpers.Cleanup(t, cfg.NoCleanupOnFailure, func() { - k8s.KubectlDeleteK(t, defaultPartitionClusterContext.KubectlOptions(t), "../fixtures/cases/crd-partitions/default-partition-default") - k8s.KubectlDeleteK(t, secondaryPartitionClusterContext.KubectlOptions(t), "../fixtures/cases/crd-partitions/secondary-partition-default") + k8s.KubectlDeleteK(t, serverClusterContext.KubectlOptions(t), "../fixtures/cases/crd-partitions/default-partition-default") + k8s.KubectlDeleteK(t, clientClusterContext.KubectlOptions(t), "../fixtures/cases/crd-partitions/secondary-partition-default") }) } else { - k8s.KubectlApplyK(t, defaultPartitionClusterContext.KubectlOptions(t), "../fixtures/cases/crd-partitions/default-partition-ns1") - k8s.KubectlApplyK(t, secondaryPartitionClusterContext.KubectlOptions(t), "../fixtures/cases/crd-partitions/secondary-partition-ns1") + k8s.KubectlApplyK(t, serverClusterContext.KubectlOptions(t), "../fixtures/cases/crd-partitions/default-partition-ns1") + k8s.KubectlApplyK(t, clientClusterContext.KubectlOptions(t), "../fixtures/cases/crd-partitions/secondary-partition-ns1") helpers.Cleanup(t, cfg.NoCleanupOnFailure, func() { - k8s.KubectlDeleteK(t, defaultPartitionClusterContext.KubectlOptions(t), "../fixtures/cases/crd-partitions/default-partition-ns1") - k8s.KubectlDeleteK(t, secondaryPartitionClusterContext.KubectlOptions(t), "../fixtures/cases/crd-partitions/secondary-partition-ns1") + k8s.KubectlDeleteK(t, serverClusterContext.KubectlOptions(t), "../fixtures/cases/crd-partitions/default-partition-ns1") + k8s.KubectlDeleteK(t, clientClusterContext.KubectlOptions(t), 
"../fixtures/cases/crd-partitions/secondary-partition-ns1") }) } - if c.ACLsEnabled { + if c.ACLsAndAutoEncryptEnabled { logger.Log(t, "checking that the connection is not successful because there's no intention") if cfg.EnableTransparentProxy { if !c.mirrorK8S { - k8s.CheckStaticServerConnectionFailing(t, defaultPartitionClusterStaticClientOpts, StaticClientName, fmt.Sprintf("http://static-server.virtual.%s.ns.%s.ap.dc1.dc.consul", c.destinationNamespace, secondaryPartition)) - k8s.CheckStaticServerConnectionFailing(t, secondaryPartitionClusterStaticClientOpts, StaticClientName, fmt.Sprintf("http://static-server.virtual.%s.ns.%s.ap.dc1.dc.consul", c.destinationNamespace, defaultPartition)) + k8s.CheckStaticServerConnectionFailing(t, serverClusterStaticClientOpts, StaticClientName, fmt.Sprintf("http://static-server.virtual.%s.ns.%s.ap.dc1.dc.consul", c.destinationNamespace, secondaryPartition)) + k8s.CheckStaticServerConnectionFailing(t, clientClusterStaticClientOpts, StaticClientName, fmt.Sprintf("http://static-server.virtual.%s.ns.%s.ap.dc1.dc.consul", c.destinationNamespace, defaultPartition)) } else { - k8s.CheckStaticServerConnectionFailing(t, defaultPartitionClusterStaticClientOpts, StaticClientName, fmt.Sprintf("http://static-server.virtual.%s.ns.%s.ap.dc1.dc.consul", staticServerNamespace, secondaryPartition)) - k8s.CheckStaticServerConnectionFailing(t, secondaryPartitionClusterStaticClientOpts, StaticClientName, fmt.Sprintf("http://static-server.virtual.%s.ns.%s.ap.dc1.dc.consul", staticServerNamespace, defaultPartition)) + k8s.CheckStaticServerConnectionFailing(t, serverClusterStaticClientOpts, StaticClientName, fmt.Sprintf("http://static-server.virtual.%s.ns.%s.ap.dc1.dc.consul", staticServerNamespace, secondaryPartition)) + k8s.CheckStaticServerConnectionFailing(t, clientClusterStaticClientOpts, StaticClientName, fmt.Sprintf("http://static-server.virtual.%s.ns.%s.ap.dc1.dc.consul", staticServerNamespace, defaultPartition)) } } else { - k8s.CheckStaticServerConnectionFailing(t, defaultPartitionClusterStaticClientOpts, StaticClientName, "http://localhost:1234") - k8s.CheckStaticServerConnectionFailing(t, secondaryPartitionClusterStaticClientOpts, StaticClientName, "http://localhost:1234") + k8s.CheckStaticServerConnectionFailing(t, serverClusterStaticClientOpts, StaticClientName, "http://localhost:1234") + k8s.CheckStaticServerConnectionFailing(t, clientClusterStaticClientOpts, StaticClientName, "http://localhost:1234") } intention := &api.ServiceIntentionsConfigEntry{ @@ -562,22 +591,22 @@ func TestPartitions_Connect(t *testing.T) { logger.Log(t, "checking that connection is successful") if cfg.EnableTransparentProxy { if !c.mirrorK8S { - k8s.CheckStaticServerConnectionSuccessful(t, defaultPartitionClusterStaticClientOpts, StaticClientName, fmt.Sprintf("http://static-server.virtual.%s.ns.%s.ap.dc1.dc.consul", c.destinationNamespace, secondaryPartition)) - k8s.CheckStaticServerConnectionSuccessful(t, secondaryPartitionClusterStaticClientOpts, StaticClientName, fmt.Sprintf("http://static-server.virtual.%s.ns.%s.ap.dc1.dc.consul", c.destinationNamespace, defaultPartition)) + k8s.CheckStaticServerConnectionSuccessful(t, serverClusterStaticClientOpts, StaticClientName, fmt.Sprintf("http://static-server.virtual.%s.ns.%s.ap.dc1.dc.consul", c.destinationNamespace, secondaryPartition)) + k8s.CheckStaticServerConnectionSuccessful(t, clientClusterStaticClientOpts, StaticClientName, fmt.Sprintf("http://static-server.virtual.%s.ns.%s.ap.dc1.dc.consul", c.destinationNamespace, 
defaultPartition)) } else { - k8s.CheckStaticServerConnectionSuccessful(t, defaultPartitionClusterStaticClientOpts, StaticClientName, fmt.Sprintf("http://static-server.virtual.%s.ns.%s.ap.dc1.dc.consul", staticServerNamespace, secondaryPartition)) - k8s.CheckStaticServerConnectionSuccessful(t, secondaryPartitionClusterStaticClientOpts, StaticClientName, fmt.Sprintf("http://static-server.virtual.%s.ns.%s.ap.dc1.dc.consul", staticServerNamespace, defaultPartition)) + k8s.CheckStaticServerConnectionSuccessful(t, serverClusterStaticClientOpts, StaticClientName, fmt.Sprintf("http://static-server.virtual.%s.ns.%s.ap.dc1.dc.consul", staticServerNamespace, secondaryPartition)) + k8s.CheckStaticServerConnectionSuccessful(t, clientClusterStaticClientOpts, StaticClientName, fmt.Sprintf("http://static-server.virtual.%s.ns.%s.ap.dc1.dc.consul", staticServerNamespace, defaultPartition)) } } else { - k8s.CheckStaticServerConnectionSuccessful(t, defaultPartitionClusterStaticClientOpts, StaticClientName, "http://localhost:1234") - k8s.CheckStaticServerConnectionSuccessful(t, secondaryPartitionClusterStaticClientOpts, StaticClientName, "http://localhost:1234") + k8s.CheckStaticServerConnectionSuccessful(t, serverClusterStaticClientOpts, StaticClientName, "http://localhost:1234") + k8s.CheckStaticServerConnectionSuccessful(t, clientClusterStaticClientOpts, StaticClientName, "http://localhost:1234") } // Test that kubernetes readiness status is synced to Consul. // Create the file so that the readiness probe of the static-server pod fails. logger.Log(t, "testing k8s -> consul health checks sync by making the static-server unhealthy") - k8s.RunKubectl(t, defaultPartitionClusterStaticServerOpts, "exec", "deploy/"+staticServerName, "--", "touch", "/tmp/unhealthy") - k8s.RunKubectl(t, secondaryPartitionClusterStaticServerOpts, "exec", "deploy/"+staticServerName, "--", "touch", "/tmp/unhealthy") + k8s.RunKubectl(t, serverClusterStaticServerOpts, "exec", "deploy/"+staticServerName, "--", "touch", "/tmp/unhealthy") + k8s.RunKubectl(t, clientClusterStaticServerOpts, "exec", "deploy/"+staticServerName, "--", "touch", "/tmp/unhealthy") // The readiness probe should take a moment to be reflected in Consul, CheckStaticServerConnection will retry // until Consul marks the service instance unavailable for mesh traffic, causing the connection to fail. 
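// The connection checks above poll rather than assert once. A sketch of the
// underlying pattern, assuming the helper uses the Consul SDK retry package
// the same way the rest of this file does (doRequest is a hypothetical
// stand-in for the curl exec the helper performs):
//
//	timer := &retry.Timer{Timeout: 1 * time.Minute, Wait: 1 * time.Second}
//	retry.RunWith(timer, t, func(r *retry.R) {
//		err := doRequest()
//		require.Error(r, err) // retries until Consul reflects the failing probe
//	})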
@@ -587,15 +616,15 @@ func TestPartitions_Connect(t *testing.T) { logger.Log(t, "checking that connection is unsuccessful") if cfg.EnableTransparentProxy { if !c.mirrorK8S { - k8s.CheckStaticServerConnectionMultipleFailureMessages(t, defaultPartitionClusterStaticClientOpts, StaticClientName, false, []string{"curl: (56) Recv failure: Connection reset by peer", "curl: (52) Empty reply from server", "curl: (7) Failed to connect to static-server.ns1 port 80: Connection refused"}, "", fmt.Sprintf("http://static-server.virtual.%s.ns.%s.ap.dc1.dc.consul", c.destinationNamespace, secondaryPartition)) - k8s.CheckStaticServerConnectionMultipleFailureMessages(t, secondaryPartitionClusterStaticClientOpts, StaticClientName, false, []string{"curl: (56) Recv failure: Connection reset by peer", "curl: (52) Empty reply from server", "curl: (7) Failed to connect to static-server.ns1 port 80: Connection refused"}, "", fmt.Sprintf("http://static-server.virtual.%s.ns.%s.ap.dc1.dc.consul", c.destinationNamespace, defaultPartition)) + k8s.CheckStaticServerConnectionMultipleFailureMessages(t, serverClusterStaticClientOpts, StaticClientName, false, []string{"curl: (56) Recv failure: Connection reset by peer", "curl: (52) Empty reply from server", "curl: (7) Failed to connect to static-server.ns1 port 80: Connection refused"}, "", fmt.Sprintf("http://static-server.virtual.%s.ns.%s.ap.dc1.dc.consul", c.destinationNamespace, secondaryPartition)) + k8s.CheckStaticServerConnectionMultipleFailureMessages(t, clientClusterStaticClientOpts, StaticClientName, false, []string{"curl: (56) Recv failure: Connection reset by peer", "curl: (52) Empty reply from server", "curl: (7) Failed to connect to static-server.ns1 port 80: Connection refused"}, "", fmt.Sprintf("http://static-server.virtual.%s.ns.%s.ap.dc1.dc.consul", c.destinationNamespace, defaultPartition)) } else { - k8s.CheckStaticServerConnectionMultipleFailureMessages(t, defaultPartitionClusterStaticClientOpts, StaticClientName, false, []string{"curl: (56) Recv failure: Connection reset by peer", "curl: (52) Empty reply from server", "curl: (7) Failed to connect to static-server.ns1 port 80: Connection refused"}, "", fmt.Sprintf("http://static-server.virtual.%s.ns.%s.ap.dc1.dc.consul", staticServerNamespace, secondaryPartition)) - k8s.CheckStaticServerConnectionMultipleFailureMessages(t, secondaryPartitionClusterStaticClientOpts, StaticClientName, false, []string{"curl: (56) Recv failure: Connection reset by peer", "curl: (52) Empty reply from server", "curl: (7) Failed to connect to static-server.ns1 port 80: Connection refused"}, "", fmt.Sprintf("http://static-server.virtual.%s.ns.%s.ap.dc1.dc.consul", staticServerNamespace, defaultPartition)) + k8s.CheckStaticServerConnectionMultipleFailureMessages(t, serverClusterStaticClientOpts, StaticClientName, false, []string{"curl: (56) Recv failure: Connection reset by peer", "curl: (52) Empty reply from server", "curl: (7) Failed to connect to static-server.ns1 port 80: Connection refused"}, "", fmt.Sprintf("http://static-server.virtual.%s.ns.%s.ap.dc1.dc.consul", staticServerNamespace, secondaryPartition)) + k8s.CheckStaticServerConnectionMultipleFailureMessages(t, clientClusterStaticClientOpts, StaticClientName, false, []string{"curl: (56) Recv failure: Connection reset by peer", "curl: (52) Empty reply from server", "curl: (7) Failed to connect to static-server.ns1 port 80: Connection refused"}, "", fmt.Sprintf("http://static-server.virtual.%s.ns.%s.ap.dc1.dc.consul", staticServerNamespace, defaultPartition)) } } else { 
- k8s.CheckStaticServerConnectionMultipleFailureMessages(t, defaultPartitionClusterStaticClientOpts, StaticClientName, false, []string{"curl: (56) Recv failure: Connection reset by peer", "curl: (52) Empty reply from server"}, "", "http://localhost:1234") - k8s.CheckStaticServerConnectionMultipleFailureMessages(t, secondaryPartitionClusterStaticClientOpts, StaticClientName, false, []string{"curl: (56) Recv failure: Connection reset by peer", "curl: (52) Empty reply from server"}, "", "http://localhost:1234") + k8s.CheckStaticServerConnectionMultipleFailureMessages(t, serverClusterStaticClientOpts, StaticClientName, false, []string{"curl: (56) Recv failure: Connection reset by peer", "curl: (52) Empty reply from server"}, "", "http://localhost:1234") + k8s.CheckStaticServerConnectionMultipleFailureMessages(t, clientClusterStaticClientOpts, StaticClientName, false, []string{"curl: (56) Recv failure: Connection reset by peer", "curl: (52) Empty reply from server"}, "", "http://localhost:1234") } }) }) diff --git a/acceptance/tests/partitions/partitions_sync_test.go b/acceptance/tests/partitions/partitions_sync_test.go index e29ef18c78..59108b2d3a 100644 --- a/acceptance/tests/partitions/partitions_sync_test.go +++ b/acceptance/tests/partitions/partitions_sync_test.go @@ -1,6 +1,7 @@ package partitions import ( + "context" "fmt" "strconv" "testing" @@ -15,9 +16,10 @@ import ( "github.com/hashicorp/consul/api" "github.com/hashicorp/consul/sdk/testutil/retry" "github.com/stretchr/testify/require" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// Test that Sync Catalog works in a default and ACLsEnabled installations for partitions. +// Test that Sync Catalog works in a default and ACLsAndAutoEncryptEnabled installations for partitions. func TestPartitions_Sync(t *testing.T) { env := suite.Environment() cfg := suite.Config() @@ -33,10 +35,10 @@ func TestPartitions_Sync(t *testing.T) { const secondaryPartition = "secondary" const defaultNamespace = "default" cases := []struct { - name string - destinationNamespace string - mirrorK8S bool - ACLsEnabled bool + name string + destinationNamespace string + mirrorK8S bool + ACLsAndAutoEncryptEnabled bool }{ { "default destination namespace", @@ -81,14 +83,18 @@ func TestPartitions_Sync(t *testing.T) { primaryClusterContext := env.DefaultContext(t) secondaryClusterContext := env.Context(t, environment.SecondaryContextName) + ctx := context.Background() + commonHelmValues := map[string]string{ "global.adminPartitions.enabled": "true", - "global.enableConsulNamespaces": "true", - "global.tls.enabled": "true", - "global.tls.httpsOnly": strconv.FormatBool(c.ACLsEnabled), + "global.enableConsulNamespaces": "true", + + "global.tls.enabled": "true", + "global.tls.httpsOnly": strconv.FormatBool(c.ACLsAndAutoEncryptEnabled), + "global.tls.enableAutoEncrypt": strconv.FormatBool(c.ACLsAndAutoEncryptEnabled), - "global.acls.manageSystemACLs": strconv.FormatBool(c.ACLsEnabled), + "global.acls.manageSystemACLs": strconv.FormatBool(c.ACLsAndAutoEncryptEnabled), "syncCatalog.enabled": "true", // When mirroringK8S is set, this setting is ignored. @@ -108,8 +114,8 @@ func TestPartitions_Sync(t *testing.T) { // share the same node network (docker bridge), we can use // a NodePort service so that we can access node(s) in a different Kind cluster. 
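// (Kind nodes are containers on that shared docker bridge, so a fixed
// NodePort published by one cluster is directly reachable from the other
// cluster at <node-ip>:<nodePort>; the hardcoded port below relies on this.)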
if cfg.UseKind { - serverHelmValues["server.exposeService.type"] = "NodePort" - serverHelmValues["server.exposeService.nodePort.https"] = "30000" + serverHelmValues["global.adminPartitions.service.type"] = "NodePort" + serverHelmValues["global.adminPartitions.service.nodePort.https"] = "30000" } releaseName := helpers.RandomName() @@ -127,7 +133,7 @@ func TestPartitions_Sync(t *testing.T) { logger.Logf(t, "retrieving ca cert secret %s from the server cluster and applying to the client cluster", caCertSecretName) k8s.CopySecret(t, primaryClusterContext, secondaryClusterContext, caCertSecretName) - if !c.ACLsEnabled { + if !c.ACLsAndAutoEncryptEnabled { // When auto-encrypt is disabled, we need both // the CA cert and CA key to be available in the clients cluster to generate client certificates and keys. logger.Logf(t, "retrieving ca key secret %s from the server cluster and applying to the client cluster", caKeySecretName) @@ -135,12 +141,12 @@ func TestPartitions_Sync(t *testing.T) { } partitionToken := fmt.Sprintf("%s-consul-partitions-acl-token", releaseName) - if c.ACLsEnabled { + if c.ACLsAndAutoEncryptEnabled { logger.Logf(t, "retrieving partition token secret %s from the server cluster and applying to the client cluster", partitionToken) k8s.CopySecret(t, primaryClusterContext, secondaryClusterContext, partitionToken) } - partitionServiceName := fmt.Sprintf("%s-consul-expose-servers", releaseName) + partitionServiceName := fmt.Sprintf("%s-consul-partition", releaseName) partitionSvcAddress := k8s.ServiceHost(t, cfg, primaryClusterContext, partitionServiceName) k8sAuthMethodHost := k8s.KubernetesAPIServerHost(t, cfg, secondaryClusterContext) @@ -157,9 +163,13 @@ func TestPartitions_Sync(t *testing.T) { "externalServers.enabled": "true", "externalServers.hosts[0]": partitionSvcAddress, "externalServers.tlsServerName": "server.dc1.consul", + + "client.enabled": "true", + "client.exposeGossipPorts": "true", + "client.join[0]": partitionSvcAddress, } - if c.ACLsEnabled { + if c.ACLsAndAutoEncryptEnabled { // Setup partition token and auth method host if ACLs enabled. clientHelmValues["global.acls.bootstrapToken.secretName"] = partitionToken clientHelmValues["global.acls.bootstrapToken.secretKey"] = "token" @@ -180,6 +190,15 @@ func TestPartitions_Sync(t *testing.T) { secondaryConsulCluster := consul.NewHelmCluster(t, clientHelmValues, secondaryClusterContext, cfg, releaseName) secondaryConsulCluster.Create(t) + // Ensure consul clients are created. 
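+ // (The client agent reports its admin partition in its startup log output,
+ // so grepping the first client pod's logs for "Partition: 'secondary'" is a
+ // lightweight way to confirm the clients joined the expected partition.)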
+ agentPodList, err := secondaryClusterContext.KubernetesClient(t).CoreV1().Pods(secondaryClusterContext.KubectlOptions(t).Namespace).List(ctx, metav1.ListOptions{LabelSelector: "app=consul,component=client"}) + require.NoError(t, err) + require.NotEmpty(t, agentPodList.Items) + + output, err := k8s.RunKubectlAndGetOutputE(t, secondaryClusterContext.KubectlOptions(t), "logs", agentPodList.Items[0].Name, "-n", secondaryClusterContext.KubectlOptions(t).Namespace) + require.NoError(t, err) + require.Contains(t, output, "Partition: 'secondary'") + primaryStaticServerOpts := &terratestk8s.KubectlOptions{ ContextName: primaryClusterContext.KubectlOptions(t).ContextName, ConfigPath: primaryClusterContext.KubectlOptions(t).ConfigPath, @@ -203,7 +222,7 @@ func TestPartitions_Sync(t *testing.T) { k8s.RunKubectl(t, secondaryClusterContext.KubectlOptions(t), "delete", "ns", staticServerNamespace) }) - consulClient, _ := primaryConsulCluster.SetupConsulClient(t, c.ACLsEnabled) + consulClient, _ := primaryConsulCluster.SetupConsulClient(t, c.ACLsAndAutoEncryptEnabled) defaultPartitionQueryOpts := &api.QueryOptions{Namespace: staticServerNamespace, Partition: defaultPartition} secondaryPartitionQueryOpts := &api.QueryOptions{Namespace: staticServerNamespace, Partition: secondaryPartition} @@ -214,12 +233,12 @@ func TestPartitions_Sync(t *testing.T) { } // Check that the ACL token is deleted. - if c.ACLsEnabled { + if c.ACLsAndAutoEncryptEnabled { // We need to register the cleanup function before we create the deployments // because golang will execute them in reverse order i.e. the last registered // cleanup function will be executed first. t.Cleanup(func() { - if c.ACLsEnabled { + if c.ACLsAndAutoEncryptEnabled { retry.Run(t, func(r *retry.R) { tokens, _, err := consulClient.ACL().TokenList(defaultPartitionQueryOpts) require.NoError(r, err) @@ -273,6 +292,7 @@ func TestPartitions_Sync(t *testing.T) { require.NoError(t, err) require.Equal(t, 1, len(service)) require.Equal(t, []string{"k8s"}, service[0].ServiceTags) + }) } } diff --git a/acceptance/tests/peering/main_test.go b/acceptance/tests/peering/main_test.go index 12bb35afd5..2a5bf4b448 100644 --- a/acceptance/tests/peering/main_test.go +++ b/acceptance/tests/peering/main_test.go @@ -10,13 +10,18 @@ import ( var suite testsuite.Suite +// TestMain for peering is DISABLED for 0.49. 
func TestMain(m *testing.M) { - suite = testsuite.NewSuite(m) - if suite.Config().EnableMultiCluster && !suite.Config().DisablePeering { - os.Exit(suite.Run()) - } else { - fmt.Println("Skipping peering tests because either -enable-multi-cluster is not set or -disable-peering is set") - os.Exit(0) - } + fmt.Println("Skipping peering tests because this is a beta feature and not fully supported") + os.Exit(0) + + //suite = testsuite.NewSuite(m) + // + //if suite.Config().EnableMultiCluster && !suite.Config().DisablePeering { + // os.Exit(suite.Run()) + //} else { + // fmt.Println("Skipping peering tests because either -enable-multi-cluster is not set or -disable-peering is set") + // os.Exit(0) + //} } diff --git a/acceptance/tests/peering/peering_connect_namespaces_test.go b/acceptance/tests/peering/peering_connect_namespaces_test.go index 94d82b4a2e..05915eb507 100644 --- a/acceptance/tests/peering/peering_connect_namespaces_test.go +++ b/acceptance/tests/peering/peering_connect_namespaces_test.go @@ -34,10 +34,6 @@ func TestPeering_ConnectNamespaces(t *testing.T) { t.Skipf("skipping this test because -enable-enterprise is not set") } - if cfg.EnableTransparentProxy { - t.Skipf("skipping because no t-proxy support") - } - ver, err := version.NewVersion("1.13.0") require.NoError(t, err) if cfg.ConsulVersion != nil && cfg.ConsulVersion.LessThan(ver) { @@ -48,10 +44,10 @@ func TestPeering_ConnectNamespaces(t *testing.T) { const staticClientPeer = "client" const defaultNamespace = "default" cases := []struct { - name string - destinationNamespace string - mirrorK8S bool - ACLsEnabled bool + name string + destinationNamespace string + mirrorK8S bool + ACLsAndAutoEncryptEnabled bool }{ { "default destination namespace", @@ -100,10 +96,11 @@ func TestPeering_ConnectNamespaces(t *testing.T) { "global.peering.enabled": "true", "global.enableConsulNamespaces": "true", - "global.tls.enabled": "true", - "global.tls.httpsOnly": strconv.FormatBool(c.ACLsEnabled), + "global.tls.enabled": "true", + "global.tls.httpsOnly": strconv.FormatBool(c.ACLsAndAutoEncryptEnabled), + "global.tls.enableAutoEncrypt": strconv.FormatBool(c.ACLsAndAutoEncryptEnabled), - "global.acls.manageSystemACLs": strconv.FormatBool(c.ACLsEnabled), + "global.acls.manageSystemACLs": strconv.FormatBool(c.ACLsAndAutoEncryptEnabled), "connectInject.enabled": "true", @@ -135,6 +132,8 @@ func TestPeering_ConnectNamespaces(t *testing.T) { staticServerPeerHelmValues["server.exposeGossipAndRPCPorts"] = "true" staticServerPeerHelmValues["meshGateway.service.type"] = "NodePort" staticServerPeerHelmValues["meshGateway.service.nodePort"] = "30100" + staticServerPeerHelmValues["server.exposeService.type"] = "NodePort" + staticServerPeerHelmValues["server.exposeService.nodePort.grpc"] = "30200" } releaseName := helpers.RandomName() @@ -157,6 +156,8 @@ func TestPeering_ConnectNamespaces(t *testing.T) { staticClientPeerHelmValues["server.exposeGossipAndRPCPorts"] = "true" staticClientPeerHelmValues["meshGateway.service.type"] = "NodePort" staticClientPeerHelmValues["meshGateway.service.nodePort"] = "30100" + staticClientPeerHelmValues["server.exposeService.type"] = "NodePort" + staticClientPeerHelmValues["server.exposeService.nodePort.grpc"] = "30200" } helpers.MergeMaps(staticClientPeerHelmValues, commonHelmValues) @@ -165,41 +166,6 @@ func TestPeering_ConnectNamespaces(t *testing.T) { staticClientPeerCluster := consul.NewHelmCluster(t, staticClientPeerHelmValues, staticClientPeerClusterContext, cfg, releaseName) staticClientPeerCluster.Create(t) - 
// Create Mesh resource to use mesh gateways. - logger.Log(t, "creating mesh config") - kustomizeMeshDir := "../fixtures/bases/mesh-peering" - - k8s.KubectlApplyK(t, staticServerPeerClusterContext.KubectlOptions(t), kustomizeMeshDir) - helpers.Cleanup(t, cfg.NoCleanupOnFailure, func() { - k8s.KubectlDeleteK(t, staticServerPeerClusterContext.KubectlOptions(t), kustomizeMeshDir) - }) - - k8s.KubectlApplyK(t, staticClientPeerClusterContext.KubectlOptions(t), kustomizeMeshDir) - helpers.Cleanup(t, cfg.NoCleanupOnFailure, func() { - k8s.KubectlDeleteK(t, staticClientPeerClusterContext.KubectlOptions(t), kustomizeMeshDir) - }) - - staticServerPeerClient, _ := staticServerPeerCluster.SetupConsulClient(t, c.ACLsEnabled) - staticClientPeerClient, _ := staticClientPeerCluster.SetupConsulClient(t, c.ACLsEnabled) - - // Ensure mesh config entries are created in Consul. - timer := &retry.Timer{Timeout: 1 * time.Minute, Wait: 1 * time.Second} - retry.RunWith(timer, t, func(r *retry.R) { - ceServer, _, err := staticServerPeerClient.ConfigEntries().Get(api.MeshConfig, "mesh", &api.QueryOptions{}) - require.NoError(r, err) - configEntryServer, ok := ceServer.(*api.MeshConfigEntry) - require.True(r, ok) - require.Equal(r, configEntryServer.GetName(), "mesh") - require.NoError(r, err) - - ceClient, _, err := staticClientPeerClient.ConfigEntries().Get(api.MeshConfig, "mesh", &api.QueryOptions{}) - require.NoError(r, err) - configEntryClient, ok := ceClient.(*api.MeshConfigEntry) - require.True(r, ok) - require.Equal(r, configEntryClient.GetName(), "mesh") - require.NoError(r, err) - }) - // Create the peering acceptor on the client peer. k8s.KubectlApply(t, staticClientPeerClusterContext.KubectlOptions(t), "../fixtures/bases/peering/peering-acceptor.yaml") helpers.Cleanup(t, cfg.NoCleanupOnFailure, func() { @@ -207,6 +173,7 @@ func TestPeering_ConnectNamespaces(t *testing.T) { }) // Ensure the secret is created. 
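// (The PeeringAcceptor controller publishes the name of the generated
// peering-token secret under .status.secret.name, so the retry below polls
// that field via kubectl's jsonpath output until it is populated.)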
+ timer := &retry.Timer{Timeout: 1 * time.Minute, Wait: 1 * time.Second} retry.RunWith(timer, t, func(r *retry.R) { acceptorSecretName, err := k8s.RunKubectlAndGetOutputE(t, staticClientPeerClusterContext.KubectlOptions(t), "get", "peeringacceptor", "server", "-o", "jsonpath={.status.secret.name}") require.NoError(r, err) @@ -246,6 +213,9 @@ func TestPeering_ConnectNamespaces(t *testing.T) { k8s.RunKubectl(t, staticClientPeerClusterContext.KubectlOptions(t), "delete", "ns", staticClientNamespace) }) + staticServerPeerClient, _ := staticServerPeerCluster.SetupConsulClient(t, c.ACLsAndAutoEncryptEnabled) + staticClientPeerClient, _ := staticClientPeerCluster.SetupConsulClient(t, c.ACLsAndAutoEncryptEnabled) + serverQueryOpts := &api.QueryOptions{Namespace: staticServerNamespace} clientQueryOpts := &api.QueryOptions{Namespace: staticClientNamespace} @@ -326,7 +296,7 @@ func TestPeering_ConnectNamespaces(t *testing.T) { }) } - if c.ACLsEnabled { + if c.ACLsAndAutoEncryptEnabled { logger.Log(t, "checking that the connection is not successful because there's no allow intention") if cfg.EnableTransparentProxy { k8s.CheckStaticServerConnectionMultipleFailureMessages(t, staticClientOpts, staticClientName, false, []string{"curl: (56) Recv failure: Connection reset by peer", "curl: (52) Empty reply from server", fmt.Sprintf("curl: (7) Failed to connect to static-server.%s port 80: Connection refused", c.destinationNamespace)}, "", fmt.Sprintf("http://static-server.virtual.%s.%s.consul", c.destinationNamespace, staticServerPeer)) diff --git a/acceptance/tests/peering/peering_connect_test.go b/acceptance/tests/peering/peering_connect_test.go index 00f1d124b2..6a884b2efa 100644 --- a/acceptance/tests/peering/peering_connect_test.go +++ b/acceptance/tests/peering/peering_connect_test.go @@ -34,8 +34,8 @@ func TestPeering_Connect(t *testing.T) { const staticServerPeer = "server" const staticClientPeer = "client" cases := []struct { - name string - ACLsEnabled bool + name string + ACLsAndAutoEncryptEnabled bool }{ { "default installation", @@ -55,10 +55,11 @@ func TestPeering_Connect(t *testing.T) { commonHelmValues := map[string]string{ "global.peering.enabled": "true", - "global.tls.enabled": "true", - "global.tls.httpsOnly": strconv.FormatBool(c.ACLsEnabled), + "global.tls.enabled": "true", + "global.tls.httpsOnly": strconv.FormatBool(c.ACLsAndAutoEncryptEnabled), + "global.tls.enableAutoEncrypt": strconv.FormatBool(c.ACLsAndAutoEncryptEnabled), - "global.acls.manageSystemACLs": strconv.FormatBool(c.ACLsEnabled), + "global.acls.manageSystemACLs": strconv.FormatBool(c.ACLsAndAutoEncryptEnabled), "connectInject.enabled": "true", @@ -86,6 +87,8 @@ func TestPeering_Connect(t *testing.T) { staticServerPeerHelmValues["server.exposeGossipAndRPCPorts"] = "true" staticServerPeerHelmValues["meshGateway.service.type"] = "NodePort" staticServerPeerHelmValues["meshGateway.service.nodePort"] = "30100" + staticServerPeerHelmValues["server.exposeService.type"] = "NodePort" + staticServerPeerHelmValues["server.exposeService.nodePort.grpc"] = "30200" } releaseName := helpers.RandomName() @@ -101,13 +104,15 @@ func TestPeering_Connect(t *testing.T) { } if !cfg.UseKind { - staticClientPeerHelmValues["server.replicas"] = "3" + staticServerPeerHelmValues["server.replicas"] = "3" } if cfg.UseKind { staticClientPeerHelmValues["server.exposeGossipAndRPCPorts"] = "true" staticClientPeerHelmValues["meshGateway.service.type"] = "NodePort" staticClientPeerHelmValues["meshGateway.service.nodePort"] = "30100" + 
staticClientPeerHelmValues["server.exposeService.type"] = "NodePort" + staticClientPeerHelmValues["server.exposeService.nodePort.grpc"] = "30200" } helpers.MergeMaps(staticClientPeerHelmValues, commonHelmValues) @@ -116,41 +121,6 @@ func TestPeering_Connect(t *testing.T) { staticClientPeerCluster := consul.NewHelmCluster(t, staticClientPeerHelmValues, staticClientPeerClusterContext, cfg, releaseName) staticClientPeerCluster.Create(t) - // Create Mesh resource to use mesh gateways. - logger.Log(t, "creating mesh config") - kustomizeMeshDir := "../fixtures/bases/mesh-peering" - - k8s.KubectlApplyK(t, staticServerPeerClusterContext.KubectlOptions(t), kustomizeMeshDir) - helpers.Cleanup(t, cfg.NoCleanupOnFailure, func() { - k8s.KubectlDeleteK(t, staticServerPeerClusterContext.KubectlOptions(t), kustomizeMeshDir) - }) - - k8s.KubectlApplyK(t, staticClientPeerClusterContext.KubectlOptions(t), kustomizeMeshDir) - helpers.Cleanup(t, cfg.NoCleanupOnFailure, func() { - k8s.KubectlDeleteK(t, staticClientPeerClusterContext.KubectlOptions(t), kustomizeMeshDir) - }) - - staticServerPeerClient, _ := staticServerPeerCluster.SetupConsulClient(t, c.ACLsEnabled) - staticClientPeerClient, _ := staticClientPeerCluster.SetupConsulClient(t, c.ACLsEnabled) - - // Ensure mesh config entries are created in Consul. - timer := &retry.Timer{Timeout: 1 * time.Minute, Wait: 1 * time.Second} - retry.RunWith(timer, t, func(r *retry.R) { - ceServer, _, err := staticServerPeerClient.ConfigEntries().Get(api.MeshConfig, "mesh", &api.QueryOptions{}) - require.NoError(r, err) - configEntryServer, ok := ceServer.(*api.MeshConfigEntry) - require.True(r, ok) - require.Equal(r, configEntryServer.GetName(), "mesh") - require.NoError(r, err) - - ceClient, _, err := staticClientPeerClient.ConfigEntries().Get(api.MeshConfig, "mesh", &api.QueryOptions{}) - require.NoError(r, err) - configEntryClient, ok := ceClient.(*api.MeshConfigEntry) - require.True(r, ok) - require.Equal(r, configEntryClient.GetName(), "mesh") - require.NoError(r, err) - }) - // Create the peering acceptor on the client peer. k8s.KubectlApply(t, staticClientPeerClusterContext.KubectlOptions(t), "../fixtures/bases/peering/peering-acceptor.yaml") helpers.Cleanup(t, cfg.NoCleanupOnFailure, func() { @@ -158,6 +128,7 @@ func TestPeering_Connect(t *testing.T) { }) // Ensure the secret is created. + timer := &retry.Timer{Timeout: 1 * time.Minute, Wait: 1 * time.Second} retry.RunWith(timer, t, func(r *retry.R) { acceptorSecretName, err := k8s.RunKubectlAndGetOutputE(t, staticClientPeerClusterContext.KubectlOptions(t), "get", "peeringacceptor", "server", "-o", "jsonpath={.status.secret.name}") require.NoError(r, err) @@ -197,6 +168,9 @@ func TestPeering_Connect(t *testing.T) { k8s.RunKubectl(t, staticClientPeerClusterContext.KubectlOptions(t), "delete", "ns", staticClientNamespace) }) + staticServerPeerClient, _ := staticServerPeerCluster.SetupConsulClient(t, c.ACLsAndAutoEncryptEnabled) + staticClientPeerClient, _ := staticClientPeerCluster.SetupConsulClient(t, c.ACLsAndAutoEncryptEnabled) + // Create a ProxyDefaults resource to configure services to use the mesh // gateways. 
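// The kustomize fixture applied below is the CRD form of a global
// proxy-defaults entry; in client-API terms it corresponds roughly to the
// following (the gateway mode shown is an assumption; the fixture may use a
// different mode):
//
//	_ = &api.ProxyConfigEntry{
//		Kind:        api.ProxyDefaults,
//		Name:        "global",
//		MeshGateway: api.MeshGatewayConfig{Mode: api.MeshGatewayModeLocal},
//	}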
logger.Log(t, "creating proxy-defaults config") @@ -253,7 +227,7 @@ func TestPeering_Connect(t *testing.T) { k8s.KubectlDeleteK(t, staticServerPeerClusterContext.KubectlOptions(t), "../fixtures/cases/crd-peers/default") }) - if c.ACLsEnabled { + if c.ACLsAndAutoEncryptEnabled { logger.Log(t, "checking that the connection is not successful because there's no allow intention") if cfg.EnableTransparentProxy { k8s.CheckStaticServerConnectionMultipleFailureMessages(t, staticClientOpts, staticClientName, false, []string{"curl: (56) Recv failure: Connection reset by peer", "curl: (52) Empty reply from server", "curl: (7) Failed to connect to static-server port 80: Connection refused"}, "", fmt.Sprintf("http://static-server.virtual.%s.consul", staticServerPeer)) @@ -284,7 +258,6 @@ func TestPeering_Connect(t *testing.T) { } else { k8s.CheckStaticServerConnectionSuccessful(t, staticClientOpts, staticClientName, "http://localhost:1234") } - }) } } diff --git a/acceptance/tests/snapshot-agent/snapshot_agent_k8s_secret_test.go b/acceptance/tests/snapshot-agent/snapshot_agent_k8s_secret_test.go index 5895990901..531e7a5274 100644 --- a/acceptance/tests/snapshot-agent/snapshot_agent_k8s_secret_test.go +++ b/acceptance/tests/snapshot-agent/snapshot_agent_k8s_secret_test.go @@ -5,7 +5,7 @@ import ( "context" "encoding/json" "fmt" - "strconv" + "strings" "testing" "time" @@ -15,7 +15,7 @@ import ( "github.com/hashicorp/consul-k8s/acceptance/framework/helpers" "github.com/hashicorp/consul-k8s/acceptance/framework/k8s" "github.com/hashicorp/consul-k8s/acceptance/framework/logger" - "github.com/hashicorp/consul/sdk/testutil/retry" + "github.com/hashicorp/go-uuid" "github.com/stretchr/testify/require" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -31,76 +31,83 @@ func TestSnapshotAgent_K8sSecret(t *testing.T) { if cfg.EnableCNI { t.Skipf("skipping because -enable-cni is set and snapshot agent is already tested with regular tproxy") } + ctx := suite.Environment().DefaultContext(t) + kubectlOptions := ctx.KubectlOptions(t) + ns := kubectlOptions.Namespace + releaseName := helpers.RandomName() - cases := map[string]struct { - secure bool - }{ - "non-secure": {secure: false}, - "secure": {secure: true}, - } + // Generate a bootstrap token + bootstrapToken, err := uuid.GenerateUUID() + require.NoError(t, err) - for name, c := range cases { - t.Run(name, func(t *testing.T) { - ctx := suite.Environment().DefaultContext(t) - kubectlOptions := ctx.KubectlOptions(t) - ns := kubectlOptions.Namespace - releaseName := helpers.RandomName() + bsSecretName := fmt.Sprintf("%s-acl-bootstrap-token", releaseName) + bsSecretKey := "token" + saSecretName := fmt.Sprintf("%s-snapshot-agent-config", releaseName) + saSecretKey := "token" - saSecretName := fmt.Sprintf("%s-snapshot-agent-config", releaseName) - saSecretKey := "config" + // Create cluster + helmValues := map[string]string{ + "global.tls.enabled": "true", + "global.gossipEncryption.autoGenerate": "true", + "global.acls.manageSystemACLs": "true", + "global.acls.bootstrapToken.secretName": bsSecretName, + "global.acls.bootstrapToken.secretKey": bsSecretKey, + "client.snapshotAgent.enabled": "true", + "client.snapshotAgent.configSecret.secretName": saSecretName, + "client.snapshotAgent.configSecret.secretKey": saSecretKey, + } - // Create cluster - helmValues := map[string]string{ - "global.tls.enabled": strconv.FormatBool(c.secure), - "global.gossipEncryption.autoGenerate": strconv.FormatBool(c.secure), - "global.acls.manageSystemACLs": strconv.FormatBool(c.secure), - 
"server.snapshotAgent.enabled": "true", - "server.snapshotAgent.configSecret.secretName": saSecretName, - "server.snapshotAgent.configSecret.secretKey": saSecretKey, - "connectInject.enabled": "false", - "controller.enabled": "false", - } + // Get new cluster + consulCluster := consul.NewHelmCluster(t, helmValues, suite.Environment().DefaultContext(t), cfg, releaseName) + client := environment.KubernetesClientFromOptions(t, kubectlOptions) - // Get new cluster - consulCluster := consul.NewHelmCluster(t, helmValues, suite.Environment().DefaultContext(t), cfg, releaseName) - client := environment.KubernetesClientFromOptions(t, kubectlOptions) + // Add bootstrap token secret + logger.Log(t, "Storing bootstrap token as a k8s secret") + consul.CreateK8sSecret(t, client, cfg, ns, bsSecretName, bsSecretKey, bootstrapToken) - // Add snapshot agent config secret - logger.Log(t, "Storing snapshot agent config as a k8s secret") - config := generateSnapshotAgentConfig(t) - logger.Logf(t, "Snapshot agent config: %s", config) - consul.CreateK8sSecret(t, client, cfg, ns, saSecretName, saSecretKey, config) + // Add snapshot agent config secret + logger.Log(t, "Storing snapshot agent config as a k8s secret") + config := generateSnapshotAgentConfig(t, bootstrapToken) + logger.Logf(t, "Snapshot agent config: %s", config) + consul.CreateK8sSecret(t, client, cfg, ns, saSecretName, saSecretKey, config) - // Create cluster - consulCluster.Create(t) - // ---------------------------------- + // Create cluster + consulCluster.Create(t) + // ---------------------------------- - // Validate that consul snapshot agent is running correctly and is generating snapshot files - logger.Log(t, "Confirming that Consul Snapshot Agent is generating snapshot files") - // Create k8s client from kubectl options. + // Validate that consul snapshot agent is running correctly and is generating snapshot files + logger.Log(t, "Confirming that Consul Snapshot Agent is generating snapshot files") + // Create k8s client from kubectl options. + + podList, err := client.CoreV1().Pods(kubectlOptions.Namespace).List(context.Background(), + metav1.ListOptions{LabelSelector: fmt.Sprintf("app=consul,component=client-snapshot-agent,release=%s", releaseName)}) + require.NoError(t, err) + require.True(t, len(podList.Items) > 0) - podList, err := client.CoreV1().Pods(kubectlOptions.Namespace).List(context.Background(), - metav1.ListOptions{LabelSelector: fmt.Sprintf("app=consul,component=server,release=%s", releaseName)}) - require.NoError(t, err) - require.Len(t, podList.Items, 1, "expected to find only 1 consul server instance") + // Wait for 10seconds to allow snapsot to write. + time.Sleep(10 * time.Second) - // We need to give some extra time for ACLs to finish bootstrapping and for servers to come up. - timer := &retry.Timer{Timeout: 1 * time.Minute, Wait: 1 * time.Second} - retry.RunWith(timer, t, func(r *retry.R) { - // Loop through snapshot agents. Only one will be the leader and have the snapshot files. - pod := podList.Items[0] - snapshotFileListOutput, err := k8s.RunKubectlAndGetOutputWithLoggerE(t, kubectlOptions, terratestLogger.Discard, "exec", pod.Name, "-c", "consul-snapshot-agent", "--", "ls", "/tmp") - require.NoError(r, err) - logger.Logf(t, "Snapshot: \n%s", snapshotFileListOutput) - require.Contains(r, snapshotFileListOutput, ".snap", "Agent pod does not contain snapshot files") - }) - }) + // Loop through snapshot agents. Only one will be the leader and have the snapshot files. 
+ hasSnapshots := false + for _, pod := range podList.Items { + snapshotFileListOutput, err := k8s.RunKubectlAndGetOutputWithLoggerE(t, kubectlOptions, terratestLogger.Discard, "exec", pod.Name, "-c", "consul-snapshot-agent", "--", "ls", "/") + logger.Logf(t, "Snapshot: \n%s", snapshotFileListOutput) + require.NoError(t, err) + if strings.Contains(snapshotFileListOutput, ".snap") { + logger.Logf(t, "Agent pod contains snapshot files") + hasSnapshots = true + break + } else { + logger.Logf(t, "Agent pod does not contain snapshot files") + } } + require.True(t, hasSnapshots, ".snap") } -func generateSnapshotAgentConfig(t *testing.T) string { +func generateSnapshotAgentConfig(t *testing.T, token string) string { config := map[string]interface{}{ "snapshot_agent": map[string]interface{}{ + "token": token, "log": map[string]interface{}{ "level": "INFO", "enable_syslog": false, @@ -117,7 +124,7 @@ func generateSnapshotAgentConfig(t *testing.T) string { "local_scratch_path": "", }, "local_storage": map[string]interface{}{ - "path": "/tmp", + "path": ".", }, }, } diff --git a/acceptance/tests/snapshot-agent/snapshot_agent_vault_test.go b/acceptance/tests/snapshot-agent/snapshot_agent_vault_test.go index 5202b3269a..a0f3539592 100644 --- a/acceptance/tests/snapshot-agent/snapshot_agent_vault_test.go +++ b/acceptance/tests/snapshot-agent/snapshot_agent_vault_test.go @@ -3,6 +3,7 @@ package snapshotagent import ( "context" "fmt" + "strings" "testing" "time" @@ -13,7 +14,6 @@ import ( "github.com/hashicorp/consul-k8s/acceptance/framework/k8s" "github.com/hashicorp/consul-k8s/acceptance/framework/logger" "github.com/hashicorp/consul-k8s/acceptance/framework/vault" - "github.com/hashicorp/consul/sdk/testutil/retry" "github.com/hashicorp/go-uuid" "github.com/hashicorp/go-version" "github.com/stretchr/testify/require" @@ -54,6 +54,13 @@ func TestSnapshotAgent_Vault(t *testing.T) { // ------------------------- // PKI // ------------------------- + // Configure Service Mesh CA + connectCAPolicy := "connect-ca-dc1" + connectCARootPath := "connect_root" + connectCAIntermediatePath := "dc1/connect_inter" + // Configure Policy for Connect CA + vault.CreateConnectCARootAndIntermediatePKIPolicy(t, vaultClient, connectCAPolicy, connectCARootPath, connectCAIntermediatePath) + // Configure Server PKI serverPKIConfig := &vault.PKIAndAuthRoleConfiguration{ BaseURL: "pki", @@ -105,7 +112,7 @@ func TestSnapshotAgent_Vault(t *testing.T) { bootstrapTokenSecret.SaveSecretAndAddReadPolicy(t, vaultClient) // Snapshot Agent config - snapshotAgentConfig := generateSnapshotAgentConfig(t) + snapshotAgentConfig := generateSnapshotAgentConfig(t, bootstrapToken) require.NoError(t, err) snapshotAgentConfigSecret := &vault.KV2Secret{ Path: "consul/data/secret/snapshot-agent-config", @@ -118,7 +125,7 @@ func TestSnapshotAgent_Vault(t *testing.T) { // ------------------------- // Additional Auth Roles // ------------------------- - serverPolicies := fmt.Sprintf("%s,%s,%s,%s", gossipSecret.PolicyName, serverPKIConfig.PolicyName, bootstrapTokenSecret.PolicyName, snapshotAgentConfigSecret.PolicyName) + serverPolicies := fmt.Sprintf("%s,%s,%s,%s", gossipSecret.PolicyName, connectCAPolicy, serverPKIConfig.PolicyName, bootstrapTokenSecret.PolicyName) if cfg.EnableEnterprise { serverPolicies += fmt.Sprintf(",%s", licenseSecret.PolicyName) } @@ -134,6 +141,18 @@ func TestSnapshotAgent_Vault(t *testing.T) { } srvAuthRoleConfig.ConfigureK8SAuthRole(t, vaultClient) + // client + consulClientRole := "client" + consulClientServiceAccountName := 
fmt.Sprintf("%s-consul-%s", consulReleaseName, "client") + clientAuthRoleConfig := &vault.KubernetesAuthRoleConfiguration{ + ServiceAccountName: consulClientServiceAccountName, + KubernetesNamespace: ns, + AuthMethodPath: "kubernetes", + RoleName: consulClientRole, + PolicyNames: gossipSecret.PolicyName, + } + clientAuthRoleConfig.ConfigureK8SAuthRole(t, vaultClient) + // manageSystemACLs manageSystemACLsRole := "server-acl-init" manageSystemACLsServiceAccountName := fmt.Sprintf("%s-consul-%s", consulReleaseName, "server-acl-init") @@ -156,6 +175,18 @@ func TestSnapshotAgent_Vault(t *testing.T) { } srvCAAuthRoleConfig.ConfigureK8SAuthRole(t, vaultClient) + // snapshot agent config + snapAgentRole := "snapshot-agent" + snapAgentServiceAccountName := fmt.Sprintf("%s-consul-%s", consulReleaseName, "snapshot-agent") + saAuthRoleConfig := &vault.KubernetesAuthRoleConfiguration{ + ServiceAccountName: snapAgentServiceAccountName, + KubernetesNamespace: ns, + AuthMethodPath: "kubernetes", + RoleName: snapAgentRole, + PolicyNames: fmt.Sprintf("%s,%s", licenseSecret.PolicyName, snapshotAgentConfigSecret.PolicyName), + } + saAuthRoleConfig.ConfigureK8SAuthRole(t, vaultClient) + vaultCASecret := vault.CASecretName(vaultReleaseName) consulHelmValues := map[string]string{ @@ -163,17 +194,23 @@ func TestSnapshotAgent_Vault(t *testing.T) { "server.extraVolumes[0].name": vaultCASecret, "server.extraVolumes[0].load": "false", - "connectInject.enabled": "false", + "connectInject.enabled": "true", "connectInject.replicas": "1", - "controller.enabled": "false", + "controller.enabled": "true", "global.secretsBackend.vault.enabled": "true", "global.secretsBackend.vault.consulServerRole": consulServerRole, + "global.secretsBackend.vault.consulClientRole": consulClientRole, + "global.secretsBackend.vault.consulCARole": serverPKIConfig.RoleName, "global.secretsBackend.vault.manageSystemACLsRole": manageSystemACLsRole, "global.secretsBackend.vault.ca.secretName": vaultCASecret, "global.secretsBackend.vault.ca.secretKey": "tls.crt", + "global.secretsBackend.vault.connectCA.address": vaultCluster.Address(), + "global.secretsBackend.vault.connectCA.rootPKIPath": connectCARootPath, + "global.secretsBackend.vault.connectCA.intermediatePKIPath": connectCAIntermediatePath, + "global.acls.manageSystemACLs": "true", "global.acls.bootstrapToken.secretName": bootstrapTokenSecret.Path, "global.acls.bootstrapToken.secretKey": bootstrapTokenSecret.Key, @@ -183,9 +220,10 @@ func TestSnapshotAgent_Vault(t *testing.T) { "global.tls.caCert.secretName": serverPKIConfig.CAPath, "global.tls.enableAutoEncrypt": "true", - "server.snapshotAgent.enabled": "true", - "server.snapshotAgent.configSecret.secretName": snapshotAgentConfigSecret.Path, - "server.snapshotAgent.configSecret.secretKey": snapshotAgentConfigSecret.Key, + "client.snapshotAgent.enabled": "true", + "client.snapshotAgent.configSecret.secretName": snapshotAgentConfigSecret.Path, + "client.snapshotAgent.configSecret.secretKey": snapshotAgentConfigSecret.Key, + "global.secretsBackend.vault.consulSnapshotAgentRole": snapAgentRole, } if cfg.EnableEnterprise { @@ -202,18 +240,26 @@ func TestSnapshotAgent_Vault(t *testing.T) { // Create k8s client from kubectl options. 
client := environment.KubernetesClientFromOptions(t, kubectlOptions) podList, err := client.CoreV1().Pods(kubectlOptions.Namespace).List(context.Background(), - metav1.ListOptions{LabelSelector: fmt.Sprintf("app=consul,component=server,release=%s", consulReleaseName)}) + metav1.ListOptions{LabelSelector: fmt.Sprintf("app=consul,component=client-snapshot-agent,release=%s", consulReleaseName)}) require.NoError(t, err) - require.Len(t, podList.Items, 1, "expected to find only 1 consul server instance") - - // We need to give some extra time for ACLs to finish bootstrapping and for servers to come up. - timer := &retry.Timer{Timeout: 1 * time.Minute, Wait: 1 * time.Second} - retry.RunWith(timer, t, func(r *retry.R) { - // Loop through snapshot agents. Only one will be the leader and have the snapshot files. - pod := podList.Items[0] - snapshotFileListOutput, err := k8s.RunKubectlAndGetOutputWithLoggerE(t, kubectlOptions, terratestLogger.Discard, "exec", pod.Name, "-c", "consul-snapshot-agent", "--", "ls", "/tmp") - require.NoError(r, err) + require.True(t, len(podList.Items) > 0) + + // Wait for 10 seconds to allow snapshot to write. + time.Sleep(10 * time.Second) + + // Loop through snapshot agents. Only one will be the leader and have the snapshot files. + hasSnapshots := false + for _, pod := range podList.Items { + snapshotFileListOutput, err := k8s.RunKubectlAndGetOutputWithLoggerE(t, kubectlOptions, terratestLogger.Discard, "exec", pod.Name, "-c", "consul-snapshot-agent", "--", "ls", "/") logger.Logf(t, "Snapshot: \n%s", snapshotFileListOutput) - require.Contains(r, snapshotFileListOutput, ".snap", "Agent pod does not contain snapshot files") - }) + require.NoError(t, err) + if strings.Contains(snapshotFileListOutput, ".snap") { + logger.Logf(t, "Agent pod contains snapshot files") + hasSnapshots = true + break + } else { + logger.Logf(t, "Agent pod does not contain snapshot files") + } + } + require.True(t, hasSnapshots) } diff --git a/acceptance/tests/sync/sync_catalog_test.go b/acceptance/tests/sync/sync_catalog_test.go index 92b006cac6..942843d53f 100644 --- a/acceptance/tests/sync/sync_catalog_test.go +++ b/acceptance/tests/sync/sync_catalog_test.go @@ -2,7 +2,6 @@ package sync import ( "fmt" - "strconv" "testing" "time" @@ -23,25 +22,45 @@ func TestSyncCatalog(t *testing.T) { if cfg.EnableCNI { t.Skipf("skipping because -enable-cni is set and sync catalog is already tested with regular tproxy") } - - cases := map[string]struct { - secure bool + cases := []struct { + name string + helmValues map[string]string + secure bool }{ - "non-secure": {secure: false}, - "secure": {secure: true}, + { + "Default installation", + map[string]string{ + "syncCatalog.enabled": "true", + }, + false, + }, + { + "Secure installation (with TLS and ACLs enabled)", + map[string]string{ + "syncCatalog.enabled": "true", + "global.tls.enabled": "true", + "global.acls.manageSystemACLs": "true", + }, + true, + }, + { + "Secure installation (with TLS with auto-encrypt and ACLs enabled)", + map[string]string{ + "syncCatalog.enabled": "true", + "global.tls.enabled": "true", + "global.tls.enableAutoEncrypt": "true", + "global.acls.manageSystemACLs": "true", + }, + true, + }, } - for name, c := range cases { - t.Run(name, func(t *testing.T) { + for _, c := range cases { + t.Run(c.name, func(t *testing.T) { ctx := suite.Environment().DefaultContext(t) - helmValues := map[string]string{ - "syncCatalog.enabled": "true", - "global.tls.enabled": strconv.FormatBool(c.secure), - "global.acls.manageSystemACLs": 
strconv.FormatBool(c.secure), - } releaseName := helpers.RandomName() - consulCluster := consul.NewHelmCluster(t, helmValues, ctx, suite.Config(), releaseName) + consulCluster := consul.NewHelmCluster(t, c.helmValues, ctx, suite.Config(), releaseName) consulCluster.Create(t) diff --git a/acceptance/tests/terminating-gateway/terminating_gateway_destinations_test.go b/acceptance/tests/terminating-gateway/terminating_gateway_destinations_test.go index 4ff4ae7bd4..d18d971743 100644 --- a/acceptance/tests/terminating-gateway/terminating_gateway_destinations_test.go +++ b/acceptance/tests/terminating-gateway/terminating_gateway_destinations_test.go @@ -2,6 +2,9 @@ package terminatinggateway import ( "fmt" + "github.com/hashicorp/consul/api" + "github.com/hashicorp/go-version" + "github.com/stretchr/testify/require" "strconv" "testing" @@ -9,9 +12,6 @@ import ( "github.com/hashicorp/consul-k8s/acceptance/framework/helpers" "github.com/hashicorp/consul-k8s/acceptance/framework/k8s" "github.com/hashicorp/consul-k8s/acceptance/framework/logger" - "github.com/hashicorp/consul/api" - "github.com/hashicorp/go-version" - "github.com/stretchr/testify/require" ) // Test that egress Destinations route through terminating gateways. @@ -60,6 +60,7 @@ func TestTerminatingGatewayDestinations(t *testing.T) { "global.acls.manageSystemACLs": strconv.FormatBool(c.secure), "global.tls.enabled": strconv.FormatBool(c.secure), + "global.tls.enableAutoEncrypt": strconv.FormatBool(c.secure), } logger.Log(t, "creating consul cluster") diff --git a/acceptance/tests/terminating-gateway/terminating_gateway_namespaces_test.go b/acceptance/tests/terminating-gateway/terminating_gateway_namespaces_test.go index 8c4435ae75..7168fdd4e8 100644 --- a/acceptance/tests/terminating-gateway/terminating_gateway_namespaces_test.go +++ b/acceptance/tests/terminating-gateway/terminating_gateway_namespaces_test.go @@ -10,12 +10,17 @@ import ( "github.com/hashicorp/consul-k8s/acceptance/framework/helpers" "github.com/hashicorp/consul-k8s/acceptance/framework/k8s" "github.com/hashicorp/consul-k8s/acceptance/framework/logger" + "github.com/hashicorp/consul/api" + "github.com/stretchr/testify/require" ) const testNamespace = "ns1" // Test we can connect through the terminating gateway when both // the terminating gateway and the connect service are in the same namespace. +// These tests currently only test non-secure and secure without auto-encrypt installations +// because in the case of namespaces there isn't a significant distinction in code between auto-encrypt +// and non-auto-encrypt secure installations, so testing just one is enough. func TestTerminatingGatewaySingleNamespace(t *testing.T) { cfg := suite.Config() if !cfg.EnableEnterprise { @@ -26,10 +31,10 @@ func TestTerminatingGatewaySingleNamespace(t *testing.T) { secure bool }{ { - secure: false, + false, }, { - secure: true, + true, }, } for _, c := range cases { @@ -37,6 +42,8 @@ func TestTerminatingGatewaySingleNamespace(t *testing.T) { t.Run(name, func(t *testing.T) { ctx := suite.Environment().DefaultContext(t) + // Install the Helm chart without the terminating gateway first + // so that we can create the namespace for it.
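+ // (The terminating gateway itself is enabled afterwards via consulCluster.Upgrade, once the destination Consul namespace exists; see the upgrade call below.)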
helmValues := map[string]string{ "connectInject.enabled": "true", "connectInject.consulNamespaces.consulDestinationNamespace": testNamespace, @@ -44,19 +51,33 @@ func TestTerminatingGatewaySingleNamespace(t *testing.T) { "global.enableConsulNamespaces": "true", "global.acls.manageSystemACLs": strconv.FormatBool(c.secure), "global.tls.enabled": strconv.FormatBool(c.secure), - - "terminatingGateways.enabled": "true", - "terminatingGateways.gateways[0].name": "terminating-gateway", - "terminatingGateways.gateways[0].replicas": "1", - "terminatingGateways.gateways[0].consulNamespace": testNamespace, } releaseName := helpers.RandomName() consulCluster := consul.NewHelmCluster(t, helmValues, ctx, cfg, releaseName) + consulCluster.Create(t) consulClient, _ := consulCluster.SetupConsulClient(t, c.secure) + // Create the destination namespace in the non-secure case. + // In the secure installation, this namespace is created by the server-acl-init job. + if !c.secure { + logger.Logf(t, "creating the %s namespace in Consul", testNamespace) + _, _, err := consulClient.Namespaces().Create(&api.Namespace{ + Name: testNamespace, + }, nil) + require.NoError(t, err) + } + + logger.Log(t, "upgrading with terminating gateways enabled") + consulCluster.Upgrade(t, map[string]string{ + "terminatingGateways.enabled": "true", + "terminatingGateways.gateways[0].name": "terminating-gateway", + "terminatingGateways.gateways[0].replicas": "1", + "terminatingGateways.gateways[0].consulNamespace": testNamespace, + }) + logger.Logf(t, "creating Kubernetes namespace %s", testNamespace) k8s.RunKubectl(t, ctx.KubectlOptions(t), "create", "ns", testNamespace) helpers.Cleanup(t, cfg.NoCleanupOnFailure, func() { @@ -111,6 +132,9 @@ func TestTerminatingGatewaySingleNamespace(t *testing.T) { // Test we can connect through the terminating gateway when the terminating gateway, // the external service, and the connect service are in different namespace. +// These tests currently only test non-secure and secure without auto-encrypt installations +// because in the case of namespaces there isn't a significant distinction in code between auto-encrypt +// and non-auto-encrypt secure installations, so testing just one is enough. func TestTerminatingGatewayNamespaceMirroring(t *testing.T) { cfg := suite.Config() if !cfg.EnableEnterprise { @@ -121,10 +145,10 @@ func TestTerminatingGatewayNamespaceMirroring(t *testing.T) { secure bool }{ { - secure: false, + false, }, { - secure: true, + true, }, } for _, c := range cases { diff --git a/acceptance/tests/terminating-gateway/terminating_gateway_test.go b/acceptance/tests/terminating-gateway/terminating_gateway_test.go index 16809de5e2..8facd30f53 100644 --- a/acceptance/tests/terminating-gateway/terminating_gateway_test.go +++ b/acceptance/tests/terminating-gateway/terminating_gateway_test.go @@ -16,17 +16,24 @@ import ( // Test that terminating gateways work in a default and secure installations. 
func TestTerminatingGateway(t *testing.T) { cases := []struct { - secure bool + secure bool + autoEncrypt bool }{ { - secure: false, + false, + false, }, { - secure: true, + true, + false, + }, + { + true, + true, }, } for _, c := range cases { - name := fmt.Sprintf("secure: %t", c.secure) + name := fmt.Sprintf("secure: %t, auto-encrypt: %t", c.secure, c.autoEncrypt) t.Run(name, func(t *testing.T) { ctx := suite.Environment().DefaultContext(t) cfg := suite.Config() @@ -39,6 +46,7 @@ func TestTerminatingGateway(t *testing.T) { "global.acls.manageSystemACLs": strconv.FormatBool(c.secure), "global.tls.enabled": strconv.FormatBool(c.secure), + "global.tls.enableAutoEncrypt": strconv.FormatBool(c.autoEncrypt), } logger.Log(t, "creating consul cluster") diff --git a/acceptance/tests/vault/main_test.go b/acceptance/tests/vault/main_test.go index ca28509a63..1d3a5a5842 100644 --- a/acceptance/tests/vault/main_test.go +++ b/acceptance/tests/vault/main_test.go @@ -1,7 +1,6 @@ package vault import ( - "fmt" "os" "testing" @@ -11,8 +10,6 @@ import ( var suite testsuite.Suite func TestMain(m *testing.M) { - fmt.Println("Skipping vault tests because it's not supported with agentless yet") - os.Exit(0) - //suite = testsuite.NewSuite(m) - //os.Exit(suite.Run()) + suite = testsuite.NewSuite(m) + os.Exit(suite.Run()) } diff --git a/acceptance/tests/vault/vault_partitions_test.go b/acceptance/tests/vault/vault_partitions_test.go index f63b3254dd..5ff9ae7a6b 100644 --- a/acceptance/tests/vault/vault_partitions_test.go +++ b/acceptance/tests/vault/vault_partitions_test.go @@ -346,10 +346,11 @@ func TestVault_Partitions(t *testing.T) { // share the same node network (docker bridge), we can use // a NodePort service so that we can access node(s) in a different Kind cluster. if cfg.UseKind { + serverHelmValues["global.adminPartitions.service.type"] = "NodePort" + serverHelmValues["global.adminPartitions.service.nodePort.https"] = "30000" serverHelmValues["meshGateway.service.type"] = "NodePort" serverHelmValues["meshGateway.service.nodePort"] = "30100" serverHelmValues["server.exposeService.type"] = "NodePort" - serverHelmValues["server.exposeService.nodePort.https"] = "30000" } helpers.MergeMaps(serverHelmValues, commonHelmValues) diff --git a/acceptance/tests/wan-federation/wan_federation_test.go b/acceptance/tests/wan-federation/wan_federation_test.go deleted file mode 100644 index e8b874895c..0000000000 --- a/acceptance/tests/wan-federation/wan_federation_test.go +++ /dev/null @@ -1,190 +0,0 @@ -package wanfederation - -import ( - "context" - "fmt" - "strconv" - "testing" - - "github.com/hashicorp/consul-k8s/acceptance/framework/consul" - "github.com/hashicorp/consul-k8s/acceptance/framework/environment" - "github.com/hashicorp/consul-k8s/acceptance/framework/helpers" - "github.com/hashicorp/consul-k8s/acceptance/framework/k8s" - "github.com/hashicorp/consul-k8s/acceptance/framework/logger" - "github.com/hashicorp/consul/api" - "github.com/stretchr/testify/require" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -const StaticClientName = "static-client" - -// Test that Connect and wan federation over mesh gateways work in a default installation -// i.e. without ACLs because TLS is required for WAN federation over mesh gateways.
-func TestWANFederation(t *testing.T) { - cases := []struct { - name string - secure bool - }{ - { - name: "secure", - secure: true, - }, - { - name: "default", - secure: false, - }, - } - - for _, c := range cases { - t.Run(c.name, func(t *testing.T) { - - env := suite.Environment() - cfg := suite.Config() - - if cfg.UseKind { - t.Skipf("skipping wan federation tests as they currently fail on Kind even though they work on other clouds.") - } - - primaryContext := env.DefaultContext(t) - secondaryContext := env.Context(t, environment.SecondaryContextName) - - primaryHelmValues := map[string]string{ - "global.datacenter": "dc1", - - "global.tls.enabled": "true", - "global.tls.httpsOnly": strconv.FormatBool(c.secure), - - "global.federation.enabled": "true", - "global.federation.createFederationSecret": "true", - - "global.acls.manageSystemACLs": strconv.FormatBool(c.secure), - "global.acls.createReplicationToken": strconv.FormatBool(c.secure), - - "connectInject.enabled": "true", - "connectInject.replicas": "1", - "controller.enabled": "true", - - "meshGateway.enabled": "true", - "meshGateway.replicas": "1", - } - - if cfg.UseKind { - primaryHelmValues["meshGateway.service.type"] = "NodePort" - primaryHelmValues["meshGateway.service.nodePort"] = "30000" - } - - releaseName := helpers.RandomName() - - // Install the primary consul cluster in the default kubernetes context - primaryConsulCluster := consul.NewHelmCluster(t, primaryHelmValues, primaryContext, cfg, releaseName) - primaryConsulCluster.Create(t) - - // Get the federation secret from the primary cluster and apply it to secondary cluster - federationSecretName := fmt.Sprintf("%s-consul-federation", releaseName) - logger.Logf(t, "retrieving federation secret %s from the primary cluster and applying to the secondary", federationSecretName) - federationSecret, err := primaryContext.KubernetesClient(t).CoreV1().Secrets(primaryContext.KubectlOptions(t).Namespace).Get(context.Background(), federationSecretName, metav1.GetOptions{}) - require.NoError(t, err) - federationSecret.ResourceVersion = "" - _, err = secondaryContext.KubernetesClient(t).CoreV1().Secrets(secondaryContext.KubectlOptions(t).Namespace).Create(context.Background(), federationSecret, metav1.CreateOptions{}) - require.NoError(t, err) - - var k8sAuthMethodHost string - // When running on kind, the kube API address in kubeconfig will have a localhost address - // which will not work from inside the container. That's why we need to use the endpoints address instead - // which will point the node IP. - if cfg.UseKind { - // The Kubernetes AuthMethod host is read from the endpoints for the Kubernetes service. 
- kubernetesEndpoint, err := secondaryContext.KubernetesClient(t).CoreV1().Endpoints("default").Get(context.Background(), "kubernetes", metav1.GetOptions{}) - require.NoError(t, err) - k8sAuthMethodHost = fmt.Sprintf("%s:%d", kubernetesEndpoint.Subsets[0].Addresses[0].IP, kubernetesEndpoint.Subsets[0].Ports[0].Port) - } else { - k8sAuthMethodHost = k8s.KubernetesAPIServerHostFromOptions(t, secondaryContext.KubectlOptions(t)) - } - - // Create secondary cluster - secondaryHelmValues := map[string]string{ - "global.datacenter": "dc2", - - "global.tls.enabled": "true", - "global.tls.httpsOnly": "false", - "global.acls.manageSystemACLs": strconv.FormatBool(c.secure), - "global.tls.caCert.secretName": federationSecretName, - "global.tls.caCert.secretKey": "caCert", - "global.tls.caKey.secretName": federationSecretName, - "global.tls.caKey.secretKey": "caKey", - - "global.federation.enabled": "true", - - "server.extraVolumes[0].type": "secret", - "server.extraVolumes[0].name": federationSecretName, - "server.extraVolumes[0].load": "true", - "server.extraVolumes[0].items[0].key": "serverConfigJSON", - "server.extraVolumes[0].items[0].path": "config.json", - - "connectInject.enabled": "true", - "connectInject.replicas": "1", - "controller.enabled": "true", - - "meshGateway.enabled": "true", - "meshGateway.replicas": "1", - } - - if c.secure { - secondaryHelmValues["global.acls.replicationToken.secretName"] = federationSecretName - secondaryHelmValues["global.acls.replicationToken.secretKey"] = "replicationToken" - secondaryHelmValues["global.federation.k8sAuthMethodHost"] = k8sAuthMethodHost - secondaryHelmValues["global.federation.primaryDatacenter"] = "dc1" - } - - if cfg.UseKind { - secondaryHelmValues["meshGateway.service.type"] = "NodePort" - secondaryHelmValues["meshGateway.service.nodePort"] = "30000" - } - - // Install the secondary consul cluster in the secondary kubernetes context - secondaryConsulCluster := consul.NewHelmCluster(t, secondaryHelmValues, secondaryContext, cfg, releaseName) - secondaryConsulCluster.Create(t) - - primaryClient, _ := primaryConsulCluster.SetupConsulClient(t, c.secure) - secondaryClient, _ := secondaryConsulCluster.SetupConsulClient(t, c.secure) - - // Verify federation between servers - logger.Log(t, "verifying federation was successful") - helpers.VerifyFederation(t, primaryClient, secondaryClient, releaseName, c.secure) - - // Create a ProxyDefaults resource to configure services to use the mesh - // gateways. 
- logger.Log(t, "creating proxy-defaults config") - kustomizeDir := "../fixtures/bases/mesh-gateway" - k8s.KubectlApplyK(t, secondaryContext.KubectlOptions(t), kustomizeDir) - helpers.Cleanup(t, cfg.NoCleanupOnFailure, func() { - k8s.KubectlDeleteK(t, secondaryContext.KubectlOptions(t), kustomizeDir) - }) - - // Check that we can connect services over the mesh gateways - logger.Log(t, "creating static-server in dc2") - k8s.DeployKustomize(t, secondaryContext.KubectlOptions(t), cfg.NoCleanupOnFailure, cfg.DebugDirectory, "../fixtures/cases/static-server-inject") - - logger.Log(t, "creating static-client in dc1") - k8s.DeployKustomize(t, primaryContext.KubectlOptions(t), cfg.NoCleanupOnFailure, cfg.DebugDirectory, "../fixtures/cases/static-client-multi-dc") - - if c.secure { - logger.Log(t, "creating intention") - _, _, err = primaryClient.ConfigEntries().Set(&api.ServiceIntentionsConfigEntry{ - Kind: api.ServiceIntentions, - Name: "static-server", - Sources: []*api.SourceIntention{ - { - Name: StaticClientName, - Action: api.IntentionActionAllow, - }, - }, - }, nil) - require.NoError(t, err) - } - - logger.Log(t, "checking that connection is successful") - k8s.CheckStaticServerConnectionSuccessful(t, primaryContext.KubectlOptions(t), StaticClientName, "http://localhost:1234") - }) - } -} diff --git a/charts/consul/Chart.yaml b/charts/consul/Chart.yaml index 57f4be8492..8b7e2da587 100644 --- a/charts/consul/Chart.yaml +++ b/charts/consul/Chart.yaml @@ -1,7 +1,7 @@ apiVersion: v2 name: consul -version: 1.0.0-dev -appVersion: 1.14.0-beta1 +version: 0.49.0 +appVersion: 1.13.2 kubeVersion: ">=1.21.0-0" description: Official HashiCorp Consul Chart home: https://www.consul.io @@ -10,12 +10,12 @@ sources: - https://github.com/hashicorp/consul - https://github.com/hashicorp/consul-k8s annotations: - artifacthub.io/prerelease: true + artifacthub.io/prerelease: false artifacthub.io/images: | - name: consul - image: hashicorp/consul:1.14.0-beta1 + image: hashicorp/consul:1.13.2 - name: consul-k8s-control-plane - image: docker.mirror.hashicorp.services/hashicorppreview/consul-k8s-control-plane:1.0.0-dev + image: hashicorp/consul-k8s-control-plane:0.49.0 - name: envoy image: envoyproxy/envoy:v1.23.1 artifacthub.io/license: MPL-2.0 diff --git a/charts/consul/README.md b/charts/consul/README.md index 4467df8df2..ccc695151d 100644 --- a/charts/consul/README.md +++ b/charts/consul/README.md @@ -30,7 +30,7 @@ by contacting us at [security@hashicorp.com](mailto:security@hashicorp.com). non-Kubernetes nodes to easily discover and access Kubernetes services. ### Prerequisites - * **Helm 3.6+** + * **Helm 3.2+** (Helm 2 is not supported) * **Kubernetes 1.21-1.24** - This is the earliest version of Kubernetes tested. It is possible that this chart works with earlier versions but it is untested. diff --git a/charts/consul/templates/_helpers.tpl b/charts/consul/templates/_helpers.tpl index 070be333a1..02b2adb39e 100644 --- a/charts/consul/templates/_helpers.tpl +++ b/charts/consul/templates/_helpers.tpl @@ -148,6 +148,19 @@ is passed to consul as a -config-file param on command line. [ -n "${HOSTNAME}" ] && sed -Ei "s|HOSTNAME|${HOSTNAME?}|g" /consul/extra-config/extra-from-values.json {{- end -}} +{{/* +Sets up a list of recusor flags for Consul agents by iterating over the IPs of every nameserver +in /etc/resolv.conf and concatenating them into a string of arguments that can be passed directly +to the consul agent command. 
+*/}} +{{- define "consul.recursors" -}} + recursor_flags="" + for ip in $(cat /etc/resolv.conf | grep nameserver | cut -d' ' -f2) + do + recursor_flags="$recursor_flags -recursor=$ip" + done +{{- end -}} + {{/* Create chart name and version as used by the chart label. */}} @@ -223,9 +236,6 @@ This template is for an init container. consul-k8s-control-plane get-consul-client-ca \ -output-file=/consul/tls/client/ca/tls.crt \ -consul-api-timeout={{ .Values.global.consulAPITimeout }} \ - {{- if .Values.global.cloud.enabled }} - -tls-server-name=server.{{.Values.global.datacenter}}.{{.Values.global.domain}} \ - {{- end}} {{- if .Values.externalServers.enabled }} {{- if and .Values.externalServers.enabled (not .Values.externalServers.hosts) }}{{ fail "externalServers.hosts must be set if externalServers.enabled is true" }}{{ end -}} -server-addr={{ quote (first .Values.externalServers.hosts) }} \ @@ -302,106 +312,3 @@ Usage: {{ template "consul.validateVaultWebhookCertConfiguration" . }} {{ end }} {{ end }} {{- end -}} - -{{/* -Consul server environment variables for consul-k8s commands. -*/}} -{{- define "consul.consulK8sConsulServerEnvVars" -}} -- name: CONSUL_ADDRESSES - {{- if .Values.externalServers.enabled }} - value: {{ .Values.externalServers.hosts | first }} - {{- else }} - value: {{ template "consul.fullname" . }}-server.{{ .Release.Namespace }}.svc - {{- end }} -- name: CONSUL_GRPC_PORT - {{- if .Values.externalServers.enabled }} - value: "{{ .Values.externalServers.grpcPort }}" - {{- else }} - value: "8502" - {{- end }} -- name: CONSUL_HTTP_PORT - {{- if .Values.externalServers.enabled }} - value: "{{ .Values.externalServers.httpsPort }}" - {{- else if .Values.global.tls.enabled }} - value: "8501" - {{- else }} - value: "8500" - {{- end }} -- name: CONSUL_DATACENTER - value: {{ .Values.global.datacenter }} -- name: CONSUL_API_TIMEOUT - value: {{ .Values.global.consulAPITimeout }} -{{- if .Values.global.adminPartitions.enabled }} -- name: CONSUL_PARTITION - value: {{ .Values.global.adminPartitions.name }} -{{- if .Values.global.acls.manageSystemACLs }} -- name: CONSUL_LOGIN_PARTITION - value: {{ .Values.global.adminPartitions.name }} -{{- end }} -{{- end }} -{{- if .Values.global.tls.enabled }} -- name: CONSUL_USE_TLS - value: "true" -{{- if (not (and .Values.externalServers.enabled .Values.externalServers.useSystemRoots)) }} -- name: CONSUL_CACERT_FILE - {{- if .Values.global.secretsBackend.vault.enabled }} - value: "/vault/secrets/serverca.crt" - {{- else }} - value: "/consul/tls/ca/tls.crt" - {{- end }} -{{- end }} -{{- if and .Values.externalServers.enabled .Values.externalServers.tlsServerName }} -- name: CONSUL_TLS_SERVER_NAME - value: {{ .Values.externalServers.tlsServerName }} -{{- end }} -{{- end }} -{{- end -}} - -{{/* -Fails global.cloud.enabled is true and one of the following secrets is nil or empty. -- global.cloud.resourceId.secretName -- global.cloud.clientId.secretName -- global.cloud.clientSecret.secretName - -Usage: {{ template "consul.validateRequiredCloudSecretsExist" . 
}} - -*/}} -{{- define "consul.validateRequiredCloudSecretsExist" -}} -{{- if (and .Values.global.cloud.enabled (or (not .Values.global.cloud.resourceId.secretName) (not .Values.global.cloud.clientId.secretName) (not .Values.global.cloud.clientSecret.secretName))) }} -{{fail "When global.cloud.enabled is true, global.cloud.resourceId.secretName, global.cloud.clientId.secretName, and global.cloud.clientSecret.secretName must also be set."}} -{{- end }} -{{- end -}} - -{{/* -Fails global.cloud.enabled is true and one of the following secrets has either an empty secretName or secretKey. -- global.cloud.resourceId.secretName / secretKey -- global.cloud.clientId.secretName / secretKey -- global.cloud.clientSecret.secretName / secretKey -- global.cloud.authUrl.secretName / secretKey -- global.cloud.apiHost.secretName / secretKey -- global.cloud.scadaAddress.secretName / secretKey -Usage: {{ template "consul.validateCloudSecretKeys" . }} - -*/}} -{{- define "consul.validateCloudSecretKeys" -}} -{{- if and .Values.global.cloud.enabled }} -{{- if or (and .Values.global.cloud.resourceId.secretName (not .Values.global.cloud.resourceId.secretKey)) (and .Values.global.cloud.resourceId.secretKey (not .Values.global.cloud.resourceId.secretName)) }} -{{fail "When either global.cloud.resourceId.secretName or global.cloud.resourceId.secretKey is defined, both must be set."}} -{{- end }} -{{- if or (and .Values.global.cloud.clientId.secretName (not .Values.global.cloud.clientId.secretKey)) (and .Values.global.cloud.clientId.secretKey (not .Values.global.cloud.clientId.secretName)) }} -{{fail "When either global.cloud.clientId.secretName or global.cloud.clientId.secretKey is defined, both must be set."}} -{{- end }} -{{- if or (and .Values.global.cloud.clientSecret.secretName (not .Values.global.cloud.clientSecret.secretKey)) (and .Values.global.cloud.clientSecret.secretKey (not .Values.global.cloud.clientSecret.secretName)) }} -{{fail "When either global.cloud.clientSecret.secretName or global.cloud.clientSecret.secretKey is defined, both must be set."}} -{{- end }} -{{- if or (and .Values.global.cloud.authUrl.secretName (not .Values.global.cloud.authUrl.secretKey)) (and .Values.global.cloud.authUrl.secretKey (not .Values.global.cloud.authUrl.secretName)) }} -{{fail "When either global.cloud.authUrl.secretName or global.cloud.authUrl.secretKey is defined, both must be set."}} -{{- end }} -{{- if or (and .Values.global.cloud.apiHost.secretName (not .Values.global.cloud.apiHost.secretKey)) (and .Values.global.cloud.apiHost.secretKey (not .Values.global.cloud.apiHost.secretName)) }} -{{fail "When either global.cloud.apiHost.secretName or global.cloud.apiHost.secretKey is defined, both must be set."}} -{{- end }} -{{- if or (and .Values.global.cloud.scadaAddress.secretName (not .Values.global.cloud.scadaAddress.secretKey)) (and .Values.global.cloud.scadaAddress.secretKey (not .Values.global.cloud.scadaAddress.secretName)) }} -{{fail "When either global.cloud.scadaAddress.secretName or global.cloud.scadaAddress.secretKey is defined, both must be set."}} -{{- end }} -{{- end }} -{{- end -}} diff --git a/charts/consul/templates/api-gateway-controller-deployment.yaml b/charts/consul/templates/api-gateway-controller-deployment.yaml index 0a41101e7d..604a017ecf 100644 --- a/charts/consul/templates/api-gateway-controller-deployment.yaml +++ b/charts/consul/templates/api-gateway-controller-deployment.yaml @@ -2,8 +2,6 @@ {{- if not .Values.client.grpc }}{{ fail "client.grpc must be true for api gateway" }}{{ end }} {{- 
if not .Values.apiGateway.image}}{{ fail "apiGateway.image must be set to enable api gateway" }}{{ end }} {{- if and .Values.global.adminPartitions.enabled (not .Values.global.enableConsulNamespaces) }}{{ fail "global.enableConsulNamespaces must be true if global.adminPartitions.enabled=true" }}{{ end }} -{{ template "consul.validateRequiredCloudSecretsExist" . }} -{{ template "consul.validateCloudSecretKeys" . }} apiVersion: apps/v1 kind: Deployment metadata: diff --git a/charts/consul/templates/client-daemonset.yaml b/charts/consul/templates/client-daemonset.yaml index 38be15d12f..36edc70ddb 100644 --- a/charts/consul/templates/client-daemonset.yaml +++ b/charts/consul/templates/client-daemonset.yaml @@ -10,8 +10,6 @@ {{- if (and .Values.global.enterpriseLicense.secretName (not .Values.global.enterpriseLicense.secretKey)) }}{{fail "enterpriseLicense.secretKey and secretName must both be specified." }}{{ end -}} {{- if (and (not .Values.global.enterpriseLicense.secretName) .Values.global.enterpriseLicense.secretKey) }}{{fail "enterpriseLicense.secretKey and secretName must both be specified." }}{{ end -}} {{- if and .Values.externalServers.enabled (not .Values.externalServers.hosts) }}{{ fail "externalServers.hosts must be set if externalServers.enabled is true" }}{{ end -}} -{{ template "consul.validateRequiredCloudSecretsExist" . }} -{{ template "consul.validateCloudSecretKeys" . }} # DaemonSet to run the Consul clients on every node. apiVersion: apps/v1 kind: DaemonSet @@ -271,6 +269,9 @@ spec: {{- if and .Values.global.secretsBackend.vault.enabled .Values.global.gossipEncryption.secretName }} GOSSIP_KEY=`cat /vault/secrets/gossip.txt` {{- end }} + {{- if (and .Values.dns.enabled .Values.dns.enableRedirection) }} + {{ template "consul.recursors" }} + {{- end }} {{ template "consul.extraconfig" }} @@ -376,6 +377,9 @@ spec: {{- range $value := .Values.global.recursors }} -recursor={{ quote $value }} \ {{- end }} + {{- if (and .Values.dns.enabled .Values.dns.enableRedirection) }} + $recursor_flags \ + {{- end }} -config-file=/consul/extra-config/extra-from-values.json \ -domain={{ .Values.global.domain }} volumeMounts: @@ -521,8 +525,6 @@ spec: {{- if .Values.externalServers.tlsServerName }} -tls-server-name={{ .Values.externalServers.tlsServerName }} \ {{- end }} - {{- else if .Values.global.cloud.enabled }} - -tls-server-name=server.{{ .Values.global.datacenter}}.{{ .Values.global.domain}} \ {{- end }} -consul-api-timeout={{ .Values.global.consulAPITimeout }} \ -init-type="client" diff --git a/charts/consul/templates/client-snapshot-agent-deployment.yaml b/charts/consul/templates/client-snapshot-agent-deployment.yaml new file mode 100644 index 0000000000..19ffff23c0 --- /dev/null +++ b/charts/consul/templates/client-snapshot-agent-deployment.yaml @@ -0,0 +1,281 @@ +{{- if (or (and (ne (.Values.client.enabled | toString) "-") .Values.client.enabled) (and (eq (.Values.client.enabled | toString) "-") .Values.global.enabled)) }} +{{- if or (and .Values.client.snapshotAgent.configSecret.secretName (not .Values.client.snapshotAgent.configSecret.secretKey)) (and (not .Values.client.snapshotAgent.configSecret.secretName) .Values.client.snapshotAgent.configSecret.secretKey) }}{{fail "client.snapshotAgent.configSecret.secretKey and client.snapshotAgent.configSecret.secretName must both be specified." 
}}{{ end -}} +{{- if .Values.client.snapshotAgent.enabled }} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ template "consul.fullname" . }}-snapshot-agent + namespace: {{ .Release.Namespace }} + labels: + app: {{ template "consul.name" . }} + chart: {{ template "consul.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + component: client-snapshot-agent +spec: + replicas: {{ .Values.client.snapshotAgent.replicas }} + selector: + matchLabels: + app: {{ template "consul.name" . }} + chart: {{ template "consul.chart" . }} + release: {{ .Release.Name }} + component: client-snapshot-agent + template: + metadata: + labels: + app: {{ template "consul.name" . }} + chart: {{ template "consul.chart" . }} + release: {{ .Release.Name }} + component: client-snapshot-agent + annotations: + "consul.hashicorp.com/connect-inject": "false" + {{- if .Values.global.secretsBackend.vault.enabled }} + {{- if .Values.client.snapshotAgent.configSecret.secretName }} + "vault.hashicorp.com/role": {{ .Values.global.secretsBackend.vault.consulSnapshotAgentRole }} + {{- else if and .Values.global.tls.enabled }} + "vault.hashicorp.com/role": {{ .Values.global.secretsBackend.vault.consulCARole }} + {{- end }} + {{- if .Values.global.tls.enabled }} + "vault.hashicorp.com/agent-init-first": "true" + "vault.hashicorp.com/agent-inject": "true" + "vault.hashicorp.com/agent-inject-secret-serverca.crt": {{ .Values.global.tls.caCert.secretName }} + "vault.hashicorp.com/agent-inject-template-serverca.crt": {{ template "consul.serverTLSCATemplate" . }} + {{- if and .Values.global.secretsBackend.vault.ca.secretName .Values.global.secretsBackend.vault.ca.secretKey }} + "vault.hashicorp.com/agent-extra-secret": "{{ .Values.global.secretsBackend.vault.ca.secretName }}" + "vault.hashicorp.com/ca-cert": "/vault/custom/{{ .Values.global.secretsBackend.vault.ca.secretKey }}" + {{- end }} + {{- if .Values.global.secretsBackend.vault.agentAnnotations }} + {{ tpl .Values.global.secretsBackend.vault.agentAnnotations . | nindent 8 | trim }} + {{- end }} + {{- end }} + {{- if .Values.global.enterpriseLicense.secretName }} + {{- with .Values.global.enterpriseLicense }} + "vault.hashicorp.com/agent-inject-secret-enterpriselicense.txt": "{{ .secretName }}" + "vault.hashicorp.com/agent-inject-template-enterpriselicense.txt": {{ template "consul.vaultSecretTemplate" . }} + {{- end }} + {{- end }} + {{- if .Values.client.snapshotAgent.configSecret.secretName }} + {{- with .Values.client.snapshotAgent.configSecret }} + "vault.hashicorp.com/agent-inject-secret-snapshot-agent-config.json": "{{ .secretName }}" + "vault.hashicorp.com/agent-inject-template-snapshot-agent-config.json": {{ template "consul.vaultSecretTemplate" . }} + {{- end }} + {{- end }} + {{- end }} + spec: + {{- if .Values.client.tolerations }} + tolerations: + {{ tpl .Values.client.tolerations . | nindent 8 | trim }} + {{- end }} + terminationGracePeriodSeconds: 10 + serviceAccountName: {{ template "consul.fullname" . 
}}-snapshot-agent + {{- if .Values.client.priorityClassName }} + priorityClassName: {{ .Values.client.priorityClassName | quote }} + {{- end }} + volumes: + {{- if .Values.client.snapshotAgent.caCert }} + - name: extra-ssl-certs + emptyDir: + medium: "Memory" + {{- end }} + {{- if (or .Values.global.acls.manageSystemACLs .Values.global.tls.enabled (and .Values.client.snapshotAgent.configSecret.secretName .Values.client.snapshotAgent.configSecret.secretKey) (and .Values.global.enterpriseLicense.secretName .Values.global.enterpriseLicense.secretKey .Values.global.enterpriseLicense.enableLicenseAutoload)) }} + - name: consul-data + emptyDir: + medium: "Memory" + {{- if (and .Values.client.snapshotAgent.configSecret.secretName .Values.client.snapshotAgent.configSecret.secretKey (not .Values.global.secretsBackend.vault.enabled)) }} + - name: snapshot-config + secret: + secretName: {{ .Values.client.snapshotAgent.configSecret.secretName }} + items: + - key: {{ .Values.client.snapshotAgent.configSecret.secretKey }} + path: snapshot-config.json + {{- end }} + {{- if (and .Values.global.enterpriseLicense.secretName .Values.global.enterpriseLicense.secretKey .Values.global.enterpriseLicense.enableLicenseAutoload (not .Values.global.secretsBackend.vault.enabled) (not .Values.global.acls.manageSystemACLs)) }} + - name: consul-license + secret: + secretName: {{ .Values.global.enterpriseLicense.secretName }} + {{- end }} + {{- if .Values.global.tls.enabled }} + {{- if not (and .Values.externalServers.enabled .Values.externalServers.useSystemRoots) }} + - name: consul-ca-cert + secret: + {{- if .Values.global.tls.caCert.secretName }} + secretName: {{ .Values.global.tls.caCert.secretName }} + {{- else }} + secretName: {{ template "consul.fullname" . }}-ca-cert + {{- end }} + items: + - key: {{ default "tls.crt" .Values.global.tls.caCert.secretKey }} + path: tls.crt + {{- end }} + {{- if .Values.global.tls.enableAutoEncrypt }} + - name: consul-auto-encrypt-ca-cert + emptyDir: + medium: "Memory" + {{- end }} + {{- end }} + {{- end }} + containers: + - name: consul-snapshot-agent + image: "{{ default .Values.global.image .Values.client.image }}" + env: + {{- if .Values.client.snapshotAgent.caCert }} + - name: SSL_CERT_DIR + value: "/etc/ssl/certs:/extra-ssl-certs" + {{- end }} + - name: HOST_IP + valueFrom: + fieldRef: + fieldPath: status.hostIP + {{- if .Values.global.tls.enabled }} + - name: CONSUL_HTTP_ADDR + value: https://$(HOST_IP):8501 + - name: CONSUL_CACERT + value: /consul/tls/ca/tls.crt + {{- else }} + - name: CONSUL_HTTP_ADDR + value: http://$(HOST_IP):8500 + {{- end }} + {{- if .Values.global.acls.manageSystemACLs }} + - name: CONSUL_HTTP_TOKEN_FILE + value: /consul/login/acl-token + {{- else }} + {{- if (and .Values.global.enterpriseLicense.secretName .Values.global.enterpriseLicense.secretKey .Values.global.enterpriseLicense.enableLicenseAutoload) }} + - name: CONSUL_LICENSE_PATH + {{- if .Values.global.secretsBackend.vault.enabled }} + value: /vault/secrets/enterpriselicense.txt + {{- else }} + value: /consul/license/{{ .Values.global.enterpriseLicense.secretKey }} + {{- end }} + {{- end }} + {{- end }} + command: + - "/bin/sh" + - "-ec" + - | + {{- if .Values.client.snapshotAgent.caCert }} + cat <<EOF > /extra-ssl-certs/custom-ca.pem + {{- .Values.client.snapshotAgent.caCert | nindent 14 }} + EOF + {{- end }} + exec /bin/consul snapshot agent \ + -interval={{ .Values.client.snapshotAgent.interval }} \ + {{- if (and .Values.client.snapshotAgent.configSecret.secretName 
.Values.client.snapshotAgent.configSecret.secretKey) }} + {{- if .Values.global.secretsBackend.vault.enabled }} + -config-file=/vault/secrets/snapshot-agent-config.json \ + {{- else }} + -config-dir=/consul/config \ + {{- end }} + {{- end }} + {{- if .Values.global.acls.manageSystemACLs }} + -config-dir=/consul/login \ + {{- end }} + volumeMounts: + {{- if .Values.client.snapshotAgent.caCert }} + - name: extra-ssl-certs + mountPath: /extra-ssl-certs + readOnly: false + {{- end }} + {{- if (or .Values.global.acls.manageSystemACLs .Values.global.tls.enabled (and .Values.client.snapshotAgent.configSecret.secretName .Values.client.snapshotAgent.configSecret.secretKey) (and .Values.global.enterpriseLicense.secretName .Values.global.enterpriseLicense.secretKey .Values.global.enterpriseLicense.enableLicenseAutoload)) }} + {{- if (and .Values.client.snapshotAgent.configSecret.secretName .Values.client.snapshotAgent.configSecret.secretKey (not .Values.global.secretsBackend.vault.enabled)) }} + - name: snapshot-config + mountPath: /consul/config + readOnly: true + {{- end }} + - mountPath: /consul/login + name: consul-data + readOnly: true + {{- if (and .Values.global.enterpriseLicense.secretName .Values.global.enterpriseLicense.secretKey .Values.global.enterpriseLicense.enableLicenseAutoload (not .Values.global.secretsBackend.vault.enabled) (not .Values.global.acls.manageSystemACLs))}} + - name: consul-license + mountPath: /consul/license + readOnly: true + {{- end }} + {{- if .Values.global.tls.enabled }} + {{- if .Values.global.tls.enableAutoEncrypt}} + - name: consul-auto-encrypt-ca-cert + {{- else }} + - name: consul-ca-cert + {{- end }} + mountPath: /consul/tls/ca + readOnly: true + {{- end }} + {{- end }} + {{- if .Values.global.acls.manageSystemACLs }} + lifecycle: + preStop: + exec: + command: + - "/bin/sh" + - "-ec" + - | + /bin/consul logout + {{- end }} + {{- with .Values.client.snapshotAgent.resources }} + resources: + {{- toYaml . | nindent 12 }} + {{- end }} + {{- if (or .Values.global.acls.manageSystemACLs (and .Values.global.tls.enabled .Values.global.tls.enableAutoEncrypt)) }} + initContainers: + {{- if (and .Values.global.tls.enabled .Values.global.tls.enableAutoEncrypt) }} + {{- include "consul.getAutoEncryptClientCA" . | nindent 6 }} + {{- end }} + {{- if .Values.global.acls.manageSystemACLs }} + - name: snapshot-agent-acl-init + env: + - name: HOST_IP + valueFrom: + fieldRef: + fieldPath: status.hostIP + {{- if .Values.global.tls.enabled }} + - name: CONSUL_CACERT + value: /consul/tls/ca/tls.crt + {{- end }} + - name: CONSUL_HTTP_ADDR + {{- if .Values.global.tls.enabled }} + value: https://$(HOST_IP):8501 + {{- else }} + value: http://$(HOST_IP):8500 + {{- end }} + image: {{ .Values.global.imageK8S }} + volumeMounts: + - mountPath: /consul/login + name: consul-data + readOnly: false + {{- if .Values.global.tls.enabled }} + {{- if .Values.global.tls.enableAutoEncrypt }} + - name: consul-auto-encrypt-ca-cert + {{- else }} + - name: consul-ca-cert + {{- end }} + mountPath: /consul/tls/ca + readOnly: true + {{- end }} + command: + - "/bin/sh" + - "-ec" + - | + consul-k8s-control-plane acl-init \ + -component-name=snapshot-agent \ + -acl-auth-method={{ template "consul.fullname" . 
}}-k8s-component-auth-method \ + {{- if .Values.global.adminPartitions.enabled }} + -partition={{ .Values.global.adminPartitions.name }} \ + {{- end }} + -token-sink-file=/consul/login/acl-token \ + -consul-api-timeout={{ .Values.global.consulAPITimeout }} \ + -log-level={{ default .Values.global.logLevel }} \ + -log-json={{ .Values.global.logJSON }} + resources: + requests: + memory: "25Mi" + cpu: "50m" + limits: + memory: "25Mi" + cpu: "50m" + {{- end }} + {{- end }} + {{- if .Values.client.nodeSelector }} + nodeSelector: + {{ tpl .Values.client.nodeSelector . | indent 8 | trim }} + {{- end }} +{{- end }} +{{- end }} diff --git a/charts/consul/templates/client-snapshot-agent-podsecuritypolicy.yaml b/charts/consul/templates/client-snapshot-agent-podsecuritypolicy.yaml new file mode 100644 index 0000000000..dd324a3971 --- /dev/null +++ b/charts/consul/templates/client-snapshot-agent-podsecuritypolicy.yaml @@ -0,0 +1,42 @@ +{{- if (and .Values.global.enablePodSecurityPolicies (or (and (ne (.Values.client.enabled | toString) "-") .Values.client.enabled) (and (eq (.Values.client.enabled | toString) "-") .Values.global.enabled))) }} +{{- if .Values.client.snapshotAgent.enabled }} +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: {{ template "consul.fullname" . }}-snapshot-agent + namespace: {{ .Release.Namespace }} + labels: + app: {{ template "consul.name" . }} + chart: {{ template "consul.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + component: client-snapshot-agent +spec: + privileged: false + # Required to prevent escalations to root. + allowPrivilegeEscalation: false + # This is redundant with non-root + disallow privilege escalation, + # but we can provide it for defense in depth. + requiredDropCapabilities: + - ALL + # Allow core volume types. + volumes: + - 'configMap' + - 'emptyDir' + - 'projected' + - 'secret' + - 'downwardAPI' + hostNetwork: false + hostIPC: false + hostPID: false + runAsUser: + rule: 'RunAsAny' + seLinux: + rule: 'RunAsAny' + supplementalGroups: + rule: 'RunAsAny' + fsGroup: + rule: 'RunAsAny' + readOnlyRootFilesystem: false +{{- end }} +{{- end }} diff --git a/charts/consul/templates/client-snapshot-agent-role.yaml b/charts/consul/templates/client-snapshot-agent-role.yaml new file mode 100644 index 0000000000..3077bc96f0 --- /dev/null +++ b/charts/consul/templates/client-snapshot-agent-role.yaml @@ -0,0 +1,26 @@ +{{- if (or (and (ne (.Values.client.enabled | toString) "-") .Values.client.enabled) (and (eq (.Values.client.enabled | toString) "-") .Values.global.enabled)) }} +{{- if .Values.client.snapshotAgent.enabled }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: {{ template "consul.fullname" . }}-snapshot-agent + namespace: {{ .Release.Namespace }} + labels: + app: {{ template "consul.name" . }} + chart: {{ template "consul.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + component: client-snapshot-agent +{{- if .Values.global.enablePodSecurityPolicies }} +rules: +- apiGroups: [ "policy" ] + resources: [ "podsecuritypolicies" ] + resourceNames: + - {{ template "consul.fullname" . 
}}-snapshot-agent + verbs: + - use +{{- else }} +rules: [ ] +{{- end }} +{{- end }} +{{- end }} diff --git a/charts/consul/templates/client-snapshot-agent-rolebinding.yaml b/charts/consul/templates/client-snapshot-agent-rolebinding.yaml new file mode 100644 index 0000000000..e966c4e2a8 --- /dev/null +++ b/charts/consul/templates/client-snapshot-agent-rolebinding.yaml @@ -0,0 +1,22 @@ +{{- if (or (and (ne (.Values.client.enabled | toString) "-") .Values.client.enabled) (and (eq (.Values.client.enabled | toString) "-") .Values.global.enabled)) }} +{{- if .Values.client.snapshotAgent.enabled }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: {{ template "consul.fullname" . }}-snapshot-agent + namespace: {{ .Release.Namespace }} + labels: + app: {{ template "consul.name" . }} + chart: {{ template "consul.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + component: client-snapshot-agent +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: {{ template "consul.fullname" . }}-snapshot-agent +subjects: + - kind: ServiceAccount + name: {{ template "consul.fullname" . }}-snapshot-agent +{{- end }} +{{- end }} diff --git a/charts/consul/templates/client-snapshot-agent-serviceaccount.yaml b/charts/consul/templates/client-snapshot-agent-serviceaccount.yaml new file mode 100644 index 0000000000..a485ff0a5c --- /dev/null +++ b/charts/consul/templates/client-snapshot-agent-serviceaccount.yaml @@ -0,0 +1,25 @@ +{{- if (or (and (ne (.Values.client.enabled | toString) "-") .Values.client.enabled) (and (eq (.Values.client.enabled | toString) "-") .Values.global.enabled)) }} +{{- if .Values.client.snapshotAgent.enabled }} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "consul.fullname" . }}-snapshot-agent + namespace: {{ .Release.Namespace }} + labels: + app: {{ template "consul.name" . }} + chart: {{ template "consul.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + component: client-snapshot-agent + {{- if .Values.client.snapshotAgent.serviceAccount.annotations }} + annotations: + {{ tpl .Values.client.snapshotAgent.serviceAccount.annotations . | nindent 4 | trim }} + {{- end }} +{{- with .Values.global.imagePullSecrets }} +imagePullSecrets: +{{- range . 
}} + - name: {{ .name }} +{{- end }} +{{- end }} +{{- end }} +{{- end }} diff --git a/charts/consul/templates/connect-inject-deployment.yaml b/charts/consul/templates/connect-inject-deployment.yaml index 01b792b0f0..779dceb572 100644 --- a/charts/consul/templates/connect-inject-deployment.yaml +++ b/charts/consul/templates/connect-inject-deployment.yaml @@ -1,15 +1,19 @@ {{- if and .Values.global.peering.enabled (not .Values.connectInject.enabled) }}{{ fail "setting global.peering.enabled to true requires connectInject.enabled to be true" }}{{ end }} -{{- if and .Values.global.peering.enabled (not .Values.global.tls.enabled) }}{{ fail "setting global.peering.enabled to true requires global.tls.enabled to be true" }}{{ end }} -{{- if and .Values.global.peering.enabled (not .Values.meshGateway.enabled) }}{{ fail "setting global.peering.enabled to true requires meshGateway.enabled to be true" }}{{ end }} {{- if (or (and (ne (.Values.connectInject.enabled | toString) "-") .Values.connectInject.enabled) (and (eq (.Values.connectInject.enabled | toString) "-") .Values.global.enabled)) }} +{{- if not (or (and (ne (.Values.client.enabled | toString) "-") .Values.client.enabled) (and (eq (.Values.client.enabled | toString) "-") .Values.global.enabled)) }}{{ fail "clients must be enabled for connect injection" }}{{ end }} +{{- if not .Values.client.grpc }}{{ fail "client.grpc must be true for connect injection" }}{{ end }} +{{- if and .Values.connectInject.consulNamespaces.mirroringK8S (not .Values.global.enableConsulNamespaces) }}{{ fail "global.enableConsulNamespaces must be true if mirroringK8S=true" }}{{ end }} {{- if and .Values.global.adminPartitions.enabled (not .Values.global.enableConsulNamespaces) }}{{ fail "global.enableConsulNamespaces must be true if global.adminPartitions.enabled=true" }}{{ end }} +{{- if .Values.connectInject.centralConfig }}{{- if eq (toString .Values.connectInject.centralConfig.enabled) "false" }}{{ fail "connectInject.centralConfig.enabled cannot be set to false; to disable, set enable_central_service_config to false in server.extraConfig and client.extraConfig" }}{{ end -}}{{ end -}} +{{- if .Values.connectInject.centralConfig }}{{- if .Values.connectInject.centralConfig.defaultProtocol }}{{ fail "connectInject.centralConfig.defaultProtocol is no longer supported; instead you must migrate to CRDs (see www.consul.io/docs/k8s/crds/upgrade-to-crds)" }}{{ end }}{{ end -}} +{{- if .Values.connectInject.centralConfig }}{{ if .Values.connectInject.centralConfig.proxyDefaults }}{{- if ne (trim .Values.connectInject.centralConfig.proxyDefaults) `{}` }}{{ fail "connectInject.centralConfig.proxyDefaults is no longer supported; instead you must migrate to CRDs (see www.consul.io/docs/k8s/crds/upgrade-to-crds)" }}{{ end }}{{ end }}{{ end -}} +{{- if .Values.connectInject.imageEnvoy }}{{ fail "connectInject.imageEnvoy must be specified in global.imageEnvoy" }}{{ end }} +{{- if .Values.global.lifecycleSidecarContainer }}{{ fail "global.lifecycleSidecarContainer has been renamed to global.consulSidecarContainer. Please set values using global.consulSidecarContainer." }}{{ end }} {{ template "consul.validateVaultWebhookCertConfiguration" . 
}} {{- template "consul.reservedNamesFailer" (list .Values.connectInject.consulNamespaces.consulDestinationNamespace "connectInject.consulNamespaces.consulDestinationNamespace") }} {{- $serverEnabled := (or (and (ne (.Values.server.enabled | toString) "-") .Values.server.enabled) (and (eq (.Values.server.enabled | toString) "-") .Values.global.enabled)) -}} -{{- $serverExposeServiceEnabled := (or (and (ne (.Values.server.exposeService.enabled | toString) "-") .Values.server.exposeService.enabled) (and (eq (.Values.server.exposeService.enabled | toString) "-") .Values.global.adminPartitions.enabled)) -}} -{{- if and .Values.externalServers.enabled (not .Values.externalServers.hosts) }}{{ fail "externalServers.hosts must be set if externalServers.enabled is true" }}{{ end -}} -{{ template "consul.validateRequiredCloudSecretsExist" . }} -{{ template "consul.validateCloudSecretKeys" . }} +{{- $serverExposeServiceEnabled := (or (and (ne (.Values.server.exposeService.enabled | toString) "-") .Values.server.exposeService.enabled) (and (eq (.Values.server.exposeService.enabled | toString) "-") (or .Values.global.peering.enabled .Values.global.adminPartitions.enabled))) -}} +{{- if not (or (eq .Values.global.peering.tokenGeneration.serverAddresses.source "") (or (eq .Values.global.peering.tokenGeneration.serverAddresses.source "static") (eq .Values.global.peering.tokenGeneration.serverAddresses.source "consul"))) }}{{ fail "global.peering.tokenGeneration.serverAddresses.source must be one of empty string, 'consul' or 'static'" }}{{ end }} # The deployment for running the Connect sidecar injector apiVersion: apps/v1 kind: Deployment @@ -89,47 +93,42 @@ spec: valueFrom: fieldRef: fieldPath: metadata.namespace - - name: POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - {{- include "consul.consulK8sConsulServerEnvVars" . | nindent 12 }} {{- if .Values.global.acls.manageSystemACLs }} - - name: CONSUL_LOGIN_AUTH_METHOD - {{- if and .Values.global.federation.enabled .Values.global.federation.primaryDatacenter .Values.global.enableConsulNamespaces }} - value: {{ template "consul.fullname" . }}-k8s-component-auth-method-{{ .Values.global.datacenter }} - {{- else }} - value: {{ template "consul.fullname" . 
}}-k8s-component-auth-method - {{- end }} - - name: CONSUL_LOGIN_DATACENTER - {{- if and .Values.global.federation.enabled .Values.global.federation.primaryDatacenter .Values.global.enableConsulNamespaces }} - value: {{ .Values.global.federation.primaryDatacenter }} - {{- else }} - value: {{ .Values.global.datacenter }} - {{- end }} - - name: CONSUL_LOGIN_META - value: "component=connect-injector,pod=$(NAMESPACE)/$(POD_NAME)" + - name: CONSUL_HTTP_TOKEN_FILE + value: "/consul/login/acl-token" {{- end }} + {{- if .Values.global.tls.enabled }} + - name: CONSUL_CACERT + value: /consul/tls/ca/tls.crt + {{- end }} + - name: HOST_IP + valueFrom: + fieldRef: + fieldPath: status.hostIP {{- if (and .Values.connectInject.aclInjectToken.secretName .Values.connectInject.aclInjectToken.secretKey) }} - - name: CONSUL_ACL_TOKEN + - name: CONSUL_HTTP_TOKEN valueFrom: secretKeyRef: name: {{ .Values.connectInject.aclInjectToken.secretName }} key: {{ .Values.connectInject.aclInjectToken.secretKey }} {{- end }} + - name: CONSUL_HTTP_ADDR + {{- if .Values.global.tls.enabled }} + value: https://$(HOST_IP):8501 + {{- else }} + value: http://$(HOST_IP):8500 + {{- end }} command: - "/bin/sh" - "-ec" - | consul-k8s-control-plane inject-connect \ - {{- if .Values.global.federation.enabled }} - -enable-federation \ - {{- end }} + -consul-api-timeout={{ .Values.global.consulAPITimeout }} \ -log-level={{ default .Values.global.logLevel .Values.connectInject.logLevel }} \ -log-json={{ .Values.global.logJSON }} \ -default-inject={{ .Values.connectInject.default }} \ -consul-image="{{ default .Values.global.image .Values.connectInject.imageConsul }}" \ - -consul-dataplane-image="{{ .Values.global.imageConsulDataplane }}" \ + -envoy-image="{{ .Values.global.imageEnvoy }}" \ -consul-k8s-image="{{ default .Values.global.imageK8S .Values.connectInject.image }}" \ -release-name="{{ .Release.Name }}" \ -release-namespace="{{ .Release.Namespace }}" \ @@ -143,6 +142,23 @@ spec: -enable-cni={{ .Values.connectInject.cni.enabled }} \ {{- if .Values.global.peering.enabled }} -enable-peering=true \ + {{- if (eq .Values.global.peering.tokenGeneration.serverAddresses.source "") }} + {{- if (and $serverEnabled $serverExposeServiceEnabled) }} + -read-server-expose-service=true \ + {{- else }} + {{- if .Values.externalServers.enabled }} + {{- $port := .Values.externalServers.grpcPort }} + {{- range $h := .Values.externalServers.hosts }} + -token-server-address="{{ $h }}:{{ $port }}" \ + {{- end }} + {{- end }} + {{- end }} + {{- end }} + {{- if (eq .Values.global.peering.tokenGeneration.serverAddresses.source "static") }} + {{- range $addr := .Values.global.peering.tokenGeneration.serverAddresses.static }} + -token-server-address="{{ $addr }}" \ + {{- end }} + {{- end }} {{- end }} {{- if .Values.global.openshift.enabled }} -enable-openshift \ @@ -163,7 +179,6 @@ spec: {{- else }} -default-enable-metrics=false \ {{- end }} - -enable-gateway-metrics={{ .Values.global.metrics.enableGatewayMetrics }} \ -default-enable-metrics-merging={{ .Values.connectInject.metrics.defaultEnableMerging }} \ -default-merged-metrics-port={{ .Values.connectInject.metrics.defaultMergedMetricsPort }} \ -default-prometheus-scrape-port={{ .Values.connectInject.metrics.defaultPrometheusScrapePort }} \ @@ -184,13 +199,14 @@ spec: {{- end }} {{- if .Values.global.adminPartitions.enabled }} -enable-partitions=true \ + -partition={{ .Values.global.adminPartitions.name }} \ {{- end }} {{- if .Values.global.enableConsulNamespaces }} -enable-namespaces=true \ {{- if 
.Values.connectInject.consulNamespaces.consulDestinationNamespace }} -consul-destination-namespace={{ .Values.connectInject.consulNamespaces.consulDestinationNamespace }} \ {{- end }} - {{- if and .Values.global.enableConsulNamespaces .Values.connectInject.consulNamespaces.mirroringK8S }} + {{- if .Values.connectInject.consulNamespaces.mirroringK8S }} -enable-k8s-namespace-mirroring=true \ {{- if .Values.connectInject.consulNamespaces.mirroringK8SPrefix }} -k8s-namespace-mirroring-prefix={{ .Values.connectInject.consulNamespaces.mirroringK8SPrefix }} \ @@ -253,15 +269,21 @@ spec: -default-consul-sidecar-cpu-request={{ $consulSidecarResources.requests.cpu }} \ {{- end }} {{- end }} - {{- if .Values.global.cloud.enabled }} - -tls-server-name=server.{{ .Values.global.datacenter}}.{{ .Values.global.domain}} \ - {{- end }} + {{- if .Values.global.acls.manageSystemACLs }} + lifecycle: + preStop: + exec: + command: + - "/bin/sh" + - "-ec" + - | + consul-k8s-control-plane consul-logout -consul-api-timeout={{ .Values.global.consulAPITimeout }} + {{- end }} startupProbe: httpGet: path: /readyz/ready port: 9445 scheme: HTTP - initialDelaySeconds: 30 failureThreshold: 15 periodSeconds: 2 timeoutSeconds: 5 @@ -289,8 +311,15 @@ spec: mountPath: /etc/connect-injector/certs readOnly: true {{- end }} - {{- if and .Values.global.tls.enabled (not (and .Values.externalServers.enabled .Values.externalServers.useSystemRoots))}} + - mountPath: /consul/login + name: consul-data + readOnly: true + {{- if .Values.global.tls.enabled }} + {{- if .Values.global.tls.enableAutoEncrypt }} + - name: consul-auto-encrypt-ca-cert + {{- else }} - name: consul-ca-cert + {{- end }} mountPath: /consul/tls/ca readOnly: true {{- end }} @@ -305,6 +334,9 @@ spec: defaultMode: 420 secretName: {{ template "consul.fullname" . }}-connect-inject-webhook-cert {{- end }} + - name: consul-data + emptyDir: + medium: "Memory" {{- if .Values.global.tls.enabled }} {{- if not (and .Values.externalServers.enabled .Values.externalServers.useSystemRoots) }} - name: consul-ca-cert @@ -318,6 +350,74 @@ spec: - key: {{ default "tls.crt" .Values.global.tls.caCert.secretKey }} path: tls.crt {{- end }} + {{- if .Values.global.tls.enableAutoEncrypt }} + - name: consul-auto-encrypt-ca-cert + emptyDir: + medium: "Memory" + {{- end }} + {{- end }} + {{- if or (and .Values.global.acls.manageSystemACLs) (and .Values.global.tls.enabled .Values.global.tls.enableAutoEncrypt) }} + initContainers: + {{- if and .Values.global.tls.enabled .Values.global.tls.enableAutoEncrypt }} + {{- include "consul.getAutoEncryptClientCA" . 
| nindent 6 }} + {{- end }} + {{- if .Values.global.acls.manageSystemACLs }} + - name: connect-injector-acl-init + env: + - name: HOST_IP + valueFrom: + fieldRef: + fieldPath: status.hostIP + {{- if .Values.global.tls.enabled }} + - name: CONSUL_CACERT + value: /consul/tls/ca/tls.crt + {{- end }} + - name: CONSUL_HTTP_ADDR + {{- if .Values.global.tls.enabled }} + value: https://$(HOST_IP):8501 + {{- else }} + value: http://$(HOST_IP):8500 + {{- end }} + image: {{ .Values.global.imageK8S }} + volumeMounts: + - mountPath: /consul/login + name: consul-data + readOnly: false + {{- if .Values.global.tls.enabled }} + {{- if .Values.global.tls.enableAutoEncrypt }} + - name: consul-auto-encrypt-ca-cert + {{- else }} + - name: consul-ca-cert + {{- end }} + mountPath: /consul/tls/ca + readOnly: true + {{- end }} + command: + - "/bin/sh" + - "-ec" + - | + consul-k8s-control-plane acl-init \ + -component-name=connect-injector \ + {{- if and .Values.global.federation.enabled .Values.global.federation.primaryDatacenter .Values.global.enableConsulNamespaces }} + -acl-auth-method={{ template "consul.fullname" . }}-k8s-component-auth-method-{{ .Values.global.datacenter }} \ + -primary-datacenter={{ .Values.global.federation.primaryDatacenter }} \ + {{- else }} + -acl-auth-method={{ template "consul.fullname" . }}-k8s-component-auth-method \ + {{- end }} + {{- if .Values.global.adminPartitions.enabled }} + -partition={{ .Values.global.adminPartitions.name }} \ + {{- end }} + -consul-api-timeout={{ .Values.global.consulAPITimeout }} \ + -log-level={{ default .Values.global.logLevel .Values.connectInject.logLevel }} \ + -log-json={{ .Values.global.logJSON }} + resources: + requests: + memory: "25Mi" + cpu: "50m" + limits: + memory: "25Mi" + cpu: "50m" + {{- end }} {{- end }} {{- if .Values.connectInject.priorityClassName }} priorityClassName: {{ .Values.connectInject.priorityClassName | quote }} diff --git a/charts/consul/templates/connect-injector-disruptionbudget.yaml b/charts/consul/templates/connect-injector-disruptionbudget.yaml index 9b9cf2e39e..08f1401fbe 100644 --- a/charts/consul/templates/connect-injector-disruptionbudget.yaml +++ b/charts/consul/templates/connect-injector-disruptionbudget.yaml @@ -17,11 +17,7 @@ metadata: release: {{ .Release.Name }} component: connect-injector spec: - {{- if .Values.connectInject.disruptionBudget.minAvailable }} - minAvailable: {{ .Values.connectInject.disruptionBudget.minAvailable }} - {{- else }} maxUnavailable: {{ template "consul.pdb.connectInject.maxUnavailable" . }} - {{- end }} selector: matchLabels: app: {{ template "consul.name" . }} diff --git a/charts/consul/templates/controller-deployment.yaml b/charts/consul/templates/controller-deployment.yaml index c8b884d2c9..44b13553bc 100644 --- a/charts/consul/templates/controller-deployment.yaml +++ b/charts/consul/templates/controller-deployment.yaml @@ -1,9 +1,6 @@ {{- if .Values.controller.enabled }} {{- if and .Values.global.adminPartitions.enabled (not .Values.global.enableConsulNamespaces) }}{{ fail "global.enableConsulNamespaces must be true if global.adminPartitions.enabled=true" }}{{ end }} -{{- if and .Values.externalServers.enabled (not .Values.externalServers.hosts) }}{{ fail "externalServers.hosts must be set if externalServers.enabled is true" }}{{ end -}} {{ template "consul.validateVaultWebhookCertConfiguration" . }} -{{ template "consul.validateRequiredCloudSecretsExist" . }} -{{ template "consul.validateCloudSecretKeys" . 
}} apiVersion: apps/v1 kind: Deployment metadata: @@ -68,12 +65,76 @@ spec: {{- end }} {{- end }} spec: + {{- if or .Values.global.acls.manageSystemACLs (and .Values.global.tls.enabled .Values.global.tls.enableAutoEncrypt) }} + initContainers: + {{- if and .Values.global.tls.enabled .Values.global.tls.enableAutoEncrypt }} + {{- include "consul.getAutoEncryptClientCA" . | nindent 6 }} + {{- end }} + {{- if .Values.global.acls.manageSystemACLs }} + - name: controller-acl-init + env: + - name: HOST_IP + valueFrom: + fieldRef: + fieldPath: status.hostIP + {{- if .Values.global.tls.enabled }} + - name: CONSUL_CACERT + value: /consul/tls/ca/tls.crt + {{- end }} + - name: CONSUL_HTTP_ADDR + {{- if .Values.global.tls.enabled }} + value: https://$(HOST_IP):8501 + {{- else }} + value: http://$(HOST_IP):8500 + {{- end }} + image: {{ .Values.global.imageK8S }} + volumeMounts: + - mountPath: /consul/login + name: consul-data + readOnly: false + {{- if .Values.global.tls.enabled }} + {{- if .Values.global.tls.enableAutoEncrypt }} + - name: consul-auto-encrypt-ca-cert + {{- else }} + - name: consul-ca-cert + {{- end }} + mountPath: /consul/tls/ca + readOnly: true + {{- end }} + command: + - "/bin/sh" + - "-ec" + - | + consul-k8s-control-plane acl-init \ + -component-name=controller \ + {{- if and .Values.global.federation.enabled .Values.global.federation.primaryDatacenter }} + -acl-auth-method={{ template "consul.fullname" . }}-k8s-component-auth-method-{{ .Values.global.datacenter }} \ + -primary-datacenter={{ .Values.global.federation.primaryDatacenter }} \ + {{- else }} + -acl-auth-method={{ template "consul.fullname" . }}-k8s-component-auth-method \ + {{- end }} + {{- if .Values.global.adminPartitions.enabled }} + -partition={{ .Values.global.adminPartitions.name }} \ + {{- end }} + -consul-api-timeout={{ .Values.global.consulAPITimeout }} \ + -log-level={{ default .Values.global.logLevel .Values.controller.logLevel }} \ + -log-json={{ .Values.global.logJSON }} + resources: + requests: + memory: "25Mi" + cpu: "50m" + limits: + memory: "25Mi" + cpu: "50m" + {{- end }} + {{- end }} containers: - command: - "/bin/sh" - "-ec" - | consul-k8s-control-plane controller \ + -consul-api-timeout={{ .Values.global.consulAPITimeout }} \ -log-level={{ default .Values.global.logLevel .Values.controller.logLevel }} \ -log-json={{ .Values.global.logJSON }} \ -resource-prefix={{ template "consul.fullname" . }} \ @@ -83,6 +144,10 @@ spec: {{- else }} -webhook-tls-cert-dir=/tmp/controller-webhook/certs \ {{- end }} + -datacenter={{ .Values.global.datacenter }} \ + {{- if .Values.global.adminPartitions.enabled }} + -partition={{ .Values.global.adminPartitions.name }} \ + {{- end }} -enable-leader-election \ {{- if .Values.global.enableConsulNamespaces }} -enable-namespaces=true \ @@ -99,42 +164,42 @@ spec: -consul-cross-namespace-acl-policy=cross-namespace-policy \ {{- end }} {{- end }} - {{- if .Values.global.cloud.enabled }} - -tls-server-name=server.{{ .Values.global.datacenter}}.{{ .Values.global.domain}} \ - {{- end }} + {{- if .Values.global.acls.manageSystemACLs }} + lifecycle: + preStop: + exec: + command: + - "/bin/sh" + - "-ec" + - | + consul-k8s-control-plane consul-logout -consul-api-timeout={{ .Values.global.consulAPITimeout }} + {{- end }} env: - - name: NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - {{- include "consul.consulK8sConsulServerEnvVars" . 
| nindent 8 }} {{- if .Values.global.acls.manageSystemACLs }} - - name: CONSUL_LOGIN_AUTH_METHOD - {{- if and .Values.global.federation.enabled .Values.global.federation.primaryDatacenter }} - value: {{ template "consul.fullname" . }}-k8s-component-auth-method-{{ .Values.global.datacenter }} - {{- else }} - value: {{ template "consul.fullname" . }}-k8s-component-auth-method - {{- end }} - - name: CONSUL_LOGIN_DATACENTER - {{- if and .Values.global.federation.enabled .Values.global.federation.primaryDatacenter }} - value: {{ .Values.global.federation.primaryDatacenter }} - {{- else }} - value: {{ .Values.global.datacenter }} - {{- end }} - - name: CONSUL_LOGIN_META - value: "component=controller,pod=$(POD_NAME)" + - name: CONSUL_HTTP_TOKEN_FILE + value: "/consul/login/acl-token" {{- end }} + - name: HOST_IP + valueFrom: + fieldRef: + fieldPath: status.hostIP {{- if (and .Values.controller.aclToken.secretName .Values.controller.aclToken.secretKey) }} - - name: CONSUL_ACL_TOKEN + - name: CONSUL_HTTP_TOKEN valueFrom: secretKeyRef: name: {{ .Values.controller.aclToken.secretName }} key: {{ .Values.controller.aclToken.secretKey }} {{- end }} + {{- if .Values.global.tls.enabled }} + - name: CONSUL_CACERT + value: /consul/tls/ca/tls.crt + {{- end }} + - name: CONSUL_HTTP_ADDR + {{- if .Values.global.tls.enabled }} + value: https://$(HOST_IP):8501 + {{- else }} + value: http://$(HOST_IP):8500 + {{- end }} image: {{ .Values.global.imageK8S }} name: controller ports: @@ -145,28 +210,21 @@ spec: resources: {{- toYaml . | nindent 12 }} {{- end }} - startupProbe: - tcpSocket: - port: 9443 - initialDelaySeconds: 30 - failureThreshold: 15 - periodSeconds: 2 - timeoutSeconds: 5 - readinessProbe: - tcpSocket: - port: 9443 - failureThreshold: 2 - initialDelaySeconds: 2 - successThreshold: 1 - timeoutSeconds: 5 volumeMounts: + - mountPath: /consul/login + name: consul-data + readOnly: true {{- if not (and .Values.global.secretsBackend.vault.enabled .Values.global.secretsBackend.vault.controller.tlsCert.secretName) }} - mountPath: /tmp/controller-webhook/certs name: cert readOnly: true {{- end }} - {{- if and .Values.global.tls.enabled (not (and .Values.externalServers.enabled .Values.externalServers.useSystemRoots))}} + {{- if .Values.global.tls.enabled }} + {{- if .Values.global.tls.enableAutoEncrypt }} + - name: consul-auto-encrypt-ca-cert + {{- else }} - name: consul-ca-cert + {{- end }} mountPath: /consul/tls/ca readOnly: true {{- end }} @@ -191,7 +249,15 @@ spec: - key: {{ default "tls.crt" .Values.global.tls.caCert.secretKey }} path: tls.crt {{- end }} + {{- if .Values.global.tls.enableAutoEncrypt }} + - name: consul-auto-encrypt-ca-cert + emptyDir: + medium: "Memory" + {{- end }} {{- end }} + - name: consul-data + emptyDir: + medium: "Memory" serviceAccountName: {{ template "consul.fullname" . }}-controller {{- if .Values.controller.nodeSelector }} nodeSelector: diff --git a/charts/consul/templates/crd-meshes.yaml b/charts/consul/templates/crd-meshes.yaml index 0c6b9fa039..b795e48d33 100644 --- a/charts/consul/templates/crd-meshes.yaml +++ b/charts/consul/templates/crd-meshes.yaml @@ -63,18 +63,6 @@ spec: required: - sanitizeXForwardedClientCert type: object - peering: - description: Peering defines the peering configuration for the service - mesh. - properties: - peerThroughMeshGateways: - description: PeerThroughMeshGateways determines whether peering - traffic between control planes should flow through mesh gateways. - If enabled, Consul servers will advertise mesh gateway addresses - as their own. 
Additionally, mesh gateways will configure themselves - to expose the local servers using a peering-specific SNI. - type: boolean - type: object tls: description: TLS defines the TLS configuration for the service mesh. properties: diff --git a/charts/consul/templates/crd-servicedefaults.yaml b/charts/consul/templates/crd-servicedefaults.yaml index 1cf42673a8..636e8ce51f 100644 --- a/charts/consul/templates/crd-servicedefaults.yaml +++ b/charts/consul/templates/crd-servicedefaults.yaml @@ -113,17 +113,6 @@ spec: TLS SNI value to be changed to a non-connect value when federating with an external system. type: string - localConnectTimeoutMs: - description: The number of milliseconds allowed to make connections - to the local application instance before timing out. Defaults to - 5000. - type: integer - localRequestTimeoutMs: - description: In milliseconds, the timeout for HTTP requests to the - local application instance. Applies to HTTP-based protocols only. - If not specified, inherits the Envoy default for route timeouts - (15s). - type: integer maxInboundConnections: description: MaxInboundConnections is the maximum number of concurrent inbound connections to each service instance. Defaults to 0 (using diff --git a/charts/consul/templates/crd-serviceresolvers.yaml b/charts/consul/templates/crd-serviceresolvers.yaml index ab2d2e125a..c06063f318 100644 --- a/charts/consul/templates/crd-serviceresolvers.yaml +++ b/charts/consul/templates/crd-serviceresolvers.yaml @@ -88,37 +88,6 @@ spec: service to resolve as the failover group of instances. If empty the default subset for the requested service is used. type: string - targets: - description: Targets specifies a fixed list of failover targets - to try during failover. - items: - properties: - datacenter: - description: Datacenter specifies the datacenter to try - during failover. - type: string - namespace: - description: Namespace specifies the namespace to try - during failover. - type: string - partition: - description: Partition specifies the partition to try - during failover. - type: string - peer: - description: Peer specifies the name of the cluster peer - to try during failover. - type: string - service: - description: Service specifies the name of the service - to try during failover. - type: string - serviceSubset: - description: ServiceSubset specifies the service subset - to try during failover. - type: string - type: object - type: array type: object description: Failover controls when and how to reroute traffic to an alternate pool of service instances. The map is keyed by the @@ -228,10 +197,6 @@ spec: service from instead of the current partition. If empty the current partition is assumed. type: string - peer: - description: Peer is the name of the cluster peer to resolve the - service from instead of the current one. - type: string service: description: Service is a service to resolve instead of the current service. 
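For reference, the peering token-generation flags added to the connect-inject deployment earlier in this patch are driven entirely by global.peering.tokenGeneration.serverAddresses. The following minimal values.yaml sketch is illustrative only and is not part of this patch: the accepted source values and key paths come from the template logic above, while the concrete addresses are invented, and the host:port form of the static entries is an assumption inferred from the externalServers branch (which appends externalServers.grpcPort to each host).

global:
  peering:
    enabled: true
    tokenGeneration:
      serverAddresses:
        # Must be "", "consul", or "static" (enforced by the fail above).
        #   ""       -> if the server expose service is enabled, the injector gets
        #               -read-server-expose-service=true; otherwise, with external
        #               servers, each externalServers.hosts entry is passed as
        #               -token-server-address="<host>:<grpcPort>"
        #   "consul" -> no address flags are emitted in this hunk; addresses are
        #               sourced from Consul itself
        #   "static" -> each entry below is passed verbatim via -token-server-address
        source: "static"
        static:            # assumed host:port strings (example values)
          - "10.0.0.1:8502"
          - "10.0.0.2:8502"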
diff --git a/charts/consul/templates/create-federation-secret-job.yaml b/charts/consul/templates/create-federation-secret-job.yaml index 40b81957d1..6bc2b9f4d8 100644 --- a/charts/consul/templates/create-federation-secret-job.yaml +++ b/charts/consul/templates/create-federation-secret-job.yaml @@ -2,8 +2,6 @@ {{- if not .Values.global.federation.enabled }}{{ fail "global.federation.enabled must be true when global.federation.createFederationSecret is true" }}{{ end }} {{- if and (not .Values.global.acls.createReplicationToken) .Values.global.acls.manageSystemACLs }}{{ fail "global.acls.createReplicationToken must be true when global.acls.manageSystemACLs is true because the federation secret must include the replication token" }}{{ end }} {{- if eq (int .Values.server.updatePartition) 0 }} -{{ template "consul.validateRequiredCloudSecretsExist" . }} -{{ template "consul.validateCloudSecretKeys" . }} apiVersion: batch/v1 kind: Job metadata: @@ -68,6 +66,13 @@ spec: items: - key: {{ default "tls.key" .Values.global.tls.caKey.secretKey }} path: tls.key + {{- /* We must include both the auto-encrypt and server CAs because we make API calls to the local + Consul client (requiring the auto-encrypt CA), but the generated secret must include the server CA */}} + {{- if .Values.global.tls.enableAutoEncrypt }} + - name: consul-auto-encrypt-ca-cert + emptyDir: + medium: "Memory" + {{- end }} {{- if (and .Values.global.gossipEncryption.secretName .Values.global.gossipEncryption.secretKey) }} - name: gossip-encryption-key secret: @@ -84,6 +89,11 @@ spec: path: gossip.key {{- end }} + {{- if .Values.global.tls.enableAutoEncrypt }} + initContainers: + {{- include "consul.getAutoEncryptClientCA" . | nindent 6 }} + {{- end }} + containers: - name: create-federation-secret image: "{{ .Values.global.imageK8S }}" @@ -92,10 +102,18 @@ spec: valueFrom: fieldRef: fieldPath: metadata.namespace + - name: HOST_IP + valueFrom: + fieldRef: + fieldPath: status.hostIP - name: CONSUL_HTTP_ADDR - value: "https://{{ template "consul.fullname" .
}}-server.{{ .Release.Namespace }}.svc:8501" + value: https://$(HOST_IP):8501 - name: CONSUL_CACERT + {{- if .Values.global.tls.enableAutoEncrypt }} + value: /consul/tls/client/ca/tls.crt + {{- else }} value: /consul/tls/ca/tls.crt + {{- end }} volumeMounts: - name: consul-ca-cert mountPath: /consul/tls/ca @@ -103,6 +121,11 @@ spec: - name: consul-ca-key mountPath: /consul/tls/server/ca readOnly: true + {{- if .Values.global.tls.enableAutoEncrypt }} + - name: consul-auto-encrypt-ca-cert + mountPath: /consul/tls/client/ca + readOnly: true + {{- end }} {{- if (or .Values.global.gossipEncryption.autoGenerate (and .Values.global.gossipEncryption.secretName .Values.global.gossipEncryption.secretKey)) }} - name: gossip-encryption-key mountPath: /consul/gossip diff --git a/charts/consul/templates/expose-servers-service.yaml b/charts/consul/templates/expose-servers-service.yaml index d86cec9042..fcfaf8038f 100644 --- a/charts/consul/templates/expose-servers-service.yaml +++ b/charts/consul/templates/expose-servers-service.yaml @@ -1,5 +1,5 @@ {{- $serverEnabled := (or (and (ne (.Values.server.enabled | toString) "-") .Values.server.enabled) (and (eq (.Values.server.enabled | toString) "-") .Values.global.enabled)) -}} -{{- $serverExposeServiceEnabled := (or (and (ne (.Values.server.exposeService.enabled | toString) "-") .Values.server.exposeService.enabled) (and (eq (.Values.server.exposeService.enabled | toString) "-") .Values.global.adminPartitions.enabled)) -}} +{{- $serverExposeServiceEnabled := (or (and (ne (.Values.server.exposeService.enabled | toString) "-") .Values.server.exposeService.enabled) (and (eq (.Values.server.exposeService.enabled | toString) "-") (or .Values.global.peering.enabled .Values.global.adminPartitions.enabled))) -}} {{- if (and $serverEnabled $serverExposeServiceEnabled) }} # Service with an external IP to reach Consul servers. @@ -52,7 +52,7 @@ spec: {{- end }} - name: grpc port: 8502 - targetPort: 8502 + targetPort: 8503 {{ if (and (eq .Values.server.exposeService.type "NodePort") .Values.server.exposeService.nodePort.grpc) }} nodePort: {{ .Values.server.exposeService.nodePort.grpc }} {{- end }} diff --git a/charts/consul/templates/ingress-gateways-deployment.yaml b/charts/consul/templates/ingress-gateways-deployment.yaml index a0efdceff9..77a4d9dcd8 100644 --- a/charts/consul/templates/ingress-gateways-deployment.yaml +++ b/charts/consul/templates/ingress-gateways-deployment.yaml @@ -1,9 +1,9 @@ {{- if .Values.ingressGateways.enabled }} {{- if not .Values.connectInject.enabled }}{{ fail "connectInject.enabled must be true" }}{{ end -}} +{{- if not .Values.client.grpc }}{{ fail "client.grpc must be true" }}{{ end -}} {{- if and .Values.global.adminPartitions.enabled (not .Values.global.enableConsulNamespaces) }}{{ fail "global.enableConsulNamespaces must be true if global.adminPartitions.enabled=true" }}{{ end }} +{{- if not (or (and (ne (.Values.client.enabled | toString) "-") .Values.client.enabled) (and (eq (.Values.client.enabled | toString) "-") .Values.global.enabled)) }}{{ fail "clients must be enabled" }}{{ end -}} {{- if .Values.global.lifecycleSidecarContainer }}{{ fail "global.lifecycleSidecarContainer has been renamed to global.consulSidecarContainer. Please set values using global.consulSidecarContainer." }}{{ end }} -{{ template "consul.validateRequiredCloudSecretsExist" . }} -{{ template "consul.validateCloudSecretKeys" . }} {{- $root := . 
}} {{- $defaults := .Values.ingressGateways.defaults }} @@ -65,47 +65,7 @@ spec: release: {{ $root.Release.Name }} component: ingress-gateway ingress-gateway-name: {{ template "consul.fullname" $root }}-{{ .name }} - consul.hashicorp.com/connect-inject-managed-by: consul-k8s-endpoints-controller annotations: - "consul.hashicorp.com/connect-inject": "false" - "consul.hashicorp.com/gateway-kind": "ingress-gateway" - "consul.hashicorp.com/gateway-consul-service-name": "{{ .name }}" - {{- if $root.Values.global.enableConsulNamespaces }} - "consul.hashicorp.com/gateway-namespace": {{ (default $defaults.consulNamespace .consulNamespace) }} - {{- end }} - "consul.hashicorp.com/gateway-wan-address-source": "Service" - {{- $serviceType := (default $defaults.service.type $service.type) }} - {{- if (eq $serviceType "NodePort") }} - {{- if $service.ports }} - {{- $firstPort := first $service.ports}} - {{- if $firstPort.nodePort }} - "consul.hashicorp.com/gateway-wan-port": "{{ $firstPort.nodePort }}" - {{- else }}{{ fail "if ingressGateways .service.type=NodePort and defining ingressGateways.gateways.service.ports, the first port entry must include a nodePort" }} - {{- end }} - {{- else if $defaults.service.ports }} - {{- $firstDefaultPort := first $defaults.service.ports}} - {{- if $firstDefaultPort.nodePort }} - "consul.hashicorp.com/gateway-wan-port": "{{ $firstDefaultPort.nodePort }}" - {{- else }}{{ fail "if ingressGateways .service.type=NodePort and using ingressGateways.defaults.service.ports, the first port entry must include a nodePort" }} - {{- end }} - {{- else }}{{ fail "if ingressGateways .service.type=NodePort, the first port entry in either the defaults or specific gateway must include a nodePort" }} - {{- end }} - {{- else }} - {{- if $service.ports }} - {{- $firstPort := first $service.ports}} - {{- if $firstPort.port }} - "consul.hashicorp.com/gateway-wan-port": "{{ $firstPort.port }}" - {{- else }}{{ fail "if ingressGateways .service.type is not NodePort and defining ingressGateways.gateways.service.ports, the first port entry must include a port" }} - {{- end }} - {{- else if $defaults.service.ports }} - {{- $firstDefaultPort := first $defaults.service.ports}} - {{- if $firstDefaultPort.port }} - "consul.hashicorp.com/gateway-wan-port": "{{ $firstDefaultPort.port }}" - {{- else }}{{ fail "if ingressGateways .service.type is not NodePort and using ingressGateways.defaults.service.ports, the first port entry must include a port" }} - {{- end }} - {{- else }}{{ fail "if ingressGateways .service.type is not NodePort, the first port entry in either the defaults or specific gateway must include a port" }} - {{- end }} - {{- end }} {{- if (and $root.Values.global.secretsBackend.vault.enabled $root.Values.global.tls.enabled) }} "vault.hashicorp.com/agent-init-first": "true" "vault.hashicorp.com/agent-inject": "true" @@ -120,6 +80,7 @@ spec: {{ tpl $root.Values.global.secretsBackend.vault.agentAnnotations $root | nindent 8 | trim }} {{- end }} {{- end }} + "consul.hashicorp.com/connect-inject": "false" {{- if (and $root.Values.global.metrics.enabled $root.Values.global.metrics.enableGatewayMetrics) }} "prometheus.io/scrape": "true" "prometheus.io/path": "/metrics" @@ -148,175 +109,374 @@ spec: terminationGracePeriodSeconds: {{ default $defaults.terminationGracePeriodSeconds .terminationGracePeriodSeconds }} serviceAccountName: {{ template "consul.fullname" $root }}-{{ .name }} volumes: - - name: consul-service - emptyDir: - medium: "Memory" - {{- if $root.Values.global.tls.enabled }} - {{- 
if not (and $root.Values.externalServers.enabled $root.Values.externalServers.useSystemRoots) }} - - name: consul-ca-cert - secret: - {{- if $root.Values.global.tls.caCert.secretName }} - secretName: {{ $root.Values.global.tls.caCert.secretName }} - {{- else }} - secretName: {{ template "consul.fullname" $root }}-ca-cert - {{- end }} - items: - - key: {{ default "tls.crt" $root.Values.global.tls.caCert.secretKey }} - path: tls.crt - {{- end }} - {{- end }} - initContainers: - # ingress-gateway-init registers the ingress gateway service with Consul. - - name: ingress-gateway-init - image: {{ $root.Values.global.imageK8S }} - env: - - name: NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - {{- include "consul.consulK8sConsulServerEnvVars" $root | nindent 8 }} - {{- if $root.Values.global.enableConsulNamespaces }} - - name: CONSUL_NAMESPACE - value: {{ (default $defaults.consulNamespace .consulNamespace) }} - {{- end }} - {{- if $root.Values.global.acls.manageSystemACLs }} - - name: CONSUL_LOGIN_AUTH_METHOD - value: {{ template "consul.fullname" $root }}-k8s-component-auth-method - - name: CONSUL_LOGIN_DATACENTER - value: {{ $root.Values.global.datacenter }} - - name: CONSUL_LOGIN_META - value: "component=ingress-gateway,pod=$(NAMESPACE)/$(POD_NAME)" - {{- end }} - command: - - "/bin/sh" - - "-ec" - - | - consul-k8s-control-plane connect-init -pod-name=${POD_NAME} -pod-namespace=${NAMESPACE} \ - -gateway-kind="ingress-gateway" \ - -consul-node-name="k8s-service-mesh" \ - -proxy-id-file=/consul/service/proxy-id \ - -service-name={{ template "consul.fullname" $root }}-{{ .name }} \ - -log-level={{ default $root.Values.global.logLevel }} \ - -log-json={{ $root.Values.global.logJSON }} - volumeMounts: + - name: consul-bin + emptyDir: {} - name: consul-service - mountPath: /consul/service - {{- if and $root.Values.global.tls.enabled (not (and $root.Values.externalServers.enabled $root.Values.externalServers.useSystemRoots)) }} + emptyDir: + medium: "Memory" + {{- if $root.Values.global.tls.enabled }} + {{- if not (and $root.Values.externalServers.enabled $root.Values.externalServers.useSystemRoots) }} - name: consul-ca-cert - mountPath: /consul/tls/ca - readOnly: true + secret: + {{- if $root.Values.global.tls.caCert.secretName }} + secretName: {{ $root.Values.global.tls.caCert.secretName }} + {{- else }} + secretName: {{ template "consul.fullname" $root }}-ca-cert + {{- end }} + items: + - key: {{ default "tls.crt" $root.Values.global.tls.caCert.secretKey }} + path: tls.crt {{- end }} - resources: - requests: - memory: "50Mi" - cpu: "50m" - limits: - memory: "50Mi" - cpu: "50m" - containers: - - name: ingress-gateway - image: {{ $root.Values.global.imageConsulDataplane | quote }} - {{- if (default $defaults.resources .resources) }} - resources: {{ toYaml (default $defaults.resources .resources) | nindent 10 }} + {{- if $root.Values.global.tls.enableAutoEncrypt }} + - name: consul-auto-encrypt-ca-cert + emptyDir: + medium: "Memory" {{- end }} - volumeMounts: - {{- if and $root.Values.global.tls.enabled (not (and $root.Values.externalServers.enabled $root.Values.externalServers.useSystemRoots)) }} - - name: consul-ca-cert - mountPath: /consul/tls/ca - readOnly: true {{- end }} - env: - - name: POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - - name: POD_IP - valueFrom: - fieldRef: - fieldPath: status.podIP - - name: NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - command: - - 
/bin/sh - - -ec - - | - consul-dataplane \ - -envoy-ready-bind-address=$POD_IP \ - -envoy-ready-bind-port=21000 \ - {{- if $root.Values.externalServers.enabled }} - -addresses={{ $root.Values.externalServers.hosts | first | quote }} \ + initContainers: + # We use the Envoy image as our base image so we use an init container to + # copy the Consul binary to a shared directory that can be used when + # starting Envoy. + - name: copy-consul-bin + image: {{ $root.Values.global.image | quote }} + command: + - cp + - /bin/consul + - /consul-bin/consul + volumeMounts: + - name: consul-bin + mountPath: /consul-bin + {{- $initContainer := .initCopyConsulContainer }} + {{- if (or $initContainer $defaults.initCopyConsulContainer) }} + {{- if (default $defaults.initCopyConsulContainer.resources $initContainer.resources) }} + resources: {{ toYaml (default $defaults.initCopyConsulContainer.resources $initContainer.resources) | nindent 12 }} + {{- end }} + {{- end }} + {{- if (and $root.Values.global.tls.enabled $root.Values.global.tls.enableAutoEncrypt) }} + {{- include "consul.getAutoEncryptClientCA" $root | nindent 8 }} + {{- end }} + # ingress-gateway-init registers the ingress gateway service with Consul. + - name: ingress-gateway-init + image: {{ $root.Values.global.imageK8S }} + env: + - name: HOST_IP + valueFrom: + fieldRef: + fieldPath: status.hostIP + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + {{- if $root.Values.global.tls.enabled }} + - name: CONSUL_HTTP_ADDR + value: https://$(HOST_IP):8501 + - name: CONSUL_CACERT + value: /consul/tls/ca/tls.crt {{- else }} - -addresses="{{ template "consul.fullname" $root }}-server.{{ $root.Release.Namespace }}.svc" \ + - name: CONSUL_HTTP_ADDR + value: http://$(HOST_IP):8500 {{- end }} - {{- if $root.Values.externalServers.enabled }} - -grpc-port={{ $root.Values.externalServers.grpcPort }} \ - {{- else }} - -grpc-port=8502 \ + command: + - "/bin/sh" + - "-ec" + - | + {{- if $root.Values.global.acls.manageSystemACLs }} + consul-k8s-control-plane acl-init \ + -component-name=ingress-gateway/{{ template "consul.fullname" $root }}-{{ .name }} \ + -acl-auth-method={{ template "consul.fullname" $root }}-k8s-component-auth-method \ + {{- if $root.Values.global.adminPartitions.enabled }} + -partition={{ $root.Values.global.adminPartitions.name }} \ + {{- end }} + -token-sink-file=/consul/service/acl-token \ + -consul-api-timeout={{ $root.Values.global.consulAPITimeout }} \ + -log-level={{ default $root.Values.global.logLevel }} \ + -log-json={{ $root.Values.global.logJSON }} + {{ end }} + + {{- $serviceType := (default $defaults.service.type $service.type) }} + {{- if (eq $serviceType "NodePort") }} + WAN_ADDR="${HOST_IP}" + {{- else if (or (eq $serviceType "ClusterIP") (eq $serviceType "LoadBalancer")) }} + consul-k8s-control-plane service-address \ + -log-level={{ $root.Values.global.logLevel }} \ + -log-json={{ $root.Values.global.logJSON }} \ + -k8s-namespace={{ $root.Release.Namespace }} \ + -name={{ template "consul.fullname" $root }}-{{ .name }} \ + -output-file=/tmp/address.txt + WAN_ADDR="$(cat /tmp/address.txt)" + {{- else }} + {{- fail "currently set ingressGateway value service.type is not supported" }} + {{- end }} + + {{- if (eq $serviceType "NodePort") }} + {{- if $service.ports }} + {{- $firstPort := first $service.ports}} + {{- if $firstPort.nodePort }} + WAN_PORT={{ $firstPort.nodePort }} + {{- else }}{{ fail "if ingressGateways .service.type=NodePort 
and defining ingressGateways.gateways.service.ports, the first port entry must include a nodePort" }} + {{- end }} + {{- else if $defaults.service.ports }} + {{- $firstDefaultPort := first $defaults.service.ports}} + {{- if $firstDefaultPort.nodePort }} + WAN_PORT={{ $firstDefaultPort.nodePort }} + {{- else }}{{ fail "if ingressGateways .service.type=NodePort and using ingressGateways.defaults.service.ports, the first port entry must include a nodePort" }} + {{- end }} + {{- else }}{{ fail "if ingressGateways .service.type=NodePort, the first port entry in either the defaults or specific gateway must include a nodePort" }} {{- end }} - -proxy-service-id=$POD_NAME \ - -service-node-name="k8s-service-mesh" \ - {{- if $root.Values.global.enableConsulNamespaces }} - -service-namespace={{ (default $defaults.consulNamespace .consulNamespace) }} \ + + {{- else }} + {{- if $service.ports }} + {{- $firstPort := first $service.ports}} + {{- if $firstPort.port }} + WAN_PORT={{ $firstPort.port }} + {{- else }}{{ fail "if ingressGateways .service.type is not NodePort and defining ingressGateways.gateways.service.ports, the first port entry must include a port" }} + {{- end }} + {{- else if $defaults.service.ports }} + {{- $firstDefaultPort := first $defaults.service.ports}} + {{- if $firstDefaultPort.port }} + WAN_PORT={{ $firstDefaultPort.port }} + {{- else }}{{ fail "if ingressGateways .service.type is not NodePort and using ingressGateways.defaults.service.ports, the first port entry must include a port" }} + {{- end }} + {{- else }}{{ fail "if ingressGateways .service.type is not NodePort, the first port entry in either the defaults or specific gateway must include a port" }} {{- end }} - {{- if and $root.Values.global.tls.enabled }} - {{- if (not (and $root.Values.externalServers.enabled $root.Values.externalServers.useSystemRoots)) }} - -ca-certs=/consul/tls/ca/tls.crt \ + {{- end }} + + cat > /consul/service/service.hcl << EOF + service { + kind = "ingress-gateway" + name = "{{ .name }}" + id = "${POD_NAME}" + {{- if $root.Values.global.enableConsulNamespaces }} + namespace = "{{ (default $defaults.consulNamespace .consulNamespace) }}" + {{- end }} + {{- if $root.Values.global.adminPartitions.enabled }} + partition = "{{ $root.Values.global.adminPartitions.name }}" + {{- end }} + port = ${WAN_PORT} + address = "${WAN_ADDR}" + tagged_addresses { + lan { + address = "${POD_IP}" + port = 21000 + } + wan { + address = "${WAN_ADDR}" + port = ${WAN_PORT} + } + } + proxy { + config { + {{- if (and $root.Values.global.metrics.enabled $root.Values.global.metrics.enableGatewayMetrics) }} + envoy_prometheus_bind_addr = "${POD_IP}:20200" + {{- end }} + envoy_gateway_no_default_bind = true + envoy_gateway_bind_addresses { + all-interfaces { + address = "0.0.0.0" + } + } + } + } + checks = [ + { + name = "Ingress Gateway Listening" + interval = "10s" + tcp = "${POD_IP}:21000" + deregister_critical_service_after = "6h" + } + ] + } + EOF + + /consul-bin/consul services register \ + {{- if $root.Values.global.acls.manageSystemACLs }} + -token-file=/consul/service/acl-token \ + {{- end }} + /consul/service/service.hcl + volumeMounts: + - name: consul-service + mountPath: /consul/service + - name: consul-bin + mountPath: /consul-bin + {{- if $root.Values.global.tls.enabled }} + {{- if $root.Values.global.tls.enableAutoEncrypt }} + - name: consul-auto-encrypt-ca-cert + {{- else }} + - name: consul-ca-cert {{- end }} - {{- if and $root.Values.externalServers.enabled $root.Values.externalServers.tlsServerName }} - 
-tls-server-name={{ $root.Values.externalServers.tlsServerName }} \ - {{- else if $root.Values.global.cloud.enabled }} - -tls-server-name=server.{{ $root.Values.global.datacenter}}.{{ $root.Values.global.domain}} \ + mountPath: /consul/tls/ca + readOnly: true {{- end }} + resources: + requests: + memory: "50Mi" + cpu: "50m" + limits: + memory: "50Mi" + cpu: "50m" + containers: + - name: ingress-gateway + image: {{ $root.Values.global.imageEnvoy | quote }} + {{- if (default $defaults.resources .resources) }} + resources: {{ toYaml (default $defaults.resources .resources) | nindent 12 }} + {{- end }} + volumeMounts: + - name: consul-bin + mountPath: /consul-bin + - name: consul-service + mountPath: /consul/service + readOnly: true + {{- if $root.Values.global.tls.enabled }} + {{- if $root.Values.global.tls.enableAutoEncrypt }} + - name: consul-auto-encrypt-ca-cert + {{- else }} + - name: consul-ca-cert + {{- end }} + mountPath: /consul/tls/ca + readOnly: true + {{- end }} + env: + - name: HOST_IP + valueFrom: + fieldRef: + fieldPath: status.hostIP + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + {{- if $root.Values.global.acls.manageSystemACLs }} + - name: CONSUL_HTTP_TOKEN_FILE + value: "/consul/service/acl-token" + {{- end}} + {{- if $root.Values.global.tls.enabled }} + - name: CONSUL_HTTP_ADDR + value: https://$(HOST_IP):8501 + - name: CONSUL_GRPC_ADDR + value: https://$(HOST_IP):8502 + - name: CONSUL_CACERT + value: /consul/tls/ca/tls.crt {{- else }} - -tls-disabled \ + - name: CONSUL_HTTP_ADDR + value: http://$(HOST_IP):8500 + - name: CONSUL_GRPC_ADDR + value: $(HOST_IP):8502 + {{- end }} + command: + - /consul-bin/consul + - connect + - envoy + - -gateway=ingress + - -proxy-id=$(POD_NAME) + - -address=$(POD_IP):21000 + {{- if $root.Values.global.enableConsulNamespaces }} + - -namespace={{ default $defaults.consulNamespace .consulNamespace }} {{- end }} - {{- if $root.Values.global.acls.manageSystemACLs }} - -credential-type=login \ - -login-bearer-token-path=/var/run/secrets/kubernetes.io/serviceaccount/token \ - -login-meta=component=ingress-gateway \ - -login-meta=pod=${NAMESPACE}/${POD_NAME} \ - -login-auth-method={{ template "consul.fullname" $root }}-k8s-component-auth-method \ {{- if $root.Values.global.adminPartitions.enabled }} - -login-partition={{ $root.Values.global.adminPartitions.name }} \ + - -partition={{ $root.Values.global.adminPartitions.name }} {{- end }} + livenessProbe: + tcpSocket: + port: 21000 + failureThreshold: 3 + initialDelaySeconds: 30 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 5 + readinessProbe: + tcpSocket: + port: 21000 + failureThreshold: 3 + initialDelaySeconds: 10 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 5 + ports: + - name: gateway-health + containerPort: 21000 + {{- range $index, $allPorts := (default $defaults.service.ports $service.ports) }} + - name: gateway-{{ $index }} + containerPort: {{ $allPorts.port }} {{- end }} - {{- if $root.Values.global.adminPartitions.enabled }} - -service-partition={{ $root.Values.global.adminPartitions.name }} \ + lifecycle: + preStop: + exec: + command: + - "/bin/sh" + - "-ec" + - | + /consul-bin/consul services deregister \ + {{- if $root.Values.global.enableConsulNamespaces }} + -namespace={{ default $defaults.consulNamespace .consulNamespace }} \ + {{- end }} + {{- if $root.Values.global.adminPartitions.enabled }} + -partition={{ $root.Values.global.adminPartitions.name }} \ + {{- end 
}} + -id="${POD_NAME}" + {{- if $root.Values.global.acls.manageSystemACLs }} + - "/consul-bin/consul logout" + {{- end}} + + # consul-sidecar ensures the ingress gateway is always registered with + # the local Consul agent, even if it loses the initial registration. + - name: consul-sidecar + image: {{ $root.Values.global.imageK8S }} + volumeMounts: + - name: consul-service + mountPath: /consul/service + readOnly: true + - name: consul-bin + mountPath: /consul-bin + {{- if $root.Values.global.tls.enabled }} + {{- if $root.Values.global.tls.enableAutoEncrypt }} + - name: consul-auto-encrypt-ca-cert + {{- else }} + - name: consul-ca-cert {{- end }} - -log-level={{ default $root.Values.global.logLevel }} \ - -log-json={{ $root.Values.global.logJSON }} \ - {{- if (and $root.Values.global.metrics.enabled $root.Values.global.metrics.enableGatewayMetrics) }} - -telemetry-prom-scrape-path="/metrics" + mountPath: /consul/tls/ca + readOnly: true + {{- end }} + {{- if $root.Values.global.consulSidecarContainer }} + {{- if $root.Values.global.consulSidecarContainer.resources }} + resources: {{ toYaml $root.Values.global.consulSidecarContainer.resources | nindent 12 }} + {{- end }} + {{- end }} + env: + - name: HOST_IP + valueFrom: + fieldRef: + fieldPath: status.hostIP + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + {{- if $root.Values.global.tls.enabled }} + - name: CONSUL_HTTP_ADDR + value: https://$(HOST_IP):8501 + - name: CONSUL_CACERT + value: /consul/tls/ca/tls.crt + {{- else }} + - name: CONSUL_HTTP_ADDR + value: http://$(HOST_IP):8500 + {{- end }} + command: + - consul-k8s-control-plane + - consul-sidecar + - -log-level={{ $root.Values.global.logLevel }} + - -log-json={{ $root.Values.global.logJSON }} + - -service-config=/consul/service/service.hcl + - -consul-binary=/consul-bin/consul + - -consul-api-timeout={{ $root.Values.global.consulAPITimeout }} + {{- if $root.Values.global.acls.manageSystemACLs }} + - -token-file=/consul/service/acl-token {{- end }} - livenessProbe: - tcpSocket: - port: 21000 - failureThreshold: 3 - initialDelaySeconds: 30 - periodSeconds: 10 - successThreshold: 1 - timeoutSeconds: 5 - readinessProbe: - tcpSocket: - port: 21000 - failureThreshold: 3 - initialDelaySeconds: 10 - periodSeconds: 10 - successThreshold: 1 - timeoutSeconds: 5 - ports: - - name: gateway-health - containerPort: 21000 - {{- range $index, $allPorts := (default $defaults.service.ports $service.ports) }} - - name: gateway-{{ $index }} - containerPort: {{ $allPorts.port }} - {{- end }} {{- if (default $defaults.priorityClassName .priorityClassName) }} priorityClassName: {{ default $defaults.priorityClassName .priorityClassName | quote }} {{- end }} diff --git a/charts/consul/templates/mesh-gateway-deployment.yaml b/charts/consul/templates/mesh-gateway-deployment.yaml index 460bd06fa3..ffe0036ded 100644 --- a/charts/consul/templates/mesh-gateway-deployment.yaml +++ b/charts/consul/templates/mesh-gateway-deployment.yaml @@ -1,13 +1,13 @@ {{- if .Values.meshGateway.enabled }} {{- if not .Values.connectInject.enabled }}{{ fail "connectInject.enabled must be true" }}{{ end -}} +{{- if not .Values.client.grpc }}{{ fail "client.grpc must be true" }}{{ end -}} {{- if and .Values.global.acls.manageSystemACLs (ne .Values.meshGateway.consulServiceName "") (ne .Values.meshGateway.consulServiceName "mesh-gateway") }}{{ fail "if global.acls.manageSystemACLs is true, meshGateway.consulServiceName cannot be set" }}{{ end -}} +{{- if .Values.meshGateway.imageEnvoy }}{{ fail 
"meshGateway.imageEnvoy must be specified in global.imageEnvoy" }}{{ end -}} {{- if .Values.meshGateway.globalMode }}{{ fail "meshGateway.globalMode is no longer supported; instead, you must migrate to CRDs (see www.consul.io/docs/k8s/crds/upgrade-to-crds)" }}{{ end -}} +{{- if .Values.global.lifecycleSidecarContainer }}{{ fail "global.lifecycleSidecarContainer has been renamed to global.consulSidecarContainer. Please set values using global.consulSidecarContainer." }}{{ end }} +{{- /* The below test checks if clients are disabled (and if so, fails). We use the conditional from other client files and prepend 'not' */ -}} +{{- if not (or (and (ne (.Values.client.enabled | toString) "-") .Values.client.enabled) (and (eq (.Values.client.enabled | toString) "-") .Values.global.enabled)) }}{{ fail "clients must be enabled" }}{{ end -}} {{- if and .Values.global.adminPartitions.enabled (not .Values.global.enableConsulNamespaces) }}{{ fail "global.enableConsulNamespaces must be true if global.adminPartitions.enabled=true" }}{{ end }} -{{- if and (eq .Values.meshGateway.wanAddress.source "Static") (eq .Values.meshGateway.wanAddress.static "") }}{{ fail "if meshGateway.wanAddress.source=Static then meshGateway.wanAddress.static cannot be empty" }}{{ end }} -{{- if and (eq .Values.meshGateway.wanAddress.source "Service") (eq .Values.meshGateway.service.type "NodePort") (not .Values.meshGateway.service.nodePort) }}{{ fail "if meshGateway.wanAddress.source=Service and meshGateway.service.type=NodePort, meshGateway.service.nodePort must be set" }}{{ end }} -{{ template "consul.validateRequiredCloudSecretsExist" . }} -{{ template "consul.validateCloudSecretKeys" . }} - apiVersion: apps/v1 kind: Deployment metadata: @@ -34,23 +34,8 @@ spec: chart: {{ template "consul.chart" . }} release: {{ .Release.Name }} component: mesh-gateway - consul.hashicorp.com/connect-inject-managed-by: consul-k8s-endpoints-controller annotations: "consul.hashicorp.com/connect-inject": "false" - "consul.hashicorp.com/gateway-kind": "mesh-gateway" - "consul.hashicorp.com/gateway-consul-service-name": "{{ .Values.meshGateway.consulServiceName }}" - "consul.hashicorp.com/mesh-gateway-container-port": "{{ .Values.meshGateway.containerPort }}" - "consul.hashicorp.com/gateway-wan-address-source": "{{ .Values.meshGateway.wanAddress.source }}" - "consul.hashicorp.com/gateway-wan-address-static": "{{ .Values.meshGateway.wanAddress.static }}" - {{- if eq .Values.meshGateway.wanAddress.source "Service" }} - {{- if eq .Values.meshGateway.service.type "NodePort" }} - "consul.hashicorp.com/gateway-wan-port": "{{ .Values.meshGateway.service.nodePort }}" - {{- else }} - "consul.hashicorp.com/gateway-wan-port": "{{ .Values.meshGateway.service.port }}" - {{- end }} - {{- else }} - "consul.hashicorp.com/gateway-wan-port": "{{ .Values.meshGateway.wanAddress.port }}" - {{- end }} {{- if (and .Values.global.secretsBackend.vault.enabled .Values.global.tls.enabled) }} "vault.hashicorp.com/agent-init-first": "true" "vault.hashicorp.com/agent-inject": "true" @@ -89,23 +74,30 @@ spec: terminationGracePeriodSeconds: 10 serviceAccountName: {{ template "consul.fullname" . 
}}-mesh-gateway volumes: - - name: consul-service - emptyDir: - medium: "Memory" - {{- if .Values.global.tls.enabled }} - {{- if not (and .Values.externalServers.enabled .Values.externalServers.useSystemRoots) }} - - name: consul-ca-cert - secret: - {{- if .Values.global.tls.caCert.secretName }} - secretName: {{ .Values.global.tls.caCert.secretName }} - {{- else }} - secretName: {{ template "consul.fullname" . }}-ca-cert - {{- end }} - items: - - key: {{ default "tls.crt" .Values.global.tls.caCert.secretKey }} - path: tls.crt - {{- end }} - {{- end }} + - name: consul-bin + emptyDir: {} + - name: consul-service + emptyDir: + medium: "Memory" + {{- if .Values.global.tls.enabled }} + {{- if not (and .Values.externalServers.enabled .Values.externalServers.useSystemRoots) }} + - name: consul-ca-cert + secret: + {{- if .Values.global.tls.caCert.secretName }} + secretName: {{ .Values.global.tls.caCert.secretName }} + {{- else }} + secretName: {{ template "consul.fullname" . }}-ca-cert + {{- end }} + items: + - key: {{ default "tls.crt" .Values.global.tls.caCert.secretKey }} + path: tls.crt + {{- end }} + {{- if .Values.global.tls.enableAutoEncrypt }} + - name: consul-auto-encrypt-ca-cert + emptyDir: + medium: "Memory" + {{- end }} + {{- end }} {{- if .Values.meshGateway.hostNetwork }} hostNetwork: {{ .Values.meshGateway.hostNetwork }} {{- end }} @@ -113,159 +105,314 @@ spec: dnsPolicy: {{ .Values.meshGateway.dnsPolicy }} {{- end }} initContainers: - - name: mesh-gateway-init - image: {{ .Values.global.imageK8S }} - env: - - name: NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - {{- include "consul.consulK8sConsulServerEnvVars" . | nindent 8 }} - {{- if .Values.global.acls.manageSystemACLs }} - - name: CONSUL_LOGIN_AUTH_METHOD - {{- if and .Values.global.federation.enabled .Values.global.federation.primaryDatacenter }} - value: {{ template "consul.fullname" . }}-k8s-component-auth-method-{{ .Values.global.datacenter }} - {{- else }} - value: {{ template "consul.fullname" . }}-k8s-component-auth-method + # We use the Envoy image as our base image so we use an init container to + # copy the Consul binary to a shared directory that can be used when + # starting Envoy. 
+ - name: copy-consul-bin + image: {{ .Values.global.image | quote }} + command: + - cp + - /bin/consul + - /consul-bin/consul + volumeMounts: + - name: consul-bin + mountPath: /consul-bin + {{- if .Values.meshGateway.initCopyConsulContainer }} + {{- if .Values.meshGateway.initCopyConsulContainer.resources }} + resources: {{ toYaml .Values.meshGateway.initCopyConsulContainer.resources | nindent 12 }} {{- end }} - - name: CONSUL_LOGIN_DATACENTER - {{- if and .Values.global.federation.enabled .Values.global.federation.primaryDatacenter }} - value: {{ .Values.global.federation.primaryDatacenter }} - {{- else }} - value: {{ .Values.global.datacenter }} {{- end }} - - name: CONSUL_LOGIN_META - value: "component=mesh-gateway,pod=$(NAMESPACE)/$(POD_NAME)" - {{- end }} - command: - - "/bin/sh" - - "-ec" - - | - consul-k8s-control-plane connect-init -pod-name=${POD_NAME} -pod-namespace=${NAMESPACE} \ - -gateway-kind="mesh-gateway" \ - -consul-node-name="k8s-service-mesh" \ - -proxy-id-file=/consul/service/proxy-id \ - -service-name={{ .Values.meshGateway.consulServiceName }} \ - -log-level={{ default .Values.global.logLevel }} \ - -log-json={{ .Values.global.logJSON }} - volumeMounts: - - name: consul-service - mountPath: /consul/service - {{- if and .Values.global.tls.enabled (not (and .Values.externalServers.enabled .Values.externalServers.useSystemRoots)) }} - - name: consul-ca-cert - mountPath: /consul/tls/ca - readOnly: true - {{- end }} - {{- if .Values.meshGateway.initServiceInitContainer.resources }} - resources: {{ toYaml .Values.meshGateway.initServiceInitContainer.resources | nindent 10 }} + {{- if (and .Values.global.tls.enabled .Values.global.tls.enableAutoEncrypt) }} + {{- include "consul.getAutoEncryptClientCA" . | nindent 8 }} {{- end }} + - name: mesh-gateway-init + image: {{ .Values.global.imageK8S }} + env: + - name: HOST_IP + valueFrom: + fieldRef: + fieldPath: status.hostIP + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + {{- if .Values.global.tls.enabled }} + - name: CONSUL_CACERT + value: /consul/tls/ca/tls.crt + {{- end }} + - name: CONSUL_HTTP_ADDR + {{- if .Values.global.tls.enabled }} + value: https://$(HOST_IP):8501 + {{- else }} + value: http://$(HOST_IP):8500 + {{- end }} + command: + - "/bin/sh" + - "-ec" + - | + {{- if .Values.global.acls.manageSystemACLs }} + consul-k8s-control-plane acl-init \ + -component-name=mesh-gateway \ + -token-sink-file=/consul/service/acl-token \ + {{- if and .Values.global.federation.enabled .Values.global.federation.primaryDatacenter }} + -acl-auth-method={{ template "consul.fullname" . }}-k8s-component-auth-method-{{ .Values.global.datacenter }} \ + -primary-datacenter={{ .Values.global.federation.primaryDatacenter }} \ + {{- else }} + -acl-auth-method={{ template "consul.fullname" . 
}}-k8s-component-auth-method \ + {{- end }} + {{- if .Values.global.adminPartitions.enabled }} + -partition={{ .Values.global.adminPartitions.name }} \ + {{- end }} + -consul-api-timeout={{ .Values.global.consulAPITimeout }} \ + -log-level={{ default .Values.global.logLevel }} \ + -log-json={{ .Values.global.logJSON }} + {{ end }} + + {{- $source := .Values.meshGateway.wanAddress.source }} + {{- $serviceType := .Values.meshGateway.service.type }} + {{- if and (eq $source "Service") (not .Values.meshGateway.service.enabled) }}{{ fail "if meshGateway.wanAddress.source=Service then meshGateway.service.enabled must be set to true" }}{{ end }} + {{- if or (eq $source "NodeIP") (and (eq $source "Service") (eq $serviceType "NodePort")) }} + WAN_ADDR="${HOST_IP}" + {{- else if eq $source "NodeName" }} + WAN_ADDR="${NODE_NAME}" + {{- else if and (eq $source "Service") (or (eq $serviceType "ClusterIP") (eq $serviceType "LoadBalancer")) }} + consul-k8s-control-plane service-address \ + -log-level={{ .Values.global.logLevel }} \ + -log-json={{ .Values.global.logJSON }} \ + -k8s-namespace={{ .Release.Namespace }} \ + -name={{ template "consul.fullname" . }}-mesh-gateway \ + -output-file=/tmp/address.txt + WAN_ADDR="$(cat /tmp/address.txt)" + {{- else if eq $source "Static" }} + {{- if eq .Values.meshGateway.wanAddress.static "" }}{{ fail "if meshGateway.wanAddress.source=Static then meshGateway.wanAddress.static cannot be empty" }}{{ end }} + WAN_ADDR="{{ .Values.meshGateway.wanAddress.static }}" + {{- else }} + {{- fail "currently set meshGateway values for wanAddress.source and service.type are not supported" }} + {{- end }} + + {{- if eq $source "Service" }} + {{- if eq $serviceType "NodePort" }} + {{- if not .Values.meshGateway.service.nodePort }}{{ fail "if meshGateway.wanAddress.source=Service and meshGateway.service.type=NodePort, meshGateway.service.nodePort must be set" }}{{ end }} + WAN_PORT="{{ .Values.meshGateway.service.nodePort }}" + {{- else }} + WAN_PORT="{{ .Values.meshGateway.service.port }}" + {{- end }} + {{- else }} + WAN_PORT="{{ .Values.meshGateway.wanAddress.port }}" + {{- end }} + + cat > /consul/service/service.hcl << EOF + service { + kind = "mesh-gateway" + name = "{{ .Values.meshGateway.consulServiceName }}" + {{- if .Values.global.federation.enabled }} + meta { + consul-wan-federation = "1" + } + {{- end }} + {{- if (and .Values.global.metrics.enabled .Values.global.metrics.enableGatewayMetrics) }} + proxy { config { envoy_prometheus_bind_addr = "${POD_IP}:20200" } } + {{- end }} + port = {{ .Values.meshGateway.containerPort }} + address = "${POD_IP}" + {{- if .Values.global.adminPartitions.enabled }} + partition = "{{ .Values.global.adminPartitions.name }}" + {{- end }} + tagged_addresses { + lan { + address = "${POD_IP}" + port = {{ .Values.meshGateway.containerPort }} + } + wan { + address = "${WAN_ADDR}" + port = ${WAN_PORT} + } + } + checks = [ + { + name = "Mesh Gateway Listening" + interval = "10s" + tcp = "${POD_IP}:{{ .Values.meshGateway.containerPort }}" + deregister_critical_service_after = "6h" + } + ] + } + EOF + + /consul-bin/consul services register \ + {{- if .Values.global.acls.manageSystemACLs }} + -token-file=/consul/service/acl-token \ + {{- end }} + /consul/service/service.hcl + volumeMounts: + - name: consul-service + mountPath: /consul/service + - name: consul-bin + mountPath: /consul-bin + {{- if .Values.global.tls.enabled }} + {{- if .Values.global.tls.enableAutoEncrypt }} + - name: consul-auto-encrypt-ca-cert + {{- else }} + - name: consul-ca-cert 
+ {{- end }} + mountPath: /consul/tls/ca + readOnly: true + {{- end }} + {{- if .Values.meshGateway.initServiceInitContainer.resources }} + resources: {{ toYaml .Values.meshGateway.initServiceInitContainer.resources | nindent 12 }} + {{- end }} containers: - - name: mesh-gateway - image: {{ .Values.global.imageConsulDataplane | quote }} - {{- if .Values.meshGateway.resources }} - resources: + - name: mesh-gateway + image: {{ .Values.global.imageEnvoy | quote }} + {{- if .Values.meshGateway.resources }} + resources: {{- if eq (typeOf .Values.meshGateway.resources) "string" }} {{ tpl .Values.meshGateway.resources . | nindent 12 | trim }} {{- else }} {{- toYaml .Values.meshGateway.resources | nindent 12 }} {{- end }} - {{- end }} - volumeMounts: - - mountPath: /consul/service - name: consul-service - readOnly: true - {{- if and .Values.global.tls.enabled (not (and .Values.externalServers.enabled .Values.externalServers.useSystemRoots)) }} - - name: consul-ca-cert - mountPath: /consul/tls/ca - readOnly: true - {{- end }} - env: - - name: NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - command: - - /bin/sh - - -ec - - | - consul-dataplane \ - {{- if .Values.externalServers.enabled }} - -addresses={{ .Values.externalServers.hosts | first | quote }} \ - {{- else }} - -addresses="{{ template "consul.fullname" . }}-server.{{ .Release.Namespace }}.svc" \ + {{- end }} + volumeMounts: + - mountPath: /consul/service + name: consul-service + readOnly: true + - name: consul-bin + mountPath: /consul-bin + {{- if .Values.global.tls.enabled }} + {{- if .Values.global.tls.enableAutoEncrypt }} + - name: consul-auto-encrypt-ca-cert + {{- else }} + - name: consul-ca-cert + {{- end }} + mountPath: /consul/tls/ca + readOnly: true + {{- end }} + env: + - name: HOST_IP + valueFrom: + fieldRef: + fieldPath: status.hostIP + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + {{- if eq .Values.meshGateway.wanAddress.source "NodeName" }} + - name: NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName {{- end }} - {{- if .Values.externalServers.enabled }} - -grpc-port={{ .Values.externalServers.grpcPort }} \ - {{- else }} - -grpc-port=8502 \ + {{- if .Values.global.acls.manageSystemACLs }} + - name: CONSUL_HTTP_TOKEN_FILE + value: /consul/service/acl-token {{- end }} - -proxy-service-id=$POD_NAME \ - -service-node-name="k8s-service-mesh" \ {{- if .Values.global.tls.enabled }} - {{- if (not (and .Values.externalServers.enabled .Values.externalServers.useSystemRoots)) }} - -ca-certs=/consul/tls/ca/tls.crt \ - {{- end }} - {{- if and .Values.externalServers.enabled .Values.externalServers.tlsServerName }} - -tls-server-name={{.Values.externalServers.tlsServerName }} \ - {{- else if .Values.global.cloud.enabled }} - -tls-server-name=server.{{ .Values.global.datacenter}}.{{ .Values.global.domain}} \ - {{- end }} + - name: CONSUL_HTTP_ADDR + value: https://$(HOST_IP):8501 + - name: CONSUL_GRPC_ADDR + value: https://$(HOST_IP):8502 + - name: CONSUL_CACERT + value: /consul/tls/ca/tls.crt {{- else }} - -tls-disabled \ - {{- end }} - {{- if .Values.global.acls.manageSystemACLs }} - -credential-type=login \ - -login-bearer-token-path=/var/run/secrets/kubernetes.io/serviceaccount/token \ - -login-meta=component=mesh-gateway \ - -login-meta=pod=${NAMESPACE}/${POD_NAME} \ - {{- if and .Values.global.federation.enabled .Values.global.federation.primaryDatacenter }} - -login-auth-method={{ template "consul.fullname" . 
}}-k8s-component-auth-method-{{ .Values.global.datacenter }} \ - -login-datacenter={{ .Values.global.federation.primaryDatacenter }} \ - {{- else }} - -login-auth-method={{ template "consul.fullname" . }}-k8s-component-auth-method \ + - name: CONSUL_HTTP_ADDR + value: http://$(HOST_IP):8500 + - name: CONSUL_GRPC_ADDR + value: $(HOST_IP):8502 {{- end }} + command: + - /consul-bin/consul + - connect + - envoy + - -mesh-gateway {{- if .Values.global.adminPartitions.enabled }} - -login-partition={{ .Values.global.adminPartitions.name }} \ - {{- end }} + - -partition={{ .Values.global.adminPartitions.name }} {{- end }} - {{- if .Values.global.adminPartitions.enabled }} - -service-partition={{ .Values.global.adminPartitions.name }} \ + livenessProbe: + tcpSocket: + port: {{ .Values.meshGateway.containerPort }} + failureThreshold: 3 + initialDelaySeconds: 30 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 5 + readinessProbe: + tcpSocket: + port: {{ .Values.meshGateway.containerPort }} + failureThreshold: 3 + initialDelaySeconds: 10 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 5 + ports: + - name: gateway + containerPort: {{ .Values.meshGateway.containerPort }} + {{- if .Values.meshGateway.hostPort }} + hostPort: {{ .Values.meshGateway.hostPort }} + {{- end }} + lifecycle: + preStop: + exec: + command: + - "/bin/sh" + - "-ec" + - "/consul-bin/consul services deregister -id=\"{{ .Values.meshGateway.consulServiceName }}\"" + {{- if .Values.global.acls.manageSystemACLs }} + - "/consul-bin/consul logout" + {{- end}} + + # consul-sidecar ensures the mesh gateway is always registered with + # the local Consul agent, even if it loses the initial registration. + - name: consul-sidecar + image: {{ .Values.global.imageK8S }} + volumeMounts: + - name: consul-service + mountPath: /consul/service + readOnly: true + - name: consul-bin + mountPath: /consul-bin + {{- if .Values.global.tls.enabled }} + {{- if .Values.global.tls.enableAutoEncrypt }} + - name: consul-auto-encrypt-ca-cert + {{- else }} + - name: consul-ca-cert {{- end }} - -log-level={{ default .Values.global.logLevel }} \ - -log-json={{ .Values.global.logJSON }} \ - {{- if (and .Values.global.metrics.enabled .Values.global.metrics.enableGatewayMetrics) }} - -telemetry-prom-scrape-path="/metrics" + mountPath: /consul/tls/ca + readOnly: true {{- end }} - livenessProbe: - tcpSocket: - port: {{ .Values.meshGateway.containerPort }} - failureThreshold: 3 - initialDelaySeconds: 30 - periodSeconds: 10 - successThreshold: 1 - timeoutSeconds: 5 - readinessProbe: - tcpSocket: - port: {{ .Values.meshGateway.containerPort }} - failureThreshold: 3 - initialDelaySeconds: 10 - periodSeconds: 10 - successThreshold: 1 - timeoutSeconds: 5 - ports: - - name: gateway - containerPort: {{ .Values.meshGateway.containerPort }} - {{- if .Values.meshGateway.hostPort }} - hostPort: {{ .Values.meshGateway.hostPort }} + {{- if .Values.global.consulSidecarContainer }} + {{- if .Values.global.consulSidecarContainer.resources }} + resources: {{ toYaml .Values.global.consulSidecarContainer.resources | nindent 12 }} + {{- end }} {{- end }} + env: + - name: HOST_IP + valueFrom: + fieldRef: + fieldPath: status.hostIP + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + {{- if .Values.global.tls.enabled }} + - name: CONSUL_HTTP_ADDR + value: https://$(HOST_IP):8501 + - name: CONSUL_CACERT + value: /consul/tls/ca/tls.crt + {{- else }} + - name: CONSUL_HTTP_ADDR + value: http://$(HOST_IP):8500 + {{- end }} + command: + - consul-k8s-control-plane 
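+ # The consul-sidecar subcommand below periodically re-registers the service
+ # from service.hcl, restoring the registration if the local agent loses it.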
+ - consul-sidecar + - -log-level={{ .Values.global.logLevel }} + - -log-json={{ .Values.global.logJSON }} + - -service-config=/consul/service/service.hcl + - -consul-binary=/consul-bin/consul + - -consul-api-timeout={{ .Values.global.consulAPITimeout }} + {{- if .Values.global.acls.manageSystemACLs }} + - -token-file=/consul/service/acl-token + {{- end }} {{- if .Values.meshGateway.priorityClassName }} priorityClassName: {{ .Values.meshGateway.priorityClassName | quote }} {{- end }} diff --git a/charts/consul/templates/mesh-gateway-service.yaml b/charts/consul/templates/mesh-gateway-service.yaml index 5fdceca8df..7bd7ec2acc 100644 --- a/charts/consul/templates/mesh-gateway-service.yaml +++ b/charts/consul/templates/mesh-gateway-service.yaml @@ -1,4 +1,4 @@ -{{- if and .Values.meshGateway.enabled }} +{{- if and .Values.meshGateway.enabled .Values.meshGateway.service.enabled }} apiVersion: v1 kind: Service metadata: diff --git a/charts/consul/templates/partition-init-job.yaml b/charts/consul/templates/partition-init-job.yaml index 082c48447b..4d6d971743 100644 --- a/charts/consul/templates/partition-init-job.yaml +++ b/charts/consul/templates/partition-init-job.yaml @@ -3,7 +3,6 @@ {{- template "consul.reservedNamesFailer" (list .Values.global.adminPartitions.name "global.adminPartitions.name") }} {{- if and (not .Values.externalServers.enabled) (ne .Values.global.adminPartitions.name "default") }}{{ fail "externalServers.enabled needs to be true and configured to create a non-default partition." }}{{ end -}} {{- if and .Values.global.secretsBackend.vault.enabled .Values.global.acls.manageSystemACLs (not .Values.global.secretsBackend.vault.adminPartitionsRole) }}{{ fail "global.secretsBackend.vault.adminPartitionsRole is required when global.secretsBackend.vault.enabled and global.acls.manageSystemACLs are true." }}{{ end -}} -{{- if and .Values.externalServers.enabled (not .Values.externalServers.hosts) }}{{ fail "externalServers.hosts must be set if externalServers.enabled is true" }}{{ end -}} apiVersion: batch/v1 kind: Job metadata: @@ -76,19 +75,22 @@ spec: - name: partition-init-job image: {{ .Values.global.imageK8S }} env: - {{- include "consul.consulK8sConsulServerEnvVars" . 
| nindent 10 }} - {{- if (and .Values.global.acls.bootstrapToken.secretName .Values.global.acls.bootstrapToken.secretKey) }} - {{- if .Values.global.secretsBackend.vault.enabled }} - - name: CONSUL_ACL_TOKEN_FILE - value: /vault/secrets/bootstrap-token - {{- else }} - - name: CONSUL_ACL_TOKEN - valueFrom: - secretKeyRef: - name: {{ .Values.global.acls.bootstrapToken.secretName }} - key: {{ .Values.global.acls.bootstrapToken.secretKey }} - {{- end }} - {{- end }} + - name: NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + {{- if (and .Values.global.acls.bootstrapToken.secretName .Values.global.acls.bootstrapToken.secretKey) }} + {{- if .Values.global.secretsBackend.vault.enabled }} + - name: CONSUL_HTTP_TOKEN_FILE + value: /vault/secrets/bootstrap-token + {{- else }} + - name: CONSUL_HTTP_TOKEN + valueFrom: + secretKeyRef: + name: {{ .Values.global.acls.bootstrapToken.secretName }} + key: {{ .Values.global.acls.bootstrapToken.secretKey }} + {{- end }} + {{- end }} {{- if .Values.global.tls.enabled }} {{- if not (or .Values.externalServers.useSystemRoots .Values.global.secretsBackend.vault.enabled) }} volumeMounts: @@ -102,11 +104,30 @@ spec: - "-ec" - | consul-k8s-control-plane partition-init \ + -consul-api-timeout={{ .Values.global.consulAPITimeout }} \ -log-level={{ .Values.global.logLevel }} \ -log-json={{ .Values.global.logJSON }} \ - {{- if .Values.global.cloud.enabled }} - -tls-server-name=server.{{ .Values.global.datacenter}}.{{ .Values.global.domain}} \ + + {{- if and .Values.externalServers.enabled (not .Values.externalServers.hosts) }}{{ fail "externalServers.hosts must be set if externalServers.enabled is true" }}{{ end -}} + {{- range .Values.externalServers.hosts }} + -server-address={{ quote . }} \ + {{- end }} + -server-port={{ .Values.externalServers.httpsPort }} \ + + {{- if .Values.global.tls.enabled }} + -use-https \ + {{- if not .Values.externalServers.useSystemRoots }} + {{- if .Values.global.secretsBackend.vault.enabled }} + -ca-file=/vault/secrets/serverca.crt \ + {{- else }} + -ca-file=/consul/tls/ca/tls.crt \ + {{- end }} + {{- end }} + {{- if .Values.externalServers.tlsServerName }} + -tls-server-name={{ .Values.externalServers.tlsServerName }} \ + {{- end }} {{- end }} + -partition-name={{ .Values.global.adminPartitions.name }} resources: requests: memory: "50Mi" diff --git a/charts/consul/templates/partition-service.yaml b/charts/consul/templates/partition-service.yaml new file mode 100644 index 0000000000..b9266a11c7 --- /dev/null +++ b/charts/consul/templates/partition-service.yaml @@ -0,0 +1,45 @@ +{{- $serverEnabled := (or (and (ne (.Values.server.enabled | toString) "-") .Values.server.enabled) (and (eq (.Values.server.enabled | toString) "-") .Values.global.enabled)) -}} +{{- if (and .Values.global.adminPartitions.enabled $serverEnabled) }} +# Service with an external IP for clients in non-default Admin Partitions +# to discover Consul servers. This service should only point to Consul servers. +apiVersion: v1 +kind: Service +metadata: + name: {{ template "consul.fullname" . }}-partition + namespace: {{ .Release.Namespace }} + labels: + app: {{ template "consul.name" . }} + chart: {{ template "consul.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + component: server + annotations: + {{- if .Values.global.adminPartitions.service.annotations }} + {{ tpl .Values.global.adminPartitions.service.annotations . 
| nindent 4 | trim }} + {{- end }} +spec: + type: "{{ .Values.global.adminPartitions.service.type }}" + ports: + - name: https + port: 8501 + targetPort: 8501 + {{ if (and (eq .Values.global.adminPartitions.service.type "NodePort") .Values.global.adminPartitions.service.nodePort.https) }} + nodePort: {{ .Values.global.adminPartitions.service.nodePort.https }} + {{- end }} + - name: serflan + port: 8301 + targetPort: 8301 + {{ if (and (eq .Values.global.adminPartitions.service.type "NodePort") .Values.global.adminPartitions.service.nodePort.serf) }} + nodePort: {{ .Values.global.adminPartitions.service.nodePort.serf }} + {{- end }} + - name: server + port: 8300 + targetPort: 8300 + {{ if (and (eq .Values.global.adminPartitions.service.type "NodePort") .Values.global.adminPartitions.service.nodePort.rpc) }} + nodePort: {{ .Values.global.adminPartitions.service.nodePort.rpc }} + {{- end }} + selector: + app: {{ template "consul.name" . }} + release: "{{ .Release.Name }}" + component: server +{{- end }} diff --git a/charts/consul/templates/server-acl-init-job.yaml b/charts/consul/templates/server-acl-init-job.yaml index 3a57f096ec..b2f770db7a 100644 --- a/charts/consul/templates/server-acl-init-job.yaml +++ b/charts/consul/templates/server-acl-init-job.yaml @@ -7,8 +7,6 @@ {{- if or (and .Values.global.acls.bootstrapToken.secretName (not .Values.global.acls.bootstrapToken.secretKey)) (and .Values.global.acls.bootstrapToken.secretKey (not .Values.global.acls.bootstrapToken.secretName))}}{{ fail "both global.acls.bootstrapToken.secretKey and global.acls.bootstrapToken.secretName must be set if one of them is provided" }}{{ end -}} {{- if or (and .Values.global.acls.replicationToken.secretName (not .Values.global.acls.replicationToken.secretKey)) (and .Values.global.acls.replicationToken.secretKey (not .Values.global.acls.replicationToken.secretName))}}{{ fail "both global.acls.replicationToken.secretKey and global.acls.replicationToken.secretName must be set if one of them is provided" }}{{ end -}} {{- if (and .Values.global.secretsBackend.vault.enabled (and (not .Values.global.acls.bootstrapToken.secretName) (not .Values.global.acls.replicationToken.secretName ))) }}{{fail "global.acls.bootstrapToken or global.acls.replicationToken must be provided when global.secretsBackend.vault.enabled and global.acls.manageSystemACLs are true" }}{{ end -}} -{{ template "consul.validateRequiredCloudSecretsExist" . }} -{{ template "consul.validateCloudSecretKeys" . }} {{- if (and .Values.global.secretsBackend.vault.enabled (not .Values.global.secretsBackend.vault.manageSystemACLsRole)) }}{{fail "global.secretsBackend.vault.manageSystemACLsRole is required when global.secretsBackend.vault.enabled and global.acls.manageSystemACLs are true" }}{{ end -}} {{- /* We don't render this job when server.updatePartition > 0 because that means a server rollout is in progress and this job won't complete unless @@ -70,7 +68,7 @@ spec: {{- end }} {{- if .Values.global.acls.replicationToken.secretName }} "vault.hashicorp.com/agent-inject-secret-replication-token": "{{ .Values.global.acls.replicationToken.secretName }}" - "vault.hashicorp.com/agent-inject-template-replication-token": {{ template "consul.vaultReplicationTokenTemplate" . }} + "vault.hashicorp.com/agent-inject-template-replication-token": {{ template "consul.vaultReplicationTokenTemplate" . }} {{- end }} {{- if .Values.global.secretsBackend.vault.agentAnnotations }} {{ tpl .Values.global.secretsBackend.vault.agentAnnotations . 
| nindent 8 | trim }} @@ -81,230 +79,255 @@ spec: serviceAccountName: {{ template "consul.fullname" . }}-server-acl-init {{- if (or .Values.global.tls.enabled .Values.global.acls.replicationToken.secretName .Values.global.acls.bootstrapToken.secretName) }} volumes: - {{- if and .Values.global.tls.enabled (not .Values.global.secretsBackend.vault.enabled) }} - {{- if not (and .Values.externalServers.enabled .Values.externalServers.useSystemRoots) }} - - name: consul-ca-cert - secret: - {{- if .Values.global.tls.caCert.secretName }} - secretName: {{ .Values.global.tls.caCert.secretName }} - {{- else }} - secretName: {{ template "consul.fullname" . }}-ca-cert - {{- end }} - items: - - key: {{ default "tls.crt" .Values.global.tls.caCert.secretKey }} - path: tls.crt - {{- end }} - {{- end }} - {{- if (and .Values.global.acls.bootstrapToken.secretName (not .Values.global.secretsBackend.vault.enabled)) }} - - name: bootstrap-token - secret: - secretName: {{ .Values.global.acls.bootstrapToken.secretName }} - items: - - key: {{ .Values.global.acls.bootstrapToken.secretKey }} - path: bootstrap-token - {{- else if and .Values.global.acls.replicationToken.secretName (not .Values.global.secretsBackend.vault.enabled) }} - - name: acl-replication-token - secret: - secretName: {{ .Values.global.acls.replicationToken.secretName }} - items: - - key: {{ .Values.global.acls.replicationToken.secretKey }} - path: acl-replication-token - {{- end }} - {{- end }} - containers: - - name: server-acl-init-job - image: {{ .Values.global.imageK8S }} - env: - - name: NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - {{- include "consul.consulK8sConsulServerEnvVars" . | nindent 8 }} - {{- if (or .Values.global.tls.enabled .Values.global.acls.replicationToken.secretName .Values.global.acls.bootstrapToken.secretName) }} - volumeMounts: {{- if and .Values.global.tls.enabled (not .Values.global.secretsBackend.vault.enabled) }} - {{- if not (and .Values.externalServers.enabled .Values.externalServers.useSystemRoots) }} - name: consul-ca-cert - mountPath: /consul/tls/ca - readOnly: true - {{- end }} + secret: + {{- if .Values.global.tls.caCert.secretName }} + secretName: {{ .Values.global.tls.caCert.secretName }} + {{- else }} + secretName: {{ template "consul.fullname" . }}-ca-cert + {{- end }} + items: + - key: {{ default "tls.crt" .Values.global.tls.caCert.secretKey }} + path: tls.crt {{- end }} {{- if (and .Values.global.acls.bootstrapToken.secretName (not .Values.global.secretsBackend.vault.enabled)) }} - name: bootstrap-token - mountPath: /consul/acl/tokens - readOnly: true + secret: + secretName: {{ .Values.global.acls.bootstrapToken.secretName }} + items: + - key: {{ .Values.global.acls.bootstrapToken.secretKey }} + path: bootstrap-token {{- else if and .Values.global.acls.replicationToken.secretName (not .Values.global.secretsBackend.vault.enabled) }} - name: acl-replication-token - mountPath: /consul/acl/tokens - readOnly: true + secret: + secretName: {{ .Values.global.acls.replicationToken.secretName }} + items: + - key: {{ .Values.global.acls.replicationToken.secretKey }} + path: acl-replication-token {{- end }} - {{- end }} - command: - - "/bin/sh" - - "-ec" - - | - CONSUL_FULLNAME="{{template "consul.fullname" . 
}}" + {{- end }} + containers: + - name: post-install-job + image: {{ .Values.global.imageK8S }} + env: + - name: NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + {{- if (or .Values.global.tls.enabled .Values.global.acls.replicationToken.secretName .Values.global.acls.bootstrapToken.secretName) }} + volumeMounts: + {{- if and .Values.global.tls.enabled (not .Values.global.secretsBackend.vault.enabled) }} + - name: consul-ca-cert + mountPath: /consul/tls/ca + readOnly: true + {{- end }} + {{- if (and .Values.global.acls.bootstrapToken.secretName (not .Values.global.secretsBackend.vault.enabled)) }} + - name: bootstrap-token + mountPath: /consul/acl/tokens + readOnly: true + {{- else if and .Values.global.acls.replicationToken.secretName (not .Values.global.secretsBackend.vault.enabled) }} + - name: acl-replication-token + mountPath: /consul/acl/tokens + readOnly: true + {{- end }} + {{- end }} + command: + - "/bin/sh" + - "-ec" + - | + CONSUL_FULLNAME="{{template "consul.fullname" . }}" - consul-k8s-control-plane server-acl-init \ - -log-level={{ .Values.global.logLevel }} \ - -log-json={{ .Values.global.logJSON }} \ - -resource-prefix=${CONSUL_FULLNAME} \ - -k8s-namespace={{ .Release.Namespace }} \ - -set-server-tokens={{ $serverEnabled }} \ + consul-k8s-control-plane server-acl-init \ + -log-level={{ .Values.global.logLevel }} \ + -log-json={{ .Values.global.logJSON }} \ + -resource-prefix=${CONSUL_FULLNAME} \ + -k8s-namespace={{ .Release.Namespace }} \ + -set-server-tokens={{ $serverEnabled }} \ + -consul-api-timeout={{ .Values.global.consulAPITimeout }} \ - {{- if .Values.global.acls.bootstrapToken.secretName }} - {{- if .Values.global.secretsBackend.vault.enabled }} - -bootstrap-token-file=/vault/secrets/bootstrap-token \ - {{- else }} - -bootstrap-token-file=/consul/acl/tokens/bootstrap-token \ - {{- end }} - {{- end }} + {{- if .Values.externalServers.enabled }} + {{- if and .Values.externalServers.enabled (not .Values.externalServers.hosts) }}{{ fail "externalServers.hosts must be set if externalServers.enabled is true" }}{{ end -}} + {{- range .Values.externalServers.hosts }} + -server-address={{ quote . 
}} \ + {{- end }} + -server-port={{ .Values.externalServers.httpsPort }} \ + {{- else }} + {{- range $index := until (.Values.server.replicas | int) }} + -server-address="${CONSUL_FULLNAME}-server-{{ $index }}.${CONSUL_FULLNAME}-server.${NAMESPACE}.svc" \ + {{- end }} + {{- end }} - {{- if .Values.syncCatalog.enabled }} - -sync-catalog=true \ - {{- if .Values.syncCatalog.consulNodeName }} - -sync-consul-node-name={{ .Values.syncCatalog.consulNodeName }} \ - {{- end }} - {{- end }} + {{- if .Values.global.tls.enabled }} + -use-https \ + {{- if not (and .Values.externalServers.enabled .Values.externalServers.useSystemRoots) }} + {{- if .Values.global.secretsBackend.vault.enabled }} + -consul-ca-cert=/vault/secrets/serverca.crt \ + {{- else }} + -consul-ca-cert=/consul/tls/ca/tls.crt \ + {{- end }} + {{- end }} + {{- if not .Values.externalServers.enabled }} + -server-port=8501 \ + {{- end }} + {{- if .Values.externalServers.tlsServerName }} + -consul-tls-server-name={{ .Values.externalServers.tlsServerName }} \ + {{- end }} + {{- end }} - {{- if .Values.global.peering.enabled }} - -enable-peering=true \ - {{- end }} - {{- if (or (and (ne (.Values.dns.enabled | toString) "-") .Values.dns.enabled) (and (eq (.Values.dns.enabled | toString) "-") .Values.global.enabled)) }} - -allow-dns=true \ - {{- end }} + {{- if .Values.syncCatalog.enabled }} + -sync-catalog=true \ + {{- if .Values.syncCatalog.consulNodeName }} + -sync-consul-node-name={{ .Values.syncCatalog.consulNodeName }} \ + {{- end }} + {{- end }} + {{- if .Values.global.adminPartitions.enabled }} + -enable-partitions=true \ + -partition={{ .Values.global.adminPartitions.name }} \ + {{- end }} + {{- if .Values.global.peering.enabled }} + -enable-peering=true \ + {{- end }} + {{- if (or (and (ne (.Values.dns.enabled | toString) "-") .Values.dns.enabled) (and (eq (.Values.dns.enabled | toString) "-") .Values.global.enabled)) }} + -allow-dns=true \ + {{- end }} - {{- if (or (and (ne (.Values.connectInject.enabled | toString) "-") .Values.connectInject.enabled) (and (eq (.Values.connectInject.enabled | toString) "-") .Values.global.enabled)) }} - -connect-inject=true \ - {{- end }} - {{- if and .Values.externalServers.enabled .Values.externalServers.k8sAuthMethodHost }} - -auth-method-host={{ .Values.externalServers.k8sAuthMethodHost }} \ - {{- end }} + {{- if (or (and (ne (.Values.connectInject.enabled | toString) "-") .Values.connectInject.enabled) (and (eq (.Values.connectInject.enabled | toString) "-") .Values.global.enabled)) }} + -connect-inject=true \ + {{- end }} + {{- if and .Values.externalServers.enabled .Values.externalServers.k8sAuthMethodHost }} + -auth-method-host={{ .Values.externalServers.k8sAuthMethodHost }} \ + {{- end }} - {{- if .Values.global.federation.k8sAuthMethodHost }} - -auth-method-host={{ .Values.global.federation.k8sAuthMethodHost }} \ - {{- end }} + {{- if .Values.global.federation.k8sAuthMethodHost }} + -auth-method-host={{ .Values.global.federation.k8sAuthMethodHost }} \ + {{- end }} - {{- if .Values.meshGateway.enabled }} - -mesh-gateway=true \ - {{- end }} + {{- if .Values.meshGateway.enabled }} + -mesh-gateway=true \ + {{- end }} - {{- if .Values.ingressGateways.enabled }} - {{- if .Values.global.enableConsulNamespaces }} - {{- $root := . 
}} - {{- range .Values.ingressGateways.gateways }} - {{- if (or $root.Values.ingressGateways.defaults.consulNamespace .consulNamespace) }} - -ingress-gateway-name="{{ .name }}.{{ (default $root.Values.ingressGateways.defaults.consulNamespace .consulNamespace) }}" \ - {{- else }} - -ingress-gateway-name="{{ .name }}" \ - {{- end }} - {{- end }} - {{- else }} - {{- range .Values.ingressGateways.gateways }} - -ingress-gateway-name="{{ .name }}" \ - {{- end }} - {{- end }} - {{- end }} + {{- if .Values.ingressGateways.enabled }} + {{- if .Values.global.enableConsulNamespaces }} + {{- $root := . }} + {{- range .Values.ingressGateways.gateways }} + {{- if (or $root.Values.ingressGateways.defaults.consulNamespace .consulNamespace) }} + -ingress-gateway-name="{{ .name }}.{{ (default $root.Values.ingressGateways.defaults.consulNamespace .consulNamespace) }}" \ + {{- else }} + -ingress-gateway-name="{{ .name }}" \ + {{- end }} + {{- end }} + {{- else }} + {{- range .Values.ingressGateways.gateways }} + -ingress-gateway-name="{{ .name }}" \ + {{- end }} + {{- end }} + {{- end }} - {{- if .Values.terminatingGateways.enabled }} - {{- if .Values.global.enableConsulNamespaces }} - {{- $root := . }} - {{- range .Values.terminatingGateways.gateways }} - {{- if (or $root.Values.terminatingGateways.defaults.consulNamespace .consulNamespace) }} - -terminating-gateway-name="{{ .name }}.{{ (default $root.Values.terminatingGateways.defaults.consulNamespace .consulNamespace) }}" \ - {{- else }} - -terminating-gateway-name="{{ .name }}" \ - {{- end }} - {{- end }} - {{- else }} - {{- range .Values.terminatingGateways.gateways }} - -terminating-gateway-name="{{ .name }}" \ - {{- end }} - {{- end }} - {{- end }} + {{- if .Values.terminatingGateways.enabled }} + {{- if .Values.global.enableConsulNamespaces }} + {{- $root := . 
}} + {{- range .Values.terminatingGateways.gateways }} + {{- if (or $root.Values.terminatingGateways.defaults.consulNamespace .consulNamespace) }} + -terminating-gateway-name="{{ .name }}.{{ (default $root.Values.terminatingGateways.defaults.consulNamespace .consulNamespace) }}" \ + {{- else }} + -terminating-gateway-name="{{ .name }}" \ + {{- end }} + {{- end }} + {{- else }} + {{- range .Values.terminatingGateways.gateways }} + -terminating-gateway-name="{{ .name }}" \ + {{- end }} + {{- end }} + {{- end }} - {{- if .Values.connectInject.aclBindingRuleSelector }} - -acl-binding-rule-selector={{ .Values.connectInject.aclBindingRuleSelector }} \ - {{- end }} + {{- if .Values.connectInject.aclBindingRuleSelector }} + -acl-binding-rule-selector={{ .Values.connectInject.aclBindingRuleSelector }} \ + {{- end }} - {{- if (and .Values.global.enterpriseLicense.secretName .Values.global.enterpriseLicense.secretKey) }} - -create-enterprise-license-token=true \ - {{- end }} + {{- if (and .Values.global.enterpriseLicense.secretName .Values.global.enterpriseLicense.secretKey) }} + -create-enterprise-license-token=true \ + {{- end }} - {{- if .Values.server.snapshotAgent.enabled }} - -snapshot-agent=true \ - {{- end }} + {{- if .Values.client.snapshotAgent.enabled }} + -snapshot-agent=true \ + {{- end }} - {{- if not (or (and (ne (.Values.client.enabled | toString) "-") .Values.client.enabled) (and (eq (.Values.client.enabled | toString) "-") .Values.global.enabled)) }} - -client=false \ - {{- end }} + {{- if not (or (and (ne (.Values.client.enabled | toString) "-") .Values.client.enabled) (and (eq (.Values.client.enabled | toString) "-") .Values.global.enabled)) }} + -client=false \ + {{- end }} - {{- if .Values.global.acls.createReplicationToken }} - -create-acl-replication-token=true \ - {{- end }} + {{- if .Values.global.acls.createReplicationToken }} + -create-acl-replication-token=true \ + {{- end }} - {{- if .Values.global.federation.enabled }} - -federation=true \ - {{- end }} + {{- if .Values.global.federation.enabled }} + -federation=true \ + {{- end }} - {{- if .Values.global.acls.replicationToken.secretName }} - {{- if .Values.global.secretsBackend.vault.enabled }} - -acl-replication-token-file=/vault/secrets/replication-token \ - {{- else }} - -acl-replication-token-file=/consul/acl/tokens/acl-replication-token \ - {{- end }} - {{- end }} - {{- if and .Values.global.secretsBackend.vault.enabled .Values.global.acls.partitionToken.secretName }} - -partition-token-file=/vault/secrets/partition-token \ - {{- end }} + {{- if .Values.global.acls.bootstrapToken.secretName }} + {{- if .Values.global.secretsBackend.vault.enabled }} + -bootstrap-token-file=/vault/secrets/bootstrap-token \ + {{- else }} + -bootstrap-token-file=/consul/acl/tokens/bootstrap-token \ + {{- end }} + {{- end }} + {{- if .Values.global.acls.replicationToken.secretName }} + {{- if .Values.global.secretsBackend.vault.enabled }} + -acl-replication-token-file=/vault/secrets/replication-token \ + {{- else }} + -acl-replication-token-file=/consul/acl/tokens/acl-replication-token \ + {{- end }} + {{- end }} + {{- if and .Values.global.secretsBackend.vault.enabled .Values.global.acls.partitionToken.secretName }} + -partition-token-file=/vault/secrets/partition-token \ + {{- end }} - {{- if .Values.controller.enabled }} - -controller=true \ - {{- end }} + {{- if .Values.controller.enabled }} + -controller=true \ + {{- end }} - {{- if .Values.apiGateway.enabled }} - -api-gateway-controller=true \ - {{- end }} + {{- if 
.Values.apiGateway.enabled }} + -api-gateway-controller=true \ + {{- end }} - {{- if .Values.global.enableConsulNamespaces }} - -enable-namespaces=true \ - {{- /* syncCatalog must be enabled to set sync flags */}} - {{- if (or (and (ne (.Values.syncCatalog.enabled | toString) "-") .Values.syncCatalog.enabled) (and (eq (.Values.syncCatalog.enabled | toString) "-") .Values.global.enabled)) }} - {{- if .Values.syncCatalog.consulNamespaces.consulDestinationNamespace }} - -consul-sync-destination-namespace={{ .Values.syncCatalog.consulNamespaces.consulDestinationNamespace }} \ - {{- end }} - {{- if .Values.syncCatalog.consulNamespaces.mirroringK8S }} - -enable-sync-k8s-namespace-mirroring=true \ - {{- if .Values.syncCatalog.consulNamespaces.mirroringK8SPrefix }} - -sync-k8s-namespace-mirroring-prefix={{ .Values.syncCatalog.consulNamespaces.mirroringK8SPrefix }} \ - {{- end }} - {{- end }} - {{- end }} + {{- if .Values.global.enableConsulNamespaces }} + -enable-namespaces=true \ - {{- /* connectInject must be enabled to set inject flags */}} - {{- if (or (and (ne (.Values.connectInject.enabled | toString) "-") .Values.connectInject.enabled) (and (eq (.Values.connectInject.enabled | toString) "-") .Values.global.enabled)) }} - {{- if .Values.connectInject.consulNamespaces.consulDestinationNamespace }} - -consul-inject-destination-namespace={{ .Values.connectInject.consulNamespaces.consulDestinationNamespace }} \ - {{- end }} - {{- if .Values.connectInject.consulNamespaces.mirroringK8S }} - -enable-inject-k8s-namespace-mirroring=true \ - {{- if .Values.connectInject.consulNamespaces.mirroringK8SPrefix }} - -inject-k8s-namespace-mirroring-prefix={{ .Values.connectInject.consulNamespaces.mirroringK8SPrefix }} \ - {{- end }} - {{- end }} - {{- end }} - {{- end }} - resources: - requests: - memory: "50Mi" - cpu: "50m" - limits: - memory: "50Mi" - cpu: "50m" + {{- /* syncCatalog must be enabled to set sync flags */}} + {{- if (or (and (ne (.Values.syncCatalog.enabled | toString) "-") .Values.syncCatalog.enabled) (and (eq (.Values.syncCatalog.enabled | toString) "-") .Values.global.enabled)) }} + {{- if .Values.syncCatalog.consulNamespaces.consulDestinationNamespace }} + -consul-sync-destination-namespace={{ .Values.syncCatalog.consulNamespaces.consulDestinationNamespace }} \ + {{- end }} + {{- if .Values.syncCatalog.consulNamespaces.mirroringK8S }} + -enable-sync-k8s-namespace-mirroring=true \ + {{- if .Values.syncCatalog.consulNamespaces.mirroringK8SPrefix }} + -sync-k8s-namespace-mirroring-prefix={{ .Values.syncCatalog.consulNamespaces.mirroringK8SPrefix }} \ + {{- end }} + {{- end }} + {{- end }} + + {{- /* connectInject must be enabled to set inject flags */}} + {{- if (or (and (ne (.Values.connectInject.enabled | toString) "-") .Values.connectInject.enabled) (and (eq (.Values.connectInject.enabled | toString) "-") .Values.global.enabled)) }} + {{- if .Values.connectInject.consulNamespaces.consulDestinationNamespace }} + -consul-inject-destination-namespace={{ .Values.connectInject.consulNamespaces.consulDestinationNamespace }} \ + {{- end }} + {{- if .Values.connectInject.consulNamespaces.mirroringK8S }} + -enable-inject-k8s-namespace-mirroring=true \ + {{- if .Values.connectInject.consulNamespaces.mirroringK8SPrefix }} + -inject-k8s-namespace-mirroring-prefix={{ .Values.connectInject.consulNamespaces.mirroringK8SPrefix }} \ + {{- end }} + {{- end }} + {{- end }} + + {{- end }} + resources: + requests: + memory: "50Mi" + cpu: "50m" + limits: + memory: "50Mi" + cpu: "50m" {{- if 
.Values.global.acls.tolerations }} tolerations: {{ tpl .Values.global.acls.tolerations . | indent 8 | trim }} diff --git a/charts/consul/templates/server-config-configmap.yaml b/charts/consul/templates/server-config-configmap.yaml index e37d7f4841..e35311a9c7 100644 --- a/charts/consul/templates/server-config-configmap.yaml +++ b/charts/consul/templates/server-config-configmap.yaml @@ -27,13 +27,8 @@ data: "data_dir": "/consul/data", "domain": "{{ .Values.global.domain }}", "ports": { - {{- if not .Values.global.tls.enabled }} - "grpc": 8502, - {{- end }} - {{- if .Values.global.tls.enabled }} - "grpc_tls": 8502, - {{- end }} - "serf_lan": {{ .Values.server.ports.serflan.port }} + "serf_lan": {{ .Values.server.ports.serflan.port }}, + "grpc": 8503 }, "recursors": {{ .Values.global.recursors | toJson }}, "retry_join": ["{{template "consul.fullname" . }}-server.{{ .Release.Namespace }}.svc:{{ .Values.server.ports.serflan.port }}"], diff --git a/charts/consul/templates/server-podsecuritypolicy.yaml b/charts/consul/templates/server-podsecuritypolicy.yaml index 09e8d75bd1..507a07179f 100644 --- a/charts/consul/templates/server-podsecuritypolicy.yaml +++ b/charts/consul/templates/server-podsecuritypolicy.yaml @@ -35,8 +35,8 @@ spec: max: {{ .Values.server.ports.serflan.port }} - min: 8302 max: 8302 - - min: 8502 - max: 8502 + - min: 8503 + max: 8503 {{- end }} hostIPC: false hostPID: false diff --git a/charts/consul/templates/server-service.yaml b/charts/consul/templates/server-service.yaml index a392f0e76b..4b1c714c1b 100644 --- a/charts/consul/templates/server-service.yaml +++ b/charts/consul/templates/server-service.yaml @@ -19,6 +19,10 @@ metadata: {{- if .Values.server.service.annotations }} {{ tpl .Values.server.service.annotations . | nindent 4 | trim }} {{- end }} + # This must be set in addition to publishNotReadyAddresses due + # to an open issue where it may not work: + # https://github.com/kubernetes/kubernetes/issues/58662 + service.alpha.kubernetes.io/tolerate-unready-endpoints: "true" spec: clusterIP: None # We want the servers to become available even if they're not ready @@ -36,8 +40,8 @@ spec: targetPort: 8501 {{- end }} - name: grpc - port: 8502 - targetPort: 8502 + port: 8503 + targetPort: 8503 - name: serflan-tcp protocol: "TCP" port: 8301 diff --git a/charts/consul/templates/server-snapshot-agent-configmap.yaml b/charts/consul/templates/server-snapshot-agent-configmap.yaml deleted file mode 100644 index da68d1509c..0000000000 --- a/charts/consul/templates/server-snapshot-agent-configmap.yaml +++ /dev/null @@ -1,24 +0,0 @@ -{{- if .Values.server.snapshotAgent.enabled }} -apiVersion: v1 -kind: ConfigMap -metadata: - name: {{ template "consul.fullname" . }}-snapshot-agent-config - namespace: {{ .Release.Namespace }} - labels: - app: {{ template "consul.name" . }} - chart: {{ template "consul.chart" . }} - heritage: {{ .Release.Service }} - release: {{ .Release.Name }} - component: server -data: - snapshot-login.json: | - { - "snapshot_agent": { - "login": { - "auth_method": "{{ template "consul.fullname" . 
}}-k8s-component-auth-method", - "bearer_token_file": "/var/run/secrets/kubernetes.io/serviceaccount/token", - "meta": {"component": "snapshot-agent"} - } - } - } -{{- end }} diff --git a/charts/consul/templates/server-statefulset.yaml b/charts/consul/templates/server-statefulset.yaml index 23894c4a04..71b424cc68 100644 --- a/charts/consul/templates/server-statefulset.yaml +++ b/charts/consul/templates/server-statefulset.yaml @@ -10,15 +10,12 @@ {{- if (and .Values.global.secretsBackend.vault.enabled (not .Values.global.secretsBackend.vault.consulServerRole)) }}{{ fail "global.secretsBackend.vault.consulServerRole must be provided if global.secretsBackend.vault.enabled=true." }}{{ end -}} {{- if (and .Values.server.serverCert.secretName (not .Values.global.tls.caCert.secretName)) }}{{ fail "If server.serverCert.secretName is provided, global.tls.caCert.secretName must also be provided" }}{{ end }} {{- if (and (and .Values.global.secretsBackend.vault.enabled .Values.global.tls.enabled) (not .Values.global.tls.caCert.secretName)) }}{{ fail "global.tls.caCert.secretName must be provided if global.tls.enabled=true and global.secretsBackend.vault.enabled=true." }}{{ end -}} +{{- if (and (and .Values.global.secretsBackend.vault.enabled .Values.global.tls.enabled) (not .Values.global.tls.enableAutoEncrypt)) }}{{ fail "global.tls.enableAutoEncrypt must be true if global.secretsBackend.vault.enabled=true and global.tls.enabled=true" }}{{ end -}} +{{- if (and (and .Values.global.secretsBackend.vault.enabled .Values.global.tls.enabled) (not .Values.global.secretsBackend.vault.consulCARole)) }}{{ fail "global.secretsBackend.vault.consulCARole must be provided if global.secretsBackend.vault.enabled=true and global.tls.enabled=true" }}{{ end -}} {{- if (and .Values.global.enterpriseLicense.secretName (not .Values.global.enterpriseLicense.secretKey)) }}{{fail "enterpriseLicense.secretKey and secretName must both be specified." }}{{ end -}} {{- if (and (not .Values.global.enterpriseLicense.secretName) .Values.global.enterpriseLicense.secretKey) }}{{fail "enterpriseLicense.secretKey and secretName must both be specified." }}{{ end -}} {{- if (and .Values.global.acls.bootstrapToken.secretName (not .Values.global.acls.bootstrapToken.secretKey)) }}{{fail "both global.acls.bootstrapToken.secretKey and global.acls.bootstrapToken.secretName must be set if one of them is provided." }}{{ end -}} {{- if (and (not .Values.global.acls.bootstrapToken.secretName) .Values.global.acls.bootstrapToken.secretKey) }}{{fail "both global.acls.bootstrapToken.secretKey and global.acls.bootstrapToken.secretName must be set if one of them is provided." }}{{ end -}} -{{- if .Values.server.snapshotAgent.enabled -}} -{{- if or (and .Values.server.snapshotAgent.configSecret.secretName (not .Values.server.snapshotAgent.configSecret.secretKey)) (and (not .Values.server.snapshotAgent.configSecret.secretName) .Values.server.snapshotAgent.configSecret.secretKey) }}{{fail "server.snapshotAgent.configSecret.secretKey and server.snapshotAgent.configSecret.secretName must both be specified." }}{{ end -}} -{{- end -}} -{{ template "consul.validateRequiredCloudSecretsExist" . }} -{{ template "consul.validateCloudSecretKeys" . }} # StatefulSet to run the actual Consul server cluster. apiVersion: apps/v1 kind: StatefulSet @@ -98,12 +95,6 @@ spec: "vault.hashicorp.com/agent-inject-template-enterpriselicense.txt": {{ template "consul.vaultSecretTemplate" . 
}} {{- end }} {{- end }} - {{- if .Values.server.snapshotAgent.configSecret.secretName }} - {{- with .Values.server.snapshotAgent.configSecret }} - "vault.hashicorp.com/agent-inject-secret-snapshot-agent-config.json": "{{ .secretName }}" - "vault.hashicorp.com/agent-inject-template-snapshot-agent-config.json": {{ template "consul.vaultSecretTemplate" . }} - {{- end }} - {{- end }} {{- end }} "consul.hashicorp.com/connect-inject": "false" "consul.hashicorp.com/config-checksum": {{ include (print $.Template.BasePath "/server-config-configmap.yaml") . | sha256sum }} @@ -172,26 +163,6 @@ spec: - key: {{ .Values.global.secretsBackend.vault.ca.secretKey }} path: tls.crt {{- end }} - {{- if .Values.server.snapshotAgent.enabled }} - {{- if .Values.global.acls.manageSystemACLs }} - - name: snapshot-agent-config - configMap: - name: {{ template "consul.fullname" . }}-snapshot-agent-config - {{- end }} - {{- if (and .Values.server.snapshotAgent.configSecret.secretName .Values.server.snapshotAgent.configSecret.secretKey (not .Values.global.secretsBackend.vault.enabled)) }} - - name: snapshot-agent-user-config - secret: - secretName: {{ .Values.server.snapshotAgent.configSecret.secretName }} - items: - - key: {{ .Values.server.snapshotAgent.configSecret.secretKey }} - path: snapshot-config.json - {{- end }} - {{- if .Values.server.snapshotAgent.caCert }} - - name: extra-ssl-certs - emptyDir: - medium: "Memory" - {{- end }} - {{- end }} {{- range .Values.server.extraVolumes }} - name: userconfig-{{ .name }} {{ .type }}: @@ -283,55 +254,6 @@ spec: name: {{ .Values.global.acls.replicationToken.secretName | quote }} key: {{ .Values.global.acls.replicationToken.secretKey | quote }} {{- end }} - {{- if .Values.global.cloud.enabled}} - # These are mounted as secrets so that the consul server agent can use them. - # - the hcp-go-sdk in consul agent will already look for HCP_CLIENT_ID, HCP_CLIENT_SECRET, HCP_AUTH_URL, - # HCP_SCADA_ADDRESS, and HCP_API_HOST. so nothing more needs to be done. - # - HCP_RESOURCE_ID is created for use in the - # `-hcl="cloud { resource_id = \"${HCP_RESOURCE_ID}\" }"` logic in the command below. 
- {{- if .Values.global.cloud.clientId.secretName }} - - name: HCP_CLIENT_ID - valueFrom: - secretKeyRef: - name: {{ .Values.global.cloud.clientId.secretName }} - key: {{ .Values.global.cloud.clientId.secretKey }} - {{- end }} - {{- if .Values.global.cloud.clientSecret.secretName }} - - name: HCP_CLIENT_SECRET - valueFrom: - secretKeyRef: - name: {{ .Values.global.cloud.clientSecret.secretName }} - key: {{ .Values.global.cloud.clientSecret.secretKey }} - {{- end}} - {{- if .Values.global.cloud.resourceId.secretName }} - - name: HCP_RESOURCE_ID - valueFrom: - secretKeyRef: - name: {{ .Values.global.cloud.resourceId.secretName }} - key: {{ .Values.global.cloud.resourceId.secretKey }} - {{- end }} - {{- if .Values.global.cloud.authUrl.secretName }} - - name: HCP_AUTH_URL - valueFrom: - secretKeyRef: - name: {{ .Values.global.cloud.authUrl.secretName }} - key: {{ .Values.global.cloud.authUrl.secretKey }} - {{- end}} - {{- if .Values.global.cloud.apiHost.secretName }} - - name: HCP_API_HOST - valueFrom: - secretKeyRef: - name: {{ .Values.global.cloud.apiHost.secretName }} - key: {{ .Values.global.cloud.apiHost.secretKey }} - {{- end}} - {{- if .Values.global.cloud.scadaAddress.secretName }} - - name: HCP_SCADA_ADDRESS - valueFrom: - secretKeyRef: - name: {{ .Values.global.cloud.scadaAddress.secretName }} - key: {{ .Values.global.cloud.scadaAddress.secretKey }} - {{- end}} - {{- end }} {{- include "consul.extraEnvironmentVars" .Values.server | nindent 12 }} command: - "/bin/sh" @@ -340,6 +262,10 @@ spec: {{- if and .Values.global.secretsBackend.vault.enabled .Values.global.gossipEncryption.secretName }} GOSSIP_KEY=`cat /vault/secrets/gossip.txt` {{- end }} + + {{- if (and .Values.dns.enabled .Values.dns.enableRedirection) }} + {{ template "consul.recursors" }} + {{- end }} {{ template "consul.extraconfig" }} @@ -356,6 +282,9 @@ spec: -hcl="acl { tokens { agent = \"${ACL_REPLICATION_TOKEN}\", replication = \"${ACL_REPLICATION_TOKEN}\" } }" \ {{- end }} {{- end }} + {{- if (and .Values.dns.enabled .Values.dns.enableRedirection) }} + $recursor_flags \ + {{- end }} {{- if and .Values.global.secretsBackend.vault.enabled .Values.global.acls.bootstrapToken.secretName }} -config-file=/vault/secrets/bootstrap-token-config.hcl \ {{- else if (and (not .Values.global.secretsBackend.vault.enabled) .Values.global.acls.bootstrapToken.secretName) }} @@ -370,9 +299,6 @@ spec: {{- end }} {{- end }} -config-file=/consul/extra-config/extra-from-values.json - {{- if and .Values.global.cloud.enabled .Values.global.cloud.resourceId.secretName }} - -hcl="cloud { resource_id = \"${HCP_RESOURCE_ID}\" }" - {{- end }} volumeMounts: - name: data-{{ .Release.Namespace | trunc 58 | trimSuffix "-" }} mountPath: /consul/data @@ -412,12 +338,11 @@ spec: - name: https containerPort: 8501 {{- end }} - - name: grpc - containerPort: 8502 + - containerPort: 8503 {{- if .Values.server.exposeGossipAndRPCPorts }} - hostPort: 8502 + hostPort: 8503 {{- end }} - protocol: "TCP" + name: grpc - name: serflan-tcp containerPort: {{ .Values.server.ports.serflan.port }} {{- if .Values.server.exposeGossipAndRPCPorts }} @@ -488,87 +413,6 @@ spec: {{- if .Values.server.extraContainers }} {{ toYaml .Values.server.extraContainers | nindent 8 }} {{- end }} - {{- if .Values.server.snapshotAgent.enabled }} - - name: consul-snapshot-agent - image: "{{ default .Values.global.image .Values.server.image }}" - env: - {{- if .Values.server.snapshotAgent.caCert }} - - name: SSL_CERT_DIR - value: "/etc/ssl/certs:/extra-ssl-certs" - {{- end }} - {{- if 
.Values.global.tls.enabled }}
- - name: CONSUL_HTTP_ADDR
- value: https://127.0.0.1:8501
- - name: CONSUL_CACERT
- {{- if .Values.global.secretsBackend.vault.enabled }}
- value: /vault/secrets/serverca.crt
- {{- else }}
- value: /consul/tls/ca/tls.crt
- {{- end }}
- {{- else }}
- - name: CONSUL_HTTP_ADDR
- value: http://127.0.0.1:8500
- {{- end }}
- {{- if (and .Values.global.enterpriseLicense.secretName .Values.global.enterpriseLicense.secretKey .Values.global.enterpriseLicense.enableLicenseAutoload (not .Values.global.acls.manageSystemACLs)) }}
- - name: CONSUL_LICENSE_PATH
- {{- if .Values.global.secretsBackend.vault.enabled }}
- value: /vault/secrets/enterpriselicense.txt
- {{- else }}
- value: /consul/license/{{ .Values.global.enterpriseLicense.secretKey }}
- {{- end }}
- {{- end }}
- command:
- - "/bin/sh"
- - "-ec"
- - |
- {{- if .Values.server.snapshotAgent.caCert }}
- cat <<EOF > /extra-ssl-certs/custom-ca.pem
- {{- .Values.server.snapshotAgent.caCert | nindent 14 }}
- EOF
- {{- end }}
- exec /bin/consul snapshot agent \
- -interval={{ .Values.server.snapshotAgent.interval }} \
- {{- if .Values.global.acls.manageSystemACLs }}
- -config-file=/consul/config/snapshot-login.json \
- {{- end }}
- {{- if (and .Values.server.snapshotAgent.configSecret.secretName .Values.server.snapshotAgent.configSecret.secretKey) }}
- {{- if .Values.global.secretsBackend.vault.enabled }}
- -config-file=/vault/secrets/snapshot-agent-config.json \
- {{- else }}
- -config-dir=/consul/user-config \
- {{- end }}
- {{- end }}
- volumeMounts:
- {{- if .Values.global.acls.manageSystemACLs }}
- - name: snapshot-agent-config
- mountPath: /consul/config
- readOnly: true
- {{- end }}
- {{- if .Values.server.snapshotAgent.caCert }}
- - name: extra-ssl-certs
- mountPath: /extra-ssl-certs
- readOnly: false
- {{- end }}
- {{- if (and .Values.server.snapshotAgent.configSecret.secretName .Values.server.snapshotAgent.configSecret.secretKey (not .Values.global.secretsBackend.vault.enabled)) }}
- - name: snapshot-agent-user-config
- mountPath: /consul/user-config
- readOnly: true
- {{- end }}
- {{- if (and .Values.global.enterpriseLicense.secretName .Values.global.enterpriseLicense.secretKey .Values.global.enterpriseLicense.enableLicenseAutoload (not .Values.global.secretsBackend.vault.enabled) (not .Values.global.acls.manageSystemACLs))}}
- - name: consul-license
- mountPath: /consul/license
- readOnly: true
- {{- end }}
- {{- if and .Values.global.tls.enabled (not .Values.global.secretsBackend.vault.enabled) }}
- - name: consul-ca-cert
- mountPath: /consul/tls/ca
- readOnly: true
- {{- end }}
- {{- with .Values.server.snapshotAgent.resources }}
- resources:
- {{- toYaml . | nindent 12 }}
- {{- end }}
- {{- end }}
{{- if .Values.server.nodeSelector }}
nodeSelector:
{{ tpl .Values.server.nodeSelector .
| indent 8 | trim }} diff --git a/charts/consul/templates/sync-catalog-deployment.yaml b/charts/consul/templates/sync-catalog-deployment.yaml index a756a817bd..4c8b4359da 100644 --- a/charts/consul/templates/sync-catalog-deployment.yaml +++ b/charts/consul/templates/sync-catalog-deployment.yaml @@ -1,7 +1,6 @@ +{{- $clientEnabled := (or (and (ne (.Values.client.enabled | toString) "-") .Values.client.enabled) (and (eq (.Values.client.enabled | toString) "-") .Values.global.enabled)) }} {{- if (or (and (ne (.Values.syncCatalog.enabled | toString) "-") .Values.syncCatalog.enabled) (and (eq (.Values.syncCatalog.enabled | toString) "-") .Values.global.enabled)) }} {{- template "consul.reservedNamesFailer" (list .Values.syncCatalog.consulNamespaces.consulDestinationNamespace "syncCatalog.consulNamespaces.consulDestinationNamespace") }} -{{ template "consul.validateRequiredCloudSecretsExist" . }} -{{ template "consul.validateCloudSecretKeys" . }} # The deployment for running the sync-catalog pod apiVersion: apps/v1 kind: Deployment @@ -54,6 +53,9 @@ spec: spec: serviceAccountName: {{ template "consul.fullname" . }}-sync-catalog volumes: + - name: consul-data + emptyDir: + medium: "Memory" {{- if .Values.global.tls.enabled }} {{- if not (and .Values.externalServers.enabled .Values.externalServers.useSystemRoots) }} - name: consul-ca-cert @@ -67,28 +69,24 @@ spec: - key: {{ default "tls.crt" .Values.global.tls.caCert.secretKey }} path: tls.crt {{- end }} + {{- if (and .Values.global.tls.enableAutoEncrypt $clientEnabled) }} + - name: consul-auto-encrypt-ca-cert + emptyDir: + medium: "Memory" + {{- end }} {{- end }} containers: - name: sync-catalog image: "{{ default .Values.global.imageK8S .Values.syncCatalog.image }}" env: - {{- include "consul.consulK8sConsulServerEnvVars" . | nindent 12 }} {{- if .Values.global.acls.manageSystemACLs }} - - name: CONSUL_LOGIN_AUTH_METHOD - {{- if and .Values.global.federation.enabled .Values.global.federation.primaryDatacenter .Values.global.enableConsulNamespaces }} - value: {{ template "consul.fullname" . }}-k8s-component-auth-method-{{ .Values.global.datacenter }} - {{- else }} - value: {{ template "consul.fullname" . }}-k8s-component-auth-method - {{- end }} - - name: CONSUL_LOGIN_DATACENTER - {{- if and .Values.global.federation.enabled .Values.global.federation.primaryDatacenter .Values.global.enableConsulNamespaces }} - value: {{ .Values.global.federation.primaryDatacenter }} - {{- else }} - value: {{ .Values.global.datacenter }} - {{- end }} - - name: CONSUL_LOGIN_META - value: "component=sync-catalog,pod=$(NAMESPACE)/$(POD_NAME)" + - name: CONSUL_HTTP_TOKEN_FILE + value: "/consul/login/acl-token" {{- end }} + - name: HOST_IP + valueFrom: + fieldRef: + fieldPath: status.hostIP - name: NAMESPACE valueFrom: fieldRef: @@ -100,9 +98,35 @@ spec: name: {{ .Values.syncCatalog.aclSyncToken.secretName }} key: {{ .Values.syncCatalog.aclSyncToken.secretKey }} {{- end }} + {{- if .Values.global.tls.enabled }} + {{- if .Values.client.enabled }} + - name: CONSUL_HTTP_ADDR + value: https://$(HOST_IP):8501 + {{- else }} + - name: CONSUL_HTTP_ADDR + value: https://{{ template "consul.fullname" . }}-server:8501 + {{- end }} + - name: CONSUL_CACERT + value: /consul/tls/ca/tls.crt + {{- else }} + {{- if .Values.client.enabled }} + - name: CONSUL_HTTP_ADDR + value: http://$(HOST_IP):8500 + {{- else }} + - name: CONSUL_HTTP_ADDR + value: http://{{ template "consul.fullname" . 
}}-server:8500 + {{- end }} + {{- end }} volumeMounts: + - mountPath: /consul/login + name: consul-data + readOnly: true {{- if .Values.global.tls.enabled }} + {{- if and .Values.global.tls.enableAutoEncrypt $clientEnabled }} + - name: consul-auto-encrypt-ca-cert + {{- else }} - name: consul-ca-cert + {{- end }} mountPath: /consul/tls/ca readOnly: true {{- end }} @@ -111,6 +135,7 @@ spec: - "-ec" - | consul-k8s-control-plane sync-catalog \ + -consul-api-timeout={{ .Values.global.consulAPITimeout }} \ -log-level={{ default .Values.global.logLevel .Values.syncCatalog.logLevel }} \ -log-json={{ .Values.global.logJSON }} \ -k8s-default-sync={{ .Values.syncCatalog.default }} \ @@ -173,6 +198,16 @@ spec: -consul-cross-namespace-acl-policy=cross-namespace-policy \ {{- end }} {{- end }} + {{- if .Values.global.acls.manageSystemACLs }} + lifecycle: + preStop: + exec: + command: + - "/bin/sh" + - "-ec" + - | + consul-k8s-control-plane consul-logout -consul-api-timeout={{ .Values.global.consulAPITimeout }} + {{- end }} livenessProbe: httpGet: path: /health/ready @@ -197,6 +232,69 @@ spec: resources: {{- toYaml . | nindent 12 }} {{- end }} + {{- if or .Values.global.acls.manageSystemACLs (and .Values.global.tls.enabled .Values.global.tls.enableAutoEncrypt $clientEnabled) }} + initContainers: + {{- if (and .Values.global.tls.enabled .Values.global.tls.enableAutoEncrypt $clientEnabled) }} + {{- include "consul.getAutoEncryptClientCA" . | nindent 6 }} + {{- end }} + {{- if .Values.global.acls.manageSystemACLs }} + - name: sync-catalog-acl-init + env: + - name: HOST_IP + valueFrom: + fieldRef: + fieldPath: status.hostIP + {{- if .Values.global.tls.enabled }} + - name: CONSUL_CACERT + value: /consul/tls/ca/tls.crt + {{- end }} + - name: CONSUL_HTTP_ADDR + {{- if .Values.global.tls.enabled }} + value: https://$(HOST_IP):8501 + {{- else }} + value: http://$(HOST_IP):8500 + {{- end }} + image: {{ .Values.global.imageK8S }} + volumeMounts: + - mountPath: /consul/login + name: consul-data + readOnly: false + {{- if .Values.global.tls.enabled }} + {{- if .Values.global.tls.enableAutoEncrypt }} + - name: consul-auto-encrypt-ca-cert + {{- else }} + - name: consul-ca-cert + {{- end }} + mountPath: /consul/tls/ca + readOnly: true + {{- end }} + command: + - "/bin/sh" + - "-ec" + - | + consul-k8s-control-plane acl-init \ + -component-name=sync-catalog \ + {{- if and .Values.global.federation.enabled .Values.global.federation.primaryDatacenter .Values.global.enableConsulNamespaces }} + -acl-auth-method={{ template "consul.fullname" . }}-k8s-component-auth-method-{{ .Values.global.datacenter }} \ + -primary-datacenter={{ .Values.global.federation.primaryDatacenter }} \ + {{- else }} + -acl-auth-method={{ template "consul.fullname" . 
}}-k8s-component-auth-method \ + {{- end }} + {{- if .Values.global.adminPartitions.enabled }} + -partition={{ .Values.global.adminPartitions.name }} \ + {{- end }} + -consul-api-timeout={{ .Values.global.consulAPITimeout }} \ + -log-level={{ default .Values.global.logLevel .Values.syncCatalog.logLevel }} \ + -log-json={{ .Values.global.logJSON }} + resources: + requests: + memory: "25Mi" + cpu: "50m" + limits: + memory: "25Mi" + cpu: "50m" + {{- end }} + {{- end }} {{- if .Values.syncCatalog.priorityClassName }} priorityClassName: {{ .Values.syncCatalog.priorityClassName | quote }} {{- end }} diff --git a/charts/consul/templates/terminating-gateways-deployment.yaml b/charts/consul/templates/terminating-gateways-deployment.yaml index 52baf20903..acdab29af8 100644 --- a/charts/consul/templates/terminating-gateways-deployment.yaml +++ b/charts/consul/templates/terminating-gateways-deployment.yaml @@ -1,8 +1,9 @@ {{- if .Values.terminatingGateways.enabled }} {{- if not .Values.connectInject.enabled }}{{ fail "connectInject.enabled must be true" }}{{ end -}} +{{- if not .Values.client.grpc }}{{ fail "client.grpc must be true" }}{{ end -}} {{- if and .Values.global.adminPartitions.enabled (not .Values.global.enableConsulNamespaces) }}{{ fail "global.enableConsulNamespaces must be true if global.adminPartitions.enabled=true" }}{{ end }} -{{ template "consul.validateRequiredCloudSecretsExist" . }} -{{ template "consul.validateCloudSecretKeys" . }} +{{- if not (or (and (ne (.Values.client.enabled | toString) "-") .Values.client.enabled) (and (eq (.Values.client.enabled | toString) "-") .Values.global.enabled)) }}{{ fail "clients must be enabled" }}{{ end -}} +{{- if .Values.global.lifecycleSidecarContainer }}{{ fail "global.lifecycleSidecarContainer has been renamed to global.consulSidecarContainer. Please set values using global.consulSidecarContainer." }}{{ end }} {{- $root := . 
}} {{- $defaults := .Values.terminatingGateways.defaults }} @@ -67,14 +68,7 @@ spec: release: {{ $root.Release.Name }} component: terminating-gateway terminating-gateway-name: {{ template "consul.fullname" $root }}-{{ .name }} - consul.hashicorp.com/connect-inject-managed-by: consul-k8s-endpoints-controller annotations: - "consul.hashicorp.com/connect-inject": "false" - "consul.hashicorp.com/gateway-kind": "terminating-gateway" - "consul.hashicorp.com/gateway-consul-service-name": "{{ .name }}" - {{- if $root.Values.global.enableConsulNamespaces }} - "consul.hashicorp.com/gateway-namespace": {{ (default $defaults.consulNamespace .consulNamespace) }} - {{- end }} {{- if (and $root.Values.global.secretsBackend.vault.enabled $root.Values.global.tls.enabled) }} "vault.hashicorp.com/agent-init-first": "true" "vault.hashicorp.com/agent-inject": "true" @@ -89,6 +83,7 @@ spec: {{ tpl $root.Values.global.secretsBackend.vault.agentAnnotations $root | nindent 8 | trim }} {{- end }} {{- end }} + "consul.hashicorp.com/connect-inject": "false" {{- if (and $root.Values.global.metrics.enabled $root.Values.global.metrics.enableGatewayMetrics) }} "prometheus.io/scrape": "true" "prometheus.io/path": "/metrics" @@ -117,86 +112,156 @@ spec: terminationGracePeriodSeconds: 10 serviceAccountName: {{ template "consul.fullname" $root }}-{{ .name }} volumes: - - name: consul-service - emptyDir: - medium: "Memory" - {{- range (default $defaults.extraVolumes .extraVolumes) }} - - name: userconfig-{{ .name }} - {{ .type }}: - {{- if (eq .type "configMap") }} - name: {{ .name }} - {{- else if (eq .type "secret") }} - secretName: {{ .name }} - {{- end }} - {{- with .items }} - items: - {{- range . }} - - key: {{.key}} - path: {{.path}} - {{- end }} + - name: consul-bin + emptyDir: {} + - name: consul-service + emptyDir: + medium: "Memory" + {{- range (default $defaults.extraVolumes .extraVolumes) }} + - name: userconfig-{{ .name }} + {{ .type }}: + {{- if (eq .type "configMap") }} + name: {{ .name }} + {{- else if (eq .type "secret") }} + secretName: {{ .name }} + {{- end }} + {{- with .items }} + items: + {{- range . }} + - key: {{.key}} + path: {{.path}} + {{- end }} + {{- end }} + {{- end }} + {{- if $root.Values.global.tls.enabled }} + {{- if not (and $root.Values.externalServers.enabled $root.Values.externalServers.useSystemRoots) }} + - name: consul-ca-cert + secret: + {{- if $root.Values.global.tls.caCert.secretName }} + secretName: {{ $root.Values.global.tls.caCert.secretName }} + {{- else }} + secretName: {{ template "consul.fullname" $root }}-ca-cert + {{- end }} + items: + - key: {{ default "tls.crt" $root.Values.global.tls.caCert.secretKey }} + path: tls.crt + {{- end }} + {{- if $root.Values.global.tls.enableAutoEncrypt }} + - name: consul-auto-encrypt-ca-cert + emptyDir: + medium: "Memory" + {{- end }} + {{- end }} + initContainers: + # We use the Envoy image as our base image so we use an init container to + # copy the Consul binary to a shared directory that can be used when + # starting Envoy. 
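+ # For illustration only (example numbers, not verified chart defaults), a
+ # values.yaml sketch that bounds this init container through the
+ # initCopyConsulContainer override handled just below:
+ #
+ #   terminatingGateways:
+ #     defaults:
+ #       initCopyConsulContainer:
+ #         resources:
+ #           requests:
+ #             memory: "25Mi"
+ #             cpu: "50m"
+ #           limits:
+ #             memory: "150Mi"
+ #             cpu: "50m"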
+ - name: copy-consul-bin + image: {{ $root.Values.global.image | quote }} + command: + - cp + - /bin/consul + - /consul-bin/consul + volumeMounts: + - name: consul-bin + mountPath: /consul-bin + {{- $initContainer := .initCopyConsulContainer }} + {{- if (or $initContainer $defaults.initCopyConsulContainer) }} + {{- if (default $defaults.initCopyConsulContainer.resources $initContainer.resources) }} + resources: {{ toYaml (default $defaults.initCopyConsulContainer.resources $initContainer.resources) | nindent 12 }} {{- end }} - {{- end }} - {{- if $root.Values.global.tls.enabled }} - {{- if not (and $root.Values.externalServers.enabled $root.Values.externalServers.useSystemRoots) }} - - name: consul-ca-cert - secret: - {{- if $root.Values.global.tls.caCert.secretName }} - secretName: {{ $root.Values.global.tls.caCert.secretName }} - {{- else }} - secretName: {{ template "consul.fullname" $root }}-ca-cert {{- end }} - items: - - key: {{ default "tls.crt" $root.Values.global.tls.caCert.secretKey }} - path: tls.crt - {{- end }} - {{- end }} - initContainers: + {{- if (and $root.Values.global.tls.enabled $root.Values.global.tls.enableAutoEncrypt) }} + {{- include "consul.getAutoEncryptClientCA" $root | nindent 8 }} + {{- end }} # terminating-gateway-init registers the terminating gateway service with Consul. - name: terminating-gateway-init image: {{ $root.Values.global.imageK8S }} env: - - name: NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - {{- include "consul.consulK8sConsulServerEnvVars" $root | nindent 10 }} - {{- if $root.Values.global.enableConsulNamespaces }} - - name: CONSUL_NAMESPACE - value: {{ (default $defaults.consulNamespace .consulNamespace) }} - {{- end }} - {{- if $root.Values.global.acls.manageSystemACLs }} - - name: CONSUL_LOGIN_AUTH_METHOD - value: {{ template "consul.fullname" $root }}-k8s-component-auth-method - - name: CONSUL_LOGIN_DATACENTER - value: {{ $root.Values.global.datacenter }} - - name: CONSUL_LOGIN_META - value: "component=terminating-gateway,pod=$(NAMESPACE)/$(POD_NAME)" - {{- end }} + - name: HOST_IP + valueFrom: + fieldRef: + fieldPath: status.hostIP + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + {{- if $root.Values.global.tls.enabled }} + - name: CONSUL_HTTP_ADDR + value: https://$(HOST_IP):8501 + - name: CONSUL_CACERT + value: /consul/tls/ca/tls.crt + {{- else }} + - name: CONSUL_HTTP_ADDR + value: http://$(HOST_IP):8500 + {{- end }} command: - "/bin/sh" - "-ec" - | - consul-k8s-control-plane connect-init -pod-name=${POD_NAME} -pod-namespace=${NAMESPACE} \ - -gateway-kind="terminating-gateway" \ - -consul-node-name="k8s-service-mesh" \ - -proxy-id-file=/consul/service/proxy-id \ - -service-name={{ .name }} \ + {{- if $root.Values.global.acls.manageSystemACLs }} + consul-k8s-control-plane acl-init \ + -component-name=terminating-gateway/{{ template "consul.fullname" $root }}-{{ .name }} \ + -acl-auth-method={{ template "consul.fullname" $root }}-k8s-component-auth-method \ + {{- if $root.Values.global.adminPartitions.enabled }} + -partition={{ $root.Values.global.adminPartitions.name }} \ + {{- end }} + -token-sink-file=/consul/service/acl-token \ + -consul-api-timeout={{ $root.Values.global.consulAPITimeout }} \ -log-level={{ default $root.Values.global.logLevel }} \ -log-json={{ $root.Values.global.logJSON }} + {{- end }} + + cat > /consul/service/service.hcl << EOF + 
service { + kind = "terminating-gateway" + name = "{{ .name }}" + id = "${POD_NAME}" + {{- if $root.Values.global.enableConsulNamespaces }} + namespace = "{{ (default $defaults.consulNamespace .consulNamespace) }}" + {{- end }} + {{- if $root.Values.global.adminPartitions.enabled }} + partition = "{{ $root.Values.global.adminPartitions.name }}" + {{- end }} + address = "${POD_IP}" + port = 8443 + {{- if (and $root.Values.global.metrics.enabled $root.Values.global.metrics.enableGatewayMetrics) }} + proxy { config { envoy_prometheus_bind_addr = "${POD_IP}:20200" } } + {{- end }} + checks = [ + { + name = "Terminating Gateway Listening" + interval = "10s" + tcp = "${POD_IP}:8443" + deregister_critical_service_after = "6h" + } + ] + } + EOF + + /consul-bin/consul services register \ + {{- if $root.Values.global.acls.manageSystemACLs }} + -token-file=/consul/service/acl-token \ + {{- end }} + /consul/service/service.hcl volumeMounts: - name: consul-service mountPath: /consul/service + - name: consul-bin + mountPath: /consul-bin {{- if $root.Values.global.tls.enabled }} - {{- if not (and $root.Values.externalServers.enabled $root.Values.externalServers.useSystemRoots) }} + {{- if $root.Values.global.tls.enableAutoEncrypt }} + - name: consul-auto-encrypt-ca-cert + {{- else }} - name: consul-ca-cert + {{- end }} mountPath: /consul/tls/ca readOnly: true {{- end }} - {{- end }} resources: requests: memory: "50Mi" @@ -206,85 +271,72 @@ spec: cpu: "50m" containers: - name: terminating-gateway - image: {{ $root.Values.global.imageConsulDataplane | quote }} - volumeMounts: - - name: consul-service - mountPath: /consul/service - readOnly: true - {{- if $root.Values.global.tls.enabled }} - {{- if not (and $root.Values.externalServers.enabled $root.Values.externalServers.useSystemRoots) }} - - name: consul-ca-cert - mountPath: /consul/tls/ca - readOnly: true - {{- end }} - {{- end }} - {{- range (default $defaults.extraVolumes .extraVolumes) }} - - name: userconfig-{{ .name }} - readOnly: true - mountPath: /consul/userconfig/{{ .name }} - {{- end }} + image: {{ $root.Values.global.imageEnvoy | quote }} {{- if (default $defaults.resources .resources) }} resources: {{ toYaml (default $defaults.resources .resources) | nindent 12 }} {{- end }} + volumeMounts: + - name: consul-bin + mountPath: /consul-bin + - mountPath: /consul/service + name: consul-service + readOnly: true + {{- if $root.Values.global.tls.enabled }} + {{- if $root.Values.global.tls.enableAutoEncrypt }} + - name: consul-auto-encrypt-ca-cert + {{- else }} + - name: consul-ca-cert + {{- end }} + mountPath: /consul/tls/ca + readOnly: true + {{- end }} + {{- range (default $defaults.extraVolumes .extraVolumes) }} + - name: userconfig-{{ .name }} + readOnly: true + mountPath: /consul/userconfig/{{ .name }} + {{- end }} env: - - name: NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name + - name: HOST_IP + valueFrom: + fieldRef: + fieldPath: status.hostIP + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + {{- if $root.Values.global.acls.manageSystemACLs }} + - name: CONSUL_HTTP_TOKEN_FILE + value: "/consul/service/acl-token" + {{- end }} + {{- if $root.Values.global.tls.enabled }} + - name: CONSUL_HTTP_ADDR + value: https://$(HOST_IP):8501 + - name: CONSUL_GRPC_ADDR + value: https://$(HOST_IP):8502 + - name: CONSUL_CACERT + value: /consul/tls/ca/tls.crt + {{- else }} + - name: 
CONSUL_HTTP_ADDR + value: http://$(HOST_IP):8500 + - name: CONSUL_GRPC_ADDR + value: $(HOST_IP):8502 + {{- end }} command: - - /bin/sh - - -ec - - | - consul-dataplane \ - {{- if $root.Values.externalServers.enabled }} - -addresses={{ $root.Values.externalServers.hosts | first | quote }} \ - {{- else }} - -addresses="{{ template "consul.fullname" $root }}-server.{{ $root.Release.Namespace }}.svc" \ - {{- end }} - {{- if $root.Values.externalServers.enabled }} - -grpc-port={{ $root.Values.externalServers.grpcPort }} \ - {{- else }} - -grpc-port=8502 \ - {{- end }} - -proxy-service-id=$POD_NAME \ - -service-node-name="k8s-service-mesh" \ - {{- if $root.Values.global.enableConsulNamespaces }} - -service-namespace={{ (default $defaults.consulNamespace .consulNamespace) }} \ - {{- end }} - {{- if and $root.Values.global.tls.enabled }} - {{- if (not (and $root.Values.externalServers.enabled $root.Values.externalServers.useSystemRoots)) }} - -ca-certs=/consul/tls/ca/tls.crt \ - {{- end }} - {{- if and $root.Values.externalServers.enabled $root.Values.externalServers.tlsServerName }} - -tls-server-name={{$root.Values.externalServers.tlsServerName }} \ - {{- else if $root.Values.global.cloud.enabled }} - -tls-server-name=server.{{ $root.Values.global.datacenter}}.{{ $root.Values.global.domain}} \ - {{- end }} - {{- else }} - -tls-disabled \ - {{- end }} - {{- if $root.Values.global.acls.manageSystemACLs }} - -credential-type=login \ - -login-bearer-token-path=/var/run/secrets/kubernetes.io/serviceaccount/token \ - -login-meta=component=terminating-gateway \ - -login-meta=pod=${NAMESPACE}/${POD_NAME} \ - -login-auth-method={{ template "consul.fullname" $root }}-k8s-component-auth-method \ - {{- if $root.Values.global.adminPartitions.enabled }} - -login-partition={{ $root.Values.global.adminPartitions.name }} \ - {{- end }} - {{- end }} - {{- if $root.Values.global.adminPartitions.enabled }} - -service-partition={{ $root.Values.global.adminPartitions.name }} \ - {{- end }} - -log-level={{ default $root.Values.global.logLevel }} \ - -log-json={{ $root.Values.global.logJSON }} \ - {{- if (and $root.Values.global.metrics.enabled $root.Values.global.metrics.enableGatewayMetrics) }} - -telemetry-prom-scrape-path="/metrics" - {{- end }} + - /consul-bin/consul + - connect + - envoy + - -gateway=terminating + - -proxy-id=$(POD_NAME) + {{- if $root.Values.global.enableConsulNamespaces }} + - -namespace={{ default $defaults.consulNamespace .consulNamespace }} + {{- end }} + {{- if $root.Values.global.adminPartitions.enabled }} + - -partition={{ $root.Values.global.adminPartitions.name }} + {{- end }} livenessProbe: tcpSocket: port: 8443 @@ -304,6 +356,78 @@ spec: ports: - name: gateway containerPort: 8443 + lifecycle: + preStop: + exec: + command: + - "/bin/sh" + - "-ec" + - | + /consul-bin/consul services deregister \ + {{- if $root.Values.global.enableConsulNamespaces }} + -namespace={{ default $defaults.consulNamespace .consulNamespace }} \ + {{- end }} + {{- if $root.Values.global.adminPartitions.enabled }} + -partition={{ $root.Values.global.adminPartitions.name }} \ + {{- end }} + -id="${POD_NAME}" + {{- if $root.Values.global.acls.manageSystemACLs }} + - "/consul-bin/consul logout" + {{- end}} + + # consul-sidecar ensures the terminating gateway is always registered with + # the local Consul agent, even if it loses the initial registration. 
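+        # It periodically re-registers the gateway from the definition the
+        # init container wrote to /consul/service/service.hcl, invoking the
+        # consul binary that copy-consul-bin placed in /consul-bin.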
+ - name: consul-sidecar + image: {{ $root.Values.global.imageK8S }} + volumeMounts: + - name: consul-service + mountPath: /consul/service + readOnly: true + - name: consul-bin + mountPath: /consul-bin + {{- if $root.Values.global.tls.enabled }} + {{- if $root.Values.global.tls.enableAutoEncrypt }} + - name: consul-auto-encrypt-ca-cert + {{- else }} + - name: consul-ca-cert + {{- end }} + mountPath: /consul/tls/ca + readOnly: true + {{- end }} + {{- if $root.Values.global.consulSidecarContainer }} + {{- if $root.Values.global.consulSidecarContainer.resources }} + resources: {{ toYaml $root.Values.global.consulSidecarContainer.resources | nindent 12 }} + {{- end }} + {{- end }} + env: + - name: HOST_IP + valueFrom: + fieldRef: + fieldPath: status.hostIP + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + {{- if $root.Values.global.tls.enabled }} + - name: CONSUL_HTTP_ADDR + value: https://$(HOST_IP):8501 + - name: CONSUL_CACERT + value: /consul/tls/ca/tls.crt + {{- else }} + - name: CONSUL_HTTP_ADDR + value: http://$(HOST_IP):8500 + {{- end }} + command: + - consul-k8s-control-plane + - consul-sidecar + - -log-level={{ $root.Values.global.logLevel }} + - -log-json={{ $root.Values.global.logJSON }} + - -service-config=/consul/service/service.hcl + - -consul-binary=/consul-bin/consul + - -consul-api-timeout={{ $root.Values.global.consulAPITimeout }} + {{- if $root.Values.global.acls.manageSystemACLs }} + - -token-file=/consul/service/acl-token + {{- end }} {{- if (default $defaults.priorityClassName .priorityClassName) }} priorityClassName: {{ (default $defaults.priorityClassName .priorityClassName) | quote }} {{- end }} diff --git a/charts/consul/templates/terminating-gateways-service.yaml b/charts/consul/templates/terminating-gateways-service.yaml deleted file mode 100644 index 124900e727..0000000000 --- a/charts/consul/templates/terminating-gateways-service.yaml +++ /dev/null @@ -1,31 +0,0 @@ -{{- if .Values.terminatingGateways.enabled }} - -{{- $root := . }} -{{- $defaults := .Values.terminatingGateways.defaults }} - -{{- range .Values.terminatingGateways.gateways }} - -{{- $service := .service }} -apiVersion: v1 -kind: Service -metadata: - name: {{ template "consul.fullname" $root }}-{{ .name }} - namespace: {{ $root.Release.Namespace }} - labels: - app: {{ template "consul.name" $root }} - chart: {{ template "consul.chart" $root }} - heritage: {{ $root.Release.Service }} - release: {{ $root.Release.Name }} - component: terminating-gateway -spec: - selector: - app: {{ template "consul.name" $root }} - release: "{{ $root.Release.Name }}" - component: terminating-gateway - type: ClusterIP - ports: - - port: 80 - targetPort: 8443 ---- -{{- end }} -{{- end }} diff --git a/charts/consul/test/docker/Test.dockerfile b/charts/consul/test/docker/Test.dockerfile index 85f3a607e3..d60e8b0a24 100644 --- a/charts/consul/test/docker/Test.dockerfile +++ b/charts/consul/test/docker/Test.dockerfile @@ -6,7 +6,7 @@ # a script to configure kubectl, potentially install Helm, and run the tests # manually. This image only has the dependencies pre-installed. 
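+# NOTE: the pinned versions below (Go 1.18 base image, AWS CLI 2.8.3) are
+# deliberate; they presumably match the toolchain pinned on this release
+# branch rather than tracking the latest upstream images.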
-FROM cimg/go:1.19 +FROM cimg/go:1.18 # change the user to root so we can install stuff USER root @@ -61,7 +61,7 @@ RUN curl -sSL https://mirror.openshift.com/pub/openshift-v4/clients/ocp/latest/o && mv /tmp/oc /usr/local/bin/oc # AWS CLI -RUN curl "https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip" -o "awscliv2.zip" \ +RUN curl "https://awscli.amazonaws.com/awscli-exe-linux-x86_64-2.8.3.zip" -o "awscliv2.zip" \ && unzip awscliv2.zip \ && ./aws/install --bin-dir /usr/local/bin \ && rm awscliv2.zip \ @@ -73,4 +73,4 @@ RUN curl -Lo aws-iam-authenticator https://github.com/kubernetes-sigs/aws-iam-au && mv ./aws-iam-authenticator /usr/local/bin/aws-iam-authenticator # change the user back to what circleci/golang image has -USER circleci +USER circleci \ No newline at end of file diff --git a/charts/consul/test/terraform/eks/main.tf b/charts/consul/test/terraform/eks/main.tf index c466334315..9ccc2cdd2b 100644 --- a/charts/consul/test/terraform/eks/main.tf +++ b/charts/consul/test/terraform/eks/main.tf @@ -53,8 +53,8 @@ module "vpc" { module "eks" { count = var.cluster_count - source = "terraform-aws-modules/eks/aws" - version = "17.24.0" + source = "terraform-aws-modules/eks/aws" + version = "17.24.0" kubeconfig_api_version = "client.authentication.k8s.io/v1beta1" cluster_name = "consul-k8s-${random_id.suffix[count.index].dec}" diff --git a/charts/consul/test/unit/api-gateway-controller-deployment.bats b/charts/consul/test/unit/api-gateway-controller-deployment.bats index 4ec5d8c62c..60adc84076 100755 --- a/charts/consul/test/unit/api-gateway-controller-deployment.bats +++ b/charts/consul/test/unit/api-gateway-controller-deployment.bats @@ -15,6 +15,7 @@ load _helpers -s templates/api-gateway-controller-deployment.yaml \ --set 'apiGateway.enabled=true' \ . + [ "$status" -eq 1 ] [[ "$output" =~ "apiGateway.image must be set to enable api gateway" ]] } @@ -903,231 +904,3 @@ load _helpers yq -r '.spec.template.metadata.annotations.foo' | tee /dev/stderr) [ "${actual}" = "bar" ] } - -#-------------------------------------------------------------------- -# global.cloud - -@test "apiGateway/Deployment: fails when global.cloud.enabled is true and global.cloud.clientId.secretName is not set but global.cloud.clientSecret.secretName and global.cloud.resourceId.secretName is set" { - cd `chart_dir` - run helm template \ - -s templates/api-gateway-controller-deployment.yaml \ - --set 'apiGateway.enabled=true' \ - --set 'apiGateway.image=foo' \ - --set 'global.tls.enabled=true' \ - --set 'global.tls.enableAutoEncrypt=true' \ - --set 'global.datacenter=dc-foo' \ - --set 'global.domain=bar' \ - --set 'global.cloud.enabled=true' \ - --set 'global.cloud.clientSecret.secretName=client-id-name' \ - --set 'global.cloud.clientSecret.secretKey=client-id-key' \ - --set 'global.cloud.resourceId.secretName=client-resource-id-name' \ - --set 'global.cloud.resourceId.secretKey=client-resource-id-key' \ - . - [ "$status" -eq 1 ] - [[ "$output" =~ "When global.cloud.enabled is true, global.cloud.resourceId.secretName, global.cloud.clientId.secretName, and global.cloud.clientSecret.secretName must also be set." 
]] -} - -@test "apiGateway/Deployment: fails when global.cloud.enabled is true and global.cloud.clientSecret.secretName is not set but global.cloud.clientId.secretName and global.cloud.resourceId.secretName is set" { - cd `chart_dir` - run helm template \ - -s templates/api-gateway-controller-deployment.yaml \ - --set 'apiGateway.enabled=true' \ - --set 'apiGateway.image=foo' \ - --set 'global.tls.enabled=true' \ - --set 'global.tls.enableAutoEncrypt=true' \ - --set 'global.datacenter=dc-foo' \ - --set 'global.domain=bar' \ - --set 'global.cloud.enabled=true' \ - --set 'global.cloud.clientId.secretName=client-id-name' \ - --set 'global.cloud.clientId.secretKey=client-id-key' \ - --set 'global.cloud.resourceId.secretName=resource-id-name' \ - --set 'global.cloud.resourceId.secretKey=resource-id-key' \ - . - [ "$status" -eq 1 ] - [[ "$output" =~ "When global.cloud.enabled is true, global.cloud.resourceId.secretName, global.cloud.clientId.secretName, and global.cloud.clientSecret.secretName must also be set." ]] -} - -@test "apiGateway/Deployment: fails when global.cloud.enabled is true and global.cloud.resourceId.secretName is not set but global.cloud.clientId.secretName and global.cloud.clientSecret.secretName is set" { - cd `chart_dir` - run helm template \ - -s templates/api-gateway-controller-deployment.yaml \ - --set 'apiGateway.enabled=true' \ - --set 'apiGateway.image=foo' \ - --set 'global.tls.enabled=true' \ - --set 'global.tls.enableAutoEncrypt=true' \ - --set 'global.datacenter=dc-foo' \ - --set 'global.domain=bar' \ - --set 'global.cloud.enabled=true' \ - --set 'global.cloud.clientId.secretName=client-id-name' \ - --set 'global.cloud.clientId.secretKey=client-id-key' \ - --set 'global.cloud.clientSecret.secretName=client-secret-id-name' \ - --set 'global.cloud.clientSecret.secretKey=client-secret-id-key' \ - . - [ "$status" -eq 1 ] - [[ "$output" =~ "When global.cloud.enabled is true, global.cloud.resourceId.secretName, global.cloud.clientId.secretName, and global.cloud.clientSecret.secretName must also be set." ]] -} - -@test "apiGateway/Deployment: fails when global.cloud.resourceId.secretName is set but global.cloud.resourceId.secretKey is not set." { - cd `chart_dir` - run helm template \ - -s templates/api-gateway-controller-deployment.yaml \ - --set 'apiGateway.enabled=true' \ - --set 'apiGateway.image=foo' \ - --set 'global.tls.enabled=true' \ - --set 'global.tls.enableAutoEncrypt=true' \ - --set 'global.datacenter=dc-foo' \ - --set 'global.domain=bar' \ - --set 'global.cloud.enabled=true' \ - --set 'global.cloud.clientId.secretName=client-id-name' \ - --set 'global.cloud.clientId.secretKey=client-id-key' \ - --set 'global.cloud.clientSecret.secretName=client-secret-id-name' \ - --set 'global.cloud.clientSecret.secretKey=client-secret-id-key' \ - --set 'global.cloud.resourceId.secretName=resource-id-name' \ - . - [ "$status" -eq 1 ] - [[ "$output" =~ "When either global.cloud.resourceId.secretName or global.cloud.resourceId.secretKey is defined, both must be set." ]] -} - -@test "apiGateway/Deployment: fails when global.cloud.authURL.secretName is set but global.cloud.authURL.secretKey is not set." 
{ - cd `chart_dir` - run helm template \ - -s templates/api-gateway-controller-deployment.yaml \ - --set 'apiGateway.enabled=true' \ - --set 'apiGateway.image=foo' \ - --set 'global.tls.enabled=true' \ - --set 'global.tls.enableAutoEncrypt=true' \ - --set 'global.datacenter=dc-foo' \ - --set 'global.domain=bar' \ - --set 'global.cloud.enabled=true' \ - --set 'global.cloud.clientId.secretName=client-id-name' \ - --set 'global.cloud.clientId.secretKey=client-id-key' \ - --set 'global.cloud.clientSecret.secretName=client-secret-id-name' \ - --set 'global.cloud.clientSecret.secretKey=client-secret-id-key' \ - --set 'global.cloud.resourceId.secretName=resource-id-name' \ - --set 'global.cloud.resourceId.secretKey=resource-id-key' \ - --set 'global.cloud.authUrl.secretName=auth-url-name' \ - . - [ "$status" -eq 1 ] - - [[ "$output" =~ "When either global.cloud.authUrl.secretName or global.cloud.authUrl.secretKey is defined, both must be set." ]] -} - -@test "apiGateway/Deployment: fails when global.cloud.authURL.secretKey is set but global.cloud.authURL.secretName is not set." { - cd `chart_dir` - run helm template \ - -s templates/api-gateway-controller-deployment.yaml \ - --set 'apiGateway.enabled=true' \ - --set 'apiGateway.image=foo' \ - --set 'global.tls.enabled=true' \ - --set 'global.tls.enableAutoEncrypt=true' \ - --set 'global.datacenter=dc-foo' \ - --set 'global.domain=bar' \ - --set 'global.cloud.enabled=true' \ - --set 'global.cloud.clientId.secretName=client-id-name' \ - --set 'global.cloud.clientId.secretKey=client-id-key' \ - --set 'global.cloud.clientSecret.secretName=client-secret-id-name' \ - --set 'global.cloud.clientSecret.secretKey=client-secret-id-key' \ - --set 'global.cloud.resourceId.secretName=resource-id-name' \ - --set 'global.cloud.resourceId.secretKey=resource-id-key' \ - --set 'global.cloud.authUrl.secretKey=auth-url-key' \ - . - [ "$status" -eq 1 ] - - [[ "$output" =~ "When either global.cloud.authUrl.secretName or global.cloud.authUrl.secretKey is defined, both must be set." ]] -} - -@test "apiGateway/Deployment: fails when global.cloud.apiHost.secretName is set but global.cloud.apiHost.secretKey is not set." { - cd `chart_dir` - run helm template \ - -s templates/api-gateway-controller-deployment.yaml \ - --set 'apiGateway.enabled=true' \ - --set 'apiGateway.image=foo' \ - --set 'global.tls.enabled=true' \ - --set 'global.tls.enableAutoEncrypt=true' \ - --set 'global.datacenter=dc-foo' \ - --set 'global.domain=bar' \ - --set 'global.cloud.enabled=true' \ - --set 'global.cloud.clientId.secretName=client-id-name' \ - --set 'global.cloud.clientId.secretKey=client-id-key' \ - --set 'global.cloud.clientSecret.secretName=client-secret-id-name' \ - --set 'global.cloud.clientSecret.secretKey=client-secret-id-key' \ - --set 'global.cloud.resourceId.secretName=resource-id-name' \ - --set 'global.cloud.resourceId.secretKey=resource-id-key' \ - --set 'global.cloud.apiHost.secretName=auth-url-name' \ - . - [ "$status" -eq 1 ] - - [[ "$output" =~ "When either global.cloud.apiHost.secretName or global.cloud.apiHost.secretKey is defined, both must be set." ]] -} - -@test "apiGateway/Deployment: fails when global.cloud.apiHost.secretKey is set but global.cloud.apiHost.secretName is not set." 
{ - cd `chart_dir` - run helm template \ - -s templates/api-gateway-controller-deployment.yaml \ - --set 'apiGateway.enabled=true' \ - --set 'apiGateway.image=foo' \ - --set 'global.tls.enabled=true' \ - --set 'global.tls.enableAutoEncrypt=true' \ - --set 'global.datacenter=dc-foo' \ - --set 'global.domain=bar' \ - --set 'global.cloud.enabled=true' \ - --set 'global.cloud.clientId.secretName=client-id-name' \ - --set 'global.cloud.clientId.secretKey=client-id-key' \ - --set 'global.cloud.clientSecret.secretName=client-secret-id-name' \ - --set 'global.cloud.clientSecret.secretKey=client-secret-id-key' \ - --set 'global.cloud.resourceId.secretName=resource-id-name' \ - --set 'global.cloud.resourceId.secretKey=resource-id-key' \ - --set 'global.cloud.apiHost.secretKey=auth-url-key' \ - . - [ "$status" -eq 1 ] - - [[ "$output" =~ "When either global.cloud.apiHost.secretName or global.cloud.apiHost.secretKey is defined, both must be set." ]] -} - -@test "apiGateway/Deployment: fails when global.cloud.scadaAddress.secretName is set but global.cloud.scadaAddress.secretKey is not set." { - cd `chart_dir` - run helm template \ - -s templates/api-gateway-controller-deployment.yaml \ - --set 'apiGateway.enabled=true' \ - --set 'apiGateway.image=foo' \ - --set 'global.tls.enabled=true' \ - --set 'global.tls.enableAutoEncrypt=true' \ - --set 'global.datacenter=dc-foo' \ - --set 'global.domain=bar' \ - --set 'global.cloud.enabled=true' \ - --set 'global.cloud.clientId.secretName=client-id-name' \ - --set 'global.cloud.clientId.secretKey=client-id-key' \ - --set 'global.cloud.clientSecret.secretName=client-secret-id-name' \ - --set 'global.cloud.clientSecret.secretKey=client-secret-id-key' \ - --set 'global.cloud.resourceId.secretName=resource-id-name' \ - --set 'global.cloud.resourceId.secretKey=resource-id-key' \ - --set 'global.cloud.scadaAddress.secretName=scada-address-name' \ - . - [ "$status" -eq 1 ] - - [[ "$output" =~ "When either global.cloud.scadaAddress.secretName or global.cloud.scadaAddress.secretKey is defined, both must be set." ]] -} - -@test "apiGateway/Deployment: fails when global.cloud.scadaAddress.secretKey is set but global.cloud.scadaAddress.secretName is not set." { - cd `chart_dir` - run helm template \ - -s templates/api-gateway-controller-deployment.yaml \ - --set 'apiGateway.enabled=true' \ - --set 'apiGateway.image=foo' \ - --set 'global.tls.enabled=true' \ - --set 'global.tls.enableAutoEncrypt=true' \ - --set 'global.datacenter=dc-foo' \ - --set 'global.domain=bar' \ - --set 'global.cloud.enabled=true' \ - --set 'global.cloud.clientId.secretName=client-id-name' \ - --set 'global.cloud.clientId.secretKey=client-id-key' \ - --set 'global.cloud.clientSecret.secretName=client-secret-id-name' \ - --set 'global.cloud.clientSecret.secretKey=client-secret-id-key' \ - --set 'global.cloud.resourceId.secretName=resource-id-name' \ - --set 'global.cloud.resourceId.secretKey=resource-id-key' \ - --set 'global.cloud.scadaAddress.secretKey=scada-address-key' \ - . - [ "$status" -eq 1 ] - - [[ "$output" =~ "When either global.cloud.scadaAddress.secretName or global.cloud.scadaAddress.secretKey is defined, both must be set." 
]] -} diff --git a/charts/consul/test/unit/client-config-configmap.bats b/charts/consul/test/unit/client-config-configmap.bats index 5fc4a186d9..b1b6035429 100755 --- a/charts/consul/test/unit/client-config-configmap.bats +++ b/charts/consul/test/unit/client-config-configmap.bats @@ -2,11 +2,19 @@ load _helpers +@test "client/ConfigMap: enabled by default" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/client-config-configmap.yaml \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + @test "client/ConfigMap: enable with global.enabled false" { cd `chart_dir` local actual=$(helm template \ -s templates/client-config-configmap.yaml \ - --set 'client.enabled=true' \ --set 'global.enabled=false' \ --set 'client.enabled=true' \ . | tee /dev/stderr | @@ -14,11 +22,10 @@ load _helpers [ "${actual}" = "true" ] } -@test "client/ConfigMap: disable with client.enabled false" { +@test "client/ConfigMap: disable with client.enabled" { cd `chart_dir` assert_empty helm template \ -s templates/client-config-configmap.yaml \ - --set 'client.enabled=true' \ --set 'client.enabled=false' \ . } @@ -35,7 +42,6 @@ load _helpers cd `chart_dir` local actual=$(helm template \ -s templates/client-config-configmap.yaml \ - --set 'client.enabled=true' \ --set 'client.extraConfig="{\"hello\": \"world\"}"' \ . | tee /dev/stderr | yq '.data["extra-from-values.json"] | match("world") | length > 1' | tee /dev/stderr) @@ -49,7 +55,6 @@ load _helpers cd `chart_dir` local actual=$(helm template \ -s templates/client-config-configmap.yaml \ - --set 'client.enabled=true' \ --set 'connectInject.enabled=true' \ . | tee /dev/stderr | yq '.data["central-config.json"] | contains("enable_central_service_config")' | tee /dev/stderr) @@ -60,7 +65,6 @@ load _helpers cd `chart_dir` local actual=$(helm template \ -s templates/client-config-configmap.yaml \ - --set 'client.enabled=true' \ --set 'connectInject.enabled=true' \ . | tee /dev/stderr | yq '.data["config.json"] | contains("check_update_interval")' | tee /dev/stderr) @@ -74,7 +78,6 @@ load _helpers cd `chart_dir` local actual=$(helm template \ -s templates/client-config-configmap.yaml \ - --set 'client.enabled=true' \ --set 'global.secretsBackend.vault.enabled=true' \ --set 'global.secretsBackend.vault.consulServerRole=test' \ --set 'global.secretsBackend.vault.consulClientRole=test' \ @@ -89,7 +92,6 @@ load _helpers cd `chart_dir` local actual=$(helm template \ -s templates/client-config-configmap.yaml \ - --set 'client.enabled=true' \ . | tee /dev/stderr | yq -r '.data["client.json"]' | jq -r .auto_reload_config | tee /dev/stderr) diff --git a/charts/consul/test/unit/client-daemonset.bats b/charts/consul/test/unit/client-daemonset.bats index 1d77d0f9d3..24cc4324f2 100755 --- a/charts/consul/test/unit/client-daemonset.bats +++ b/charts/consul/test/unit/client-daemonset.bats @@ -2,6 +2,15 @@ load _helpers +@test "client/DaemonSet: enabled by default" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/client-daemonset.yaml \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + @test "client/DaemonSet: enabled with global.enabled=false and client.enabled=true" { cd `chart_dir` local actual=$(helm template \ @@ -33,7 +42,6 @@ load _helpers cd `chart_dir` local actual=$(helm template \ -s templates/client-daemonset.yaml \ - --set 'client.enabled=true' \ --set 'global.image=foo' \ . 
| tee /dev/stderr | yq -r '.spec.template.spec.containers[0].image' | tee /dev/stderr) @@ -44,7 +52,6 @@ load _helpers cd `chart_dir` local actual=$(helm template \ -s templates/client-daemonset.yaml \ - --set 'client.enabled=true' \ --set 'global.image=foo' \ --set 'client.image=bar' \ . | tee /dev/stderr | @@ -56,7 +63,6 @@ load _helpers cd `chart_dir` local actual=$(helm template \ -s templates/client-daemonset.yaml \ - --set 'client.enabled=true' \ . | tee /dev/stderr | yq -r '.spec.updateStrategy' | tee /dev/stderr) [ "${actual}" = "null" ] @@ -69,7 +75,6 @@ load _helpers cd `chart_dir` local command=$(helm template \ -s templates/client-daemonset.yaml \ - --set 'client.enabled=true' \ --set 'server.replicas=3' \ . | tee /dev/stderr | yq -r '.spec.template.spec.containers[0].command' | tee /dev/stderr) @@ -88,7 +93,6 @@ load _helpers cd `chart_dir` local command=$(helm template \ -s templates/client-daemonset.yaml \ - --set 'client.enabled=true' \ --set 'server.replicas=3' \ --set 'server.ports.serflan.port=9301' \ . | tee /dev/stderr | @@ -108,7 +112,6 @@ load _helpers cd `chart_dir` local command=$(helm template \ -s templates/client-daemonset.yaml \ - --set 'client.enabled=true' \ --set 'server.enabled=false' \ --set 'externalServers.enabled=true' \ --set 'externalServers.hosts[0]=foo' \ @@ -128,7 +131,6 @@ load _helpers cd `chart_dir` local command=$(helm template \ -s templates/client-daemonset.yaml \ - --set 'client.enabled=true' \ --set 'server.enabled=false' \ --set 'externalServers.enabled=true' \ --set 'externalServers.hosts[0]=foo' \ @@ -147,7 +149,6 @@ load _helpers cd `chart_dir` local actual=$(helm template \ -s templates/client-daemonset.yaml \ - --set 'client.enabled=true' \ . | tee /dev/stderr | yq '.spec.template.spec.containers[0].command | any(contains("grpc"))' | tee /dev/stderr) [ "${actual}" = "true" ] @@ -157,7 +158,6 @@ load _helpers cd `chart_dir` local actual=$(helm template \ -s templates/client-daemonset.yaml \ - --set 'client.enabled=true' \ --set 'client.grpc=false' \ . | tee /dev/stderr | yq '.spec.template.spec.containers[0].command | any(contains("grpc"))' | tee /dev/stderr) @@ -171,7 +171,6 @@ load _helpers cd `chart_dir` local actual=$(helm template \ -s templates/client-daemonset.yaml \ - --set 'client.enabled=true' \ . | tee /dev/stderr | yq '.spec.template.spec.containers[0].command | any(contains("-node-meta=pod-name:${HOSTNAME}"))' | tee /dev/stderr) [ "${actual}" = "true" ] @@ -181,7 +180,6 @@ load _helpers cd `chart_dir` local actual=$(helm template \ -s templates/client-daemonset.yaml \ - --set 'client.enabled=true' \ . | tee /dev/stderr | yq '.spec.template.spec.containers[0].command | any(contains("-node-meta=host-ip:${HOST_IP}"))' | tee /dev/stderr) [ "${actual}" = "true" ] @@ -191,7 +189,6 @@ load _helpers cd `chart_dir` local actual=$(helm template \ -s templates/client-daemonset.yaml \ - --set 'client.enabled=true' \ --set 'client.nodeMeta.pod-name=foobar' \ . | tee /dev/stderr | yq '.spec.template.spec.containers[0].command | any(contains("-node-meta=pod-name:foobar"))' | tee /dev/stderr) @@ -202,7 +199,6 @@ load _helpers cd `chart_dir` local actual=$(helm template \ -s templates/client-daemonset.yaml \ - --set 'client.enabled=true' \ --set 'client.nodeMeta.cluster-name=cluster01' \ . 
| tee /dev/stderr | yq '.spec.template.spec.containers[0].command | any(contains("-node-meta=cluster-name:cluster01"))' | tee /dev/stderr) @@ -216,7 +212,6 @@ load _helpers cd `chart_dir` local actual=$(helm template \ -s templates/client-daemonset.yaml \ - --set 'client.enabled=true' \ . | tee /dev/stderr | yq -rc '.spec.template.spec.containers[0].resources' | tee /dev/stderr) [ "${actual}" = '{"limits":{"cpu":"100m","memory":"100Mi"},"requests":{"cpu":"100m","memory":"100Mi"}}' ] @@ -226,7 +221,6 @@ load _helpers cd `chart_dir` local actual=$(helm template \ -s templates/client-daemonset.yaml \ - --set 'client.enabled=true' \ --set 'client.resources.foo=bar' \ . | tee /dev/stderr | yq -r '.spec.template.spec.containers[0].resources.foo' | tee /dev/stderr) @@ -238,7 +232,6 @@ load _helpers cd `chart_dir` local actual=$(helm template \ -s templates/client-daemonset.yaml \ - --set 'client.enabled=true' \ --set 'client.resources=foo: bar' \ . | tee /dev/stderr | yq -r '.spec.template.spec.containers[0].resources.foo' | tee /dev/stderr) @@ -254,7 +247,6 @@ load _helpers # check that the extra-config volume is defined local volume_name=$(helm template \ -s templates/client-daemonset.yaml \ - --set 'client.enabled=true' \ . | tee /dev/stderr | yq -r '.spec.template.spec.volumes[] | select(.name == "extra-config") | .name' | tee /dev/stderr) [ "${volume_name}" = "extra-config" ] @@ -262,7 +254,6 @@ load _helpers # check that the consul container mounts the volume at /consul/extra-config local mount_path=$(helm template \ -s templates/client-daemonset.yaml \ - --set 'client.enabled=true' \ . | tee /dev/stderr | yq -r '.spec.template.spec.containers[] | select(.name == "consul") | .volumeMounts[] | select(.name == "extra-config") | .mountPath' | tee /dev/stderr) [ "${mount_path}" = "/consul/extra-config" ] @@ -277,7 +268,6 @@ load _helpers # Test that it defines it local object=$(helm template \ -s templates/client-daemonset.yaml \ - --set 'client.enabled=true' \ --set 'client.extraVolumes[0].type=configMap' \ --set 'client.extraVolumes[0].name=foo' \ . | tee /dev/stderr | @@ -294,7 +284,6 @@ load _helpers # Test that it mounts it local object=$(helm template \ -s templates/client-daemonset.yaml \ - --set 'client.enabled=true' \ --set 'client.extraVolumes[0].type=configMap' \ --set 'client.extraVolumes[0].name=foo' \ . | tee /dev/stderr | @@ -311,7 +300,6 @@ load _helpers # Doesn't load it local actual=$(helm template \ -s templates/client-daemonset.yaml \ - --set 'client.enabled=true' \ --set 'client.extraVolumes[0].type=configMap' \ --set 'client.extraVolumes[0].name=foo' \ . | tee /dev/stderr | @@ -325,7 +313,6 @@ load _helpers # Test that it defines it local object=$(helm template \ -s templates/client-daemonset.yaml \ - --set 'client.enabled=true' \ --set 'client.extraVolumes[0].type=secret' \ --set 'client.extraVolumes[0].name=foo' \ . | tee /dev/stderr | @@ -342,7 +329,6 @@ load _helpers # Test that it mounts it local object=$(helm template \ -s templates/client-daemonset.yaml \ - --set 'client.enabled=true' \ --set 'client.extraVolumes[0].type=configMap' \ --set 'client.extraVolumes[0].name=foo' \ . | tee /dev/stderr | @@ -359,7 +345,6 @@ load _helpers # Doesn't load it local actual=$(helm template \ -s templates/client-daemonset.yaml \ - --set 'client.enabled=true' \ --set 'client.extraVolumes[0].type=configMap' \ --set 'client.extraVolumes[0].name=foo' \ . 
| tee /dev/stderr | @@ -371,7 +356,6 @@ load _helpers cd `chart_dir` local actual=$(helm template \ -s templates/client-daemonset.yaml \ - --set 'client.enabled=true' \ --set 'client.extraVolumes[0].type=configMap' \ --set 'client.extraVolumes[0].name=foo' \ --set 'client.extraVolumes[0].load=true' \ @@ -387,7 +371,6 @@ load _helpers cd `chart_dir` local actual=$(helm template \ -s templates/client-daemonset.yaml \ - --set 'client.enabled=true' \ . | tee /dev/stderr | yq '.spec.template.spec.nodeSelector' | tee /dev/stderr) [ "${actual}" = "null" ] @@ -397,7 +380,6 @@ load _helpers cd `chart_dir` local actual=$(helm template \ -s templates/client-daemonset.yaml \ - --set 'client.enabled=true' \ --set 'client.nodeSelector=testing' \ . | tee /dev/stderr | yq -r '.spec.template.spec.nodeSelector' | tee /dev/stderr) @@ -411,7 +393,6 @@ load _helpers cd `chart_dir` local actual=$(helm template \ -s templates/client-daemonset.yaml \ - --set 'client.enabled=true' \ . | tee /dev/stderr | yq '.spec.template.spec | .affinity? == null' | tee /dev/stderr) [ "${actual}" = "true" ] @@ -421,7 +402,6 @@ load _helpers cd `chart_dir` local actual=$(helm template \ -s templates/client-daemonset.yaml \ - --set 'client.enabled=true' \ --set 'client.affinity=foobar' \ . | tee /dev/stderr | yq '.spec.template.spec | .affinity == "foobar"' | tee /dev/stderr) @@ -435,7 +415,6 @@ load _helpers cd `chart_dir` local actual=$(helm template \ -s templates/client-daemonset.yaml \ - --set 'client.enabled=true' \ . | tee /dev/stderr | yq '.spec.template.spec.priorityClassName' | tee /dev/stderr) [ "${actual}" = "null" ] @@ -445,7 +424,6 @@ load _helpers cd `chart_dir` local actual=$(helm template \ -s templates/client-daemonset.yaml \ - --set 'client.enabled=true' \ --set 'client.priorityClassName=testing' \ . | tee /dev/stderr | yq -r '.spec.template.spec.priorityClassName' | tee /dev/stderr) @@ -459,7 +437,6 @@ load _helpers cd `chart_dir` local actual=$(helm template \ -s templates/client-daemonset.yaml \ - --set 'client.enabled=true' \ . | tee /dev/stderr | yq -r '.spec.template.metadata.labels | del(."app") | del(."chart") | del(."release") | del(."component") | del(."hasDNS")' | tee /dev/stderr) [ "${actual}" = "{}" ] @@ -469,7 +446,6 @@ load _helpers cd `chart_dir` local actual=$(helm template \ -s templates/client-daemonset.yaml \ - --set 'client.enabled=true' \ --set 'client.extraLabels.foo=bar' \ . | tee /dev/stderr | yq -r '.spec.template.metadata.labels.foo' | tee /dev/stderr) @@ -480,7 +456,6 @@ load _helpers cd `chart_dir` local actual=$(helm template \ -s templates/client-daemonset.yaml \ - --set 'client.enabled=true' \ --set 'client.extraLabels.foo=bar' \ --set 'client.extraLabels.baz=qux' \ . | tee /dev/stderr) @@ -498,7 +473,6 @@ load _helpers cd `chart_dir` local actual=$(helm template \ -s templates/client-daemonset.yaml \ - --set 'client.enabled=true' \ . | tee /dev/stderr | yq -r '.spec.template.metadata.annotations | del(."consul.hashicorp.com/connect-inject") | del(."consul.hashicorp.com/config-checksum")' | tee /dev/stderr) [ "${actual}" = "{}" ] @@ -508,7 +482,6 @@ load _helpers cd `chart_dir` local actual=$(helm template \ -s templates/client-daemonset.yaml \ - --set 'client.enabled=true' \ --set 'client.annotations=foo: bar' \ . 
| tee /dev/stderr | yq -r '.spec.template.metadata.annotations.foo' | tee /dev/stderr) @@ -522,7 +495,6 @@ load _helpers cd `chart_dir` local actual=$(helm template \ -s templates/client-daemonset.yaml \ - --set 'client.enabled=true' \ --set 'global.metrics.enabled=true' \ --set 'global.metrics.enableAgentMetrics=true' \ . | tee /dev/stderr | @@ -534,7 +506,6 @@ load _helpers cd `chart_dir` local actual=$(helm template \ -s templates/client-daemonset.yaml \ - --set 'client.enabled=true' \ --set 'global.metrics.enabled=true' \ --set 'global.metrics.enableAgentMetrics=true' \ . | tee /dev/stderr | @@ -546,7 +517,6 @@ load _helpers cd `chart_dir` local actual=$(helm template \ -s templates/client-daemonset.yaml \ - --set 'client.enabled=true' \ --set 'global.metrics.enabled=true' \ --set 'global.metrics.enableAgentMetrics=true' \ . | tee /dev/stderr | @@ -558,7 +528,6 @@ load _helpers cd `chart_dir` local actual=$(helm template \ -s templates/client-daemonset.yaml \ - --set 'client.enabled=true' \ --set 'global.metrics.enabled=true' \ --set 'global.metrics.enableAgentMetrics=true' \ . | tee /dev/stderr | @@ -571,7 +540,6 @@ load _helpers cd `chart_dir` local actual=$(helm template \ -s templates/client-daemonset.yaml \ - --set 'client.enabled=true' \ --set 'global.metrics.enabled=true' \ --set 'global.metrics.enableAgentMetrics=true' \ --set 'global.metrics.agentMetricsRetentionTime=5m' \ @@ -588,28 +556,25 @@ load _helpers cd `chart_dir` local actual=$(helm template \ -s templates/client-daemonset.yaml \ - --set 'client.enabled=true' \ . | tee /dev/stderr | yq -r '.spec.template.metadata.annotations."consul.hashicorp.com/config-checksum"' | tee /dev/stderr) - [ "${actual}" = f9be2829fed80a127e3752e10be32f29c2f9ca0ea548abcf3d4fc2c985cb7201 ] + [ "${actual}" = 55f93d04c3f0b85c7ef2869e4b8623296025a8388c881eab63be9f2dc70bafd6 ] } @test "client/DaemonSet: config-checksum annotation changes when extraConfig is provided" { cd `chart_dir` local actual=$(helm template \ -s templates/client-daemonset.yaml \ - --set 'client.enabled=true' \ --set 'client.extraConfig="{\"hello\": \"world\"}"' \ . | tee /dev/stderr | yq -r '.spec.template.metadata.annotations."consul.hashicorp.com/config-checksum"' | tee /dev/stderr) - [ "${actual}" = e9fb5f0b4ff4e36a89e8ca2dc1aed2072306e0dd6d4cc60b3edf155cf8dbe2e9 ] + [ "${actual}" = 891c0e207e1e0259ffb150d7364b667b7b12786ce37af3dd89f366bc6d2f21aa ] } @test "client/DaemonSet: config-checksum annotation changes when connectInject.enabled=true" { cd `chart_dir` local actual=$(helm template \ -s templates/client-daemonset.yaml \ - --set 'client.enabled=true' \ --set 'connectInject.enabled=true' \ . | tee /dev/stderr | yq -r '.spec.template.metadata.annotations."consul.hashicorp.com/config-checksum"' | tee /dev/stderr) @@ -623,7 +588,6 @@ load _helpers cd `chart_dir` local actual=$(helm template \ -s templates/client-daemonset.yaml \ - --set 'client.enabled=true' \ . | tee /dev/stderr | yq '.spec.template.spec | .tolerations? == null' | tee /dev/stderr) [ "${actual}" = "true" ] @@ -633,7 +597,6 @@ load _helpers cd `chart_dir` local actual=$(helm template \ -s templates/client-daemonset.yaml \ - --set 'client.enabled=true' \ --set 'client.tolerations=foobar' \ . | tee /dev/stderr | yq '.spec.template.spec.tolerations == "foobar"' | tee /dev/stderr) @@ -647,7 +610,6 @@ load _helpers cd `chart_dir` local actual=$(helm template \ -s templates/client-daemonset.yaml \ - --set 'client.enabled=true' \ . 
| tee /dev/stderr | yq '.spec.template.spec.containers[] | select(.name=="consul") | .env[] | select(.name == "GOSSIP_KEY")' | tee /dev/stderr) [ "${actual}" = "" ] @@ -657,7 +619,6 @@ load _helpers cd `chart_dir` local actual=$(helm template \ -s templates/client-daemonset.yaml \ - --set 'client.enabled=true' \ --set 'global.gossipEncryption.autoGenerate=true' \ . | tee /dev/stderr | yq '.spec.template.spec.containers[] | select(.name=="consul") | .env[] | select(.name == "GOSSIP_KEY") | .valueFrom.secretKeyRef | [.name=="release-name-consul-gossip-encryption-key", .key="key"] | all' | tee /dev/stderr) @@ -668,7 +629,6 @@ load _helpers cd `chart_dir` local actual=$(helm template \ -s templates/client-daemonset.yaml \ - --set 'client.enabled=true' \ --set 'global.gossipEncryption.autoGenerate=true' \ . | tee /dev/stderr | yq '.spec.template.spec.containers[] | select(.name=="consul") | .command | any(contains("-encrypt=\"${GOSSIP_KEY}\""))' \ @@ -680,7 +640,6 @@ load _helpers cd `chart_dir` local actual=$(helm template \ -s templates/client-daemonset.yaml \ - --set 'client.enabled=true' \ --set 'global.gossipEncryption.secretKey=bar' \ . | tee /dev/stderr | yq '.spec.template.spec.containers[] | select(.name=="consul") | .env[] | select(.name == "GOSSIP_KEY") | length > 0' | tee /dev/stderr) @@ -691,7 +650,6 @@ load _helpers cd `chart_dir` local actual=$(helm template \ -s templates/client-daemonset.yaml \ - --set 'client.enabled=true' \ --set 'global.gossipEncryption.secretName=foo' \ . | tee /dev/stderr | yq '.spec.template.spec.containers[] | select(.name=="consul") | .env[] | select(.name == "GOSSIP_KEY") | length > 0' | tee /dev/stderr) @@ -702,7 +660,6 @@ load _helpers cd `chart_dir` local actual=$(helm template \ -s templates/client-daemonset.yaml \ - --set 'client.enabled=true' \ --set 'global.gossipEncryption.secretKey=foo' \ --set 'global.gossipEncryption.secretName=bar' \ . | tee /dev/stderr | @@ -714,7 +671,6 @@ load _helpers cd `chart_dir` local actual=$(helm template \ -s templates/client-daemonset.yaml \ - --set 'client.enabled=true' \ . | tee /dev/stderr | yq '.spec.template.spec.containers[] | select(.name=="consul") | .command | join(" ") | contains("encrypt")' | tee /dev/stderr) [ "${actual}" = "false" ] @@ -724,7 +680,6 @@ load _helpers cd `chart_dir` local actual=$(helm template \ -s templates/client-daemonset.yaml \ - --set 'client.enabled=true' \ --set 'global.gossipEncryption.secretKey=foo' \ --set 'global.gossipEncryption.secretName=bar' \ . | tee /dev/stderr | @@ -739,7 +694,6 @@ load _helpers cd `chart_dir` local actual=$(helm template \ -s templates/client-daemonset.yaml \ - --set 'client.enabled=true' \ --set 'global.tls.enabled=true' \ . | tee /dev/stderr | yq '.spec.template.spec.volumes[] | select(.name == "consul-ca-cert")' | tee /dev/stderr) @@ -750,7 +704,6 @@ load _helpers cd `chart_dir` local actual=$(helm template \ -s templates/client-daemonset.yaml \ - --set 'client.enabled=true' \ --set 'global.tls.enabled=true' \ . | tee /dev/stderr | yq '.spec.template.spec.volumes[] | select(.name == "consul-ca-key")' | tee /dev/stderr) @@ -761,7 +714,6 @@ load _helpers cd `chart_dir` local actual=$(helm template \ -s templates/client-daemonset.yaml \ - --set 'client.enabled=true' \ --set 'global.tls.enabled=true' \ . 
| tee /dev/stderr | yq '.spec.template.spec.volumes[] | select(.name == "consul-client-cert")' | tee /dev/stderr) @@ -772,7 +724,6 @@ load _helpers cd `chart_dir` local actual=$(helm template \ -s templates/client-daemonset.yaml \ - --set 'client.enabled=true' \ --set 'global.tls.enabled=false' \ . | tee /dev/stderr | yq '.spec.template.spec.containers[0].ports[] | select (.containerPort == 8501)' | tee /dev/stderr) @@ -783,7 +734,6 @@ load _helpers cd `chart_dir` local actual=$(helm template \ -s templates/client-daemonset.yaml \ - --set 'client.enabled=true' \ --set 'global.tls.enabled=true' \ . | tee /dev/stderr | yq '.spec.template.spec.containers[0].ports[] | select (.containerPort == 8501)' | tee /dev/stderr) @@ -794,7 +744,6 @@ load _helpers cd `chart_dir` local actual=$(helm template \ -s templates/client-daemonset.yaml \ - --set 'client.enabled=true' \ --set 'global.tls.enabled=true' \ --set 'global.tls.httpsOnly=false' \ . | tee /dev/stderr | @@ -806,7 +755,6 @@ load _helpers cd `chart_dir` local actual=$(helm template \ -s templates/client-daemonset.yaml \ - --set 'client.enabled=true' \ --set 'global.tls.enabled=true' \ --set 'global.tls.httpsOnly=true' \ . | tee /dev/stderr | @@ -818,7 +766,6 @@ load _helpers cd `chart_dir` local actual=$(helm template \ -s templates/client-daemonset.yaml \ - --set 'client.enabled=true' \ --set 'global.tls.enabled=false' \ . | tee /dev/stderr | yq '.spec.template.spec.containers[0].readinessProbe.exec.command | join(" ") | contains("http://127.0.0.1:8500")' | tee /dev/stderr) @@ -829,7 +776,6 @@ load _helpers cd `chart_dir` local actual=$(helm template \ -s templates/client-daemonset.yaml \ - --set 'client.enabled=true' \ --set 'global.tls.enabled=true' \ . | tee /dev/stderr | yq '.spec.template.spec.containers[0].readinessProbe.exec.command | join(" ") | contains("https://127.0.0.1:8501")' | tee /dev/stderr) @@ -840,7 +786,6 @@ load _helpers cd `chart_dir` local actual=$(helm template \ -s templates/client-daemonset.yaml \ - --set 'client.enabled=true' \ --set 'global.tls.enabled=true' \ . | tee /dev/stderr | yq '.spec.template.spec.containers[0].readinessProbe.exec.command | join(" ") | contains("-k")' | tee /dev/stderr) @@ -851,7 +796,6 @@ load _helpers cd `chart_dir` local actual=$(helm template \ -s templates/client-daemonset.yaml \ - --set 'client.enabled=true' \ --set 'global.tls.enabled=true' \ --set 'global.tls.httpsOnly=true' \ . | tee /dev/stderr | @@ -863,7 +807,6 @@ load _helpers cd `chart_dir` local actual=$(helm template \ -s templates/client-daemonset.yaml \ - --set 'client.enabled=true' \ --set 'global.tls.enabled=true' \ . | tee /dev/stderr | yq '.spec.template.spec.initContainers[] | select(.name == "client-tls-init") | length > 0' | tee /dev/stderr) @@ -874,7 +817,6 @@ load _helpers cd `chart_dir` local env=$(helm template \ -s templates/client-daemonset.yaml \ - --set 'client.enabled=true' \ --set 'global.acls.manageSystemACLs=true' \ --set 'global.tls.enabled=true' \ . | tee /dev/stderr | @@ -889,7 +831,6 @@ load _helpers cd `chart_dir` local env=$(helm template \ -s templates/client-daemonset.yaml \ - --set 'client.enabled=true' \ --set 'global.acls.manageSystemACLs=true' \ . | tee /dev/stderr | yq -r '.spec.template.spec.initContainers[0].env[]' | tee /dev/stderr) @@ -903,7 +844,6 @@ load _helpers cd `chart_dir` local actual=$(helm template \ -s templates/client-daemonset.yaml \ - --set 'client.enabled=true' \ --set 'global.acls.manageSystemACLs=true' \ . 
| tee /dev/stderr | yq '.spec.template.spec.initContainers[0].env[] | select(.name == "CONSUL_CACERT")' | tee /dev/stderr) @@ -915,7 +855,6 @@ load _helpers cd `chart_dir` local env=$(helm template \ -s templates/client-daemonset.yaml \ - --set 'client.enabled=true' \ --set 'global.acls.manageSystemACLs=true' \ --set 'global.tls.enabled=true' \ . | tee /dev/stderr | @@ -929,7 +868,6 @@ load _helpers cd `chart_dir` local has_acl_init_container=$(helm template \ -s templates/client-daemonset.yaml \ - --set 'client.enabled=true' \ --set 'global.tls.enabled=true' \ --set 'global.acls.manageSystemACLs=true' \ . | tee /dev/stderr | @@ -939,7 +877,6 @@ load _helpers local has_tls_init_container=$(helm template \ -s templates/client-daemonset.yaml \ - --set 'client.enabled=true' \ --set 'global.tls.enabled=true' \ --set 'global.acls.manageSystemACLs=true' \ . | tee /dev/stderr | @@ -952,7 +889,6 @@ load _helpers cd `chart_dir` local env=$(helm template \ -s templates/client-daemonset.yaml \ - --set 'client.enabled=true' \ --set 'global.tls.enabled=true' \ . | tee /dev/stderr | yq -r '.spec.template.spec.containers[0].env[]' | tee /dev/stderr) @@ -969,7 +905,6 @@ load _helpers cd `chart_dir` local command=$(helm template \ -s templates/client-daemonset.yaml \ - --set 'client.enabled=true' \ --set 'global.tls.enabled=true' \ . | tee /dev/stderr | yq '.spec.template.spec.containers[0].command | join(" ")' | tee /dev/stderr) @@ -989,10 +924,8 @@ load _helpers cd `chart_dir` local command=$(helm template \ -s templates/client-daemonset.yaml \ - --set 'client.enabled=true' \ --set 'global.tls.enabled=true' \ --set 'global.peering.enabled=true' \ - --set 'meshGateway.enabled=true' \ --set 'connectInject.enabled=true' \ . | tee /dev/stderr | yq '.spec.template.spec.containers[0].command | join(" ")' | tee /dev/stderr) @@ -1012,10 +945,8 @@ load _helpers cd `chart_dir` local command=$(helm template \ -s templates/client-daemonset.yaml \ - --set 'client.enabled=true' \ --set 'global.tls.enabled=true' \ --set 'global.peering.enabled=true' \ - --set 'meshGateway.enabled=true' \ --set 'connectInject.enabled=true' \ . | tee /dev/stderr | yq '.spec.template.spec.containers[0].command | join(" ")' | tee /dev/stderr) @@ -1035,7 +966,6 @@ load _helpers cd `chart_dir` local command=$(helm template \ -s templates/client-daemonset.yaml \ - --set 'client.enabled=true' \ --set 'global.tls.enabled=true' \ --set 'global.tls.verify=false' \ . | tee /dev/stderr | @@ -1056,7 +986,6 @@ load _helpers cd `chart_dir` local spec=$(helm template \ -s templates/client-daemonset.yaml \ - --set 'client.enabled=true' \ --set 'global.tls.enabled=true' \ --set 'global.tls.caCert.secretName=foo-ca-cert' \ --set 'global.tls.caCert.secretKey=key' \ @@ -1090,7 +1019,6 @@ load _helpers cd `chart_dir` local actual=$(helm template \ -s templates/client-daemonset.yaml \ - --set 'client.enabled=true' \ --set 'global.tls.enabled=true' \ --set 'global.tls.enableAutoEncrypt=true' \ . | tee /dev/stderr | @@ -1102,7 +1030,6 @@ load _helpers cd `chart_dir` local command=$(helm template \ -s templates/client-daemonset.yaml \ - --set 'client.enabled=true' \ --set 'global.tls.enabled=true' \ --set 'global.tls.enableAutoEncrypt=true' \ . | tee /dev/stderr | @@ -1128,7 +1055,6 @@ load _helpers cd `chart_dir` local actual=$(helm template \ -s templates/client-daemonset.yaml \ - --set 'client.enabled=true' \ --set 'global.tls.enabled=true' \ --set 'global.tls.enableAutoEncrypt=true' \ . 
| tee /dev/stderr | @@ -1140,7 +1066,6 @@ load _helpers cd `chart_dir` local actual=$(helm template \ -s templates/client-daemonset.yaml \ - --set 'client.enabled=true' \ --set 'global.tls.enabled=true' \ --set 'global.tls.enableAutoEncrypt=true' \ . | tee /dev/stderr | @@ -1152,7 +1077,6 @@ load _helpers cd `chart_dir` local actual=$(helm template \ -s templates/client-daemonset.yaml \ - --set 'client.enabled=true' \ --set 'global.tls.enabled=true' \ --set 'global.tls.enableAutoEncrypt=true' \ . | tee /dev/stderr | @@ -1164,7 +1088,6 @@ load _helpers cd `chart_dir` local actual=$(helm template \ -s templates/client-daemonset.yaml \ - --set 'client.enabled=true' \ --set 'global.tls.enabled=true' \ --set 'global.tls.enableAutoEncrypt=true' \ . | tee /dev/stderr | @@ -1179,7 +1102,6 @@ load _helpers cd `chart_dir` local object=$(helm template \ -s templates/client-daemonset.yaml \ - --set 'client.enabled=true' \ --set 'client.extraEnvironmentVars.custom_proxy=fakeproxy' \ --set 'client.extraEnvironmentVars.no_proxy=custom_no_proxy' \ . | tee /dev/stderr | @@ -1201,7 +1123,6 @@ load _helpers cd `chart_dir` local volume_name=$(helm template \ -s templates/client-daemonset.yaml \ - --set 'client.enabled=true' \ --set 'global.acls.manageSystemACLs=true' \ . | tee /dev/stderr | yq -r '.spec.template.spec.volumes[] | select(.name == "aclconfig") | .name' | tee /dev/stderr) @@ -1212,7 +1133,6 @@ load _helpers cd `chart_dir` local mount_path=$(helm template \ -s templates/client-daemonset.yaml \ - --set 'client.enabled=true' \ --set 'global.acls.manageSystemACLs=true' \ . | tee /dev/stderr | yq -r '.spec.template.spec.containers[] | select(.name == "consul") | .volumeMounts[] | select(.name == "aclconfig") | .mountPath' | tee /dev/stderr) @@ -1223,7 +1143,6 @@ load _helpers cd `chart_dir` local actual=$(helm template \ -s templates/client-daemonset.yaml \ - --set 'client.enabled=true' \ --set 'global.acls.manageSystemACLs=true' \ . | tee /dev/stderr | yq '.spec.template.spec.containers[0].command | any(contains("/consul/aclconfig"))' | tee /dev/stderr) @@ -1234,7 +1153,6 @@ load _helpers cd `chart_dir` local object=$(helm template \ -s templates/client-daemonset.yaml \ - --set 'client.enabled=true' \ --set 'global.acls.manageSystemACLs=true' \ . | tee /dev/stderr | yq '.spec.template.spec.initContainers[] | select(.name == "client-acl-init")' | tee /dev/stderr) @@ -1276,7 +1194,6 @@ local actual=$(echo $object | cd `chart_dir` local object=$(helm template \ -s templates/client-daemonset.yaml \ - --set 'client.enabled=true' \ --set 'global.enableConsulNamespaces=true' \ --set 'global.adminPartitions.enabled=true' \ --set 'global.adminPartitions.name=default' \ @@ -1329,7 +1246,6 @@ local actual=$(echo $object | cd `chart_dir` local actual=$(helm template \ -s templates/client-daemonset.yaml \ - --set 'client.enabled=true' \ --set 'global.acls.manageSystemACLs=false' \ . | tee /dev/stderr | yq '[.spec.template.spec.containers[] | select(.name == "consul") | .env[] | .name] | any(contains("CONSUL_HTTP_TOKEN_FILE"))' | tee /dev/stderr) @@ -1340,7 +1256,6 @@ local actual=$(echo $object | cd `chart_dir` local actual=$(helm template \ -s templates/client-daemonset.yaml \ - --set 'client.enabled=true' \ --set 'global.acls.manageSystemACLs=true' \ . 
| tee /dev/stderr | yq '[.spec.template.spec.containers[] | select(.name == "consul") | .env[] | .name] | any(contains("CONSUL_HTTP_TOKEN_FILE"))' | tee /dev/stderr)
@@ -1351,7 +1266,6 @@ local actual=$(echo $object |
   cd `chart_dir`
   local actual=$(helm template \
       -s templates/client-daemonset.yaml \
-      --set 'client.enabled=true' \
       --set 'global.acls.manageSystemACLs=true' \
       . | tee /dev/stderr |
       yq '[.spec.template.spec.containers[0].lifecycle.preStop.exec.command[2]] | any(contains("consul logout"))' | tee /dev/stderr)
@@ -1362,7 +1276,6 @@ local actual=$(echo $object |
   cd `chart_dir`
   local volume=$(helm template \
       -s templates/client-daemonset.yaml \
-      --set 'client.enabled=true' \
       --set 'global.acls.manageSystemACLs=true' \
       . | yq '.spec.template.spec.volumes[] | select(.name == "consul-data")' | tee /dev/stderr)
@@ -1379,7 +1292,6 @@ local actual=$(echo $object |
   cd `chart_dir`
   local volume_mount=$(helm template \
       -s templates/client-daemonset.yaml \
-      --set 'client.enabled=true' \
       --set 'global.acls.manageSystemACLs=true' \
       . | yq '.spec.template.spec.containers[] | select(.name == "consul") | .volumeMounts[] | select(.name == "consul-data")' | tee /dev/stderr)
@@ -1396,7 +1308,6 @@ local actual=$(echo $object |
   cd `chart_dir`
   local object=$(helm template \
       -s templates/client-daemonset.yaml \
-      --set 'client.enabled=true' \
       --set 'global.acls.manageSystemACLs=true' \
       . | yq '.spec.template.spec.initContainers[0].volumeMounts[1]' | tee /dev/stderr)
@@ -1417,7 +1328,6 @@ local actual=$(echo $object |
   cd `chart_dir`
   local object=$(helm template \
       -s templates/client-daemonset.yaml \
-      --set 'client.enabled=true' \
       --set 'global.acls.manageSystemACLs=true' \
       --set 'global.tls.enabled=true' \
       . | yq '.spec.template.spec.initContainers[] | select(.name == "client-acl-init") | .volumeMounts[] | select(.name == "consul-ca-cert")' | tee /dev/stderr)
@@ -1439,7 +1349,6 @@ local actual=$(echo $object |
   cd `chart_dir`
   local object=$(helm template \
       -s templates/client-daemonset.yaml \
-      --set 'client.enabled=true' \
       --set 'global.acls.manageSystemACLs=true' \
       --set 'global.tls.enabled=false' \
       . | yq '.spec.template.spec.initContainers[] | select(.name == "client-acl-init") | .volumeMounts[] | select(.name=="consul-ca-cert")' | tee /dev/stderr)
@@ -1450,7 +1359,6 @@ local actual=$(echo $object |
   cd `chart_dir`
   run helm template \
       -s templates/client-daemonset.yaml \
-      --set 'client.enabled=true' \
       --set 'global.acls.manageSystemACLs=true' \
       --set 'externalServers.enabled=true' \
       --set 'server.enabled=false' \
@@ -1464,7 +1372,6 @@ local actual=$(echo $object |
   cd `chart_dir`
   local command=$(helm template \
       -s templates/client-daemonset.yaml \
-      --set 'client.enabled=true' \
       --set 'global.acls.manageSystemACLs=true' \
       --set 'externalServers.enabled=true' \
       --set 'server.enabled=false' \
@@ -1484,7 +1391,6 @@ local actual=$(echo $object |
   cd `chart_dir`
   local command=$(helm template \
       -s templates/client-daemonset.yaml \
-      --set 'client.enabled=true' \
       --set 'global.acls.manageSystemACLs=true' \
       --set 'global.tls.enabled=true' \
       --set 'externalServers.enabled=true' \
@@ -1502,7 +1408,6 @@ local actual=$(echo $object |
   cd `chart_dir`
   local command=$(helm template \
       -s templates/client-daemonset.yaml \
-      --set 'client.enabled=true' \
       --set 'global.acls.manageSystemACLs=true' \
       --set 'externalServers.enabled=true' \
       --set 'server.enabled=false' \
@@ -1518,7 +1423,6 @@ local actual=$(echo $object |
   cd `chart_dir`
   local command=$(helm template \
       -s templates/client-daemonset.yaml \
-      --set 'client.enabled=true' \
       --set 'global.acls.manageSystemACLs=true' \
       --set 'externalServers.enabled=true' \
       --set 'server.enabled=false' \
@@ -1534,7 +1438,6 @@ local actual=$(echo $object |
   cd `chart_dir`
   local command=$(helm template \
       -s templates/client-daemonset.yaml \
-      --set 'client.enabled=true' \
       --set 'global.acls.manageSystemACLs=true' \
       --set 'externalServers.enabled=true' \
       --set 'server.enabled=false' \
@@ -1551,7 +1454,6 @@ local actual=$(echo $object |
   cd `chart_dir`
   local command=$(helm template \
       -s templates/client-daemonset.yaml \
-      --set 'client.enabled=true' \
       --set 'global.acls.manageSystemACLs=true' \
       --set 'externalServers.enabled=false' \
       --set 'server.enabled=false' \
@@ -1568,7 +1470,6 @@ local actual=$(echo $object |
   cd `chart_dir`
   local command=$(helm template \
       -s templates/client-daemonset.yaml \
-      --set 'client.enabled=true' \
       --set 'global.acls.manageSystemACLs=true' \
       --set 'externalServers.enabled=false' \
       --set 'server.enabled=false' \
@@ -1584,7 +1485,6 @@ local actual=$(echo $object |
   cd `chart_dir`
   local command=$(helm template \
       -s templates/client-daemonset.yaml \
-      --set 'client.enabled=true' \
       --set 'global.acls.manageSystemACLs=true' \
       --set 'externalServers.enabled=true' \
       --set 'server.enabled=false' \
@@ -1604,7 +1504,6 @@ local actual=$(echo $object |
   local actual=$(helm template \
       -s templates/client-daemonset.yaml \
       --set 'client.enabled=true' \
-      --set 'client.enabled=true' \
       --set 'client.exposeGossipPorts=false' \
       . | tee /dev/stderr |
       yq -r '.spec.template.spec.containers[] | select(.name=="consul") | .env[] | select(.name=="ADVERTISE_IP") | .valueFrom.fieldRef.fieldPath' |
@@ -1617,7 +1516,6 @@ local actual=$(echo $object |
   local actual=$(helm template \
       -s templates/client-daemonset.yaml \
       --set 'client.enabled=true' \
-      --set 'client.enabled=true' \
       --set 'client.exposeGossipPorts=true' \
       . | tee /dev/stderr |
       yq -r '.spec.template.spec.containers[] | select(.name=="consul") | .env[] | select(.name=="ADVERTISE_IP") | .valueFrom.fieldRef.fieldPath' |
@@ -1629,7 +1527,6 @@ local actual=$(echo $object |
   cd `chart_dir`
   local has_exposed_host_ports=$(helm template \
       -s templates/client-daemonset.yaml \
-      --set 'client.enabled=true' \
       --set 'server.enabled=true' \
       --set 'client.enabled=true' \
       . | tee /dev/stderr |
@@ -1643,7 +1540,6 @@ local actual=$(echo $object |
   local has_exposed_host_ports=$(helm template \
       -s templates/client-daemonset.yaml \
       --set 'client.enabled=true' \
-      --set 'client.enabled=true' \
       --set 'client.exposeGossipPorts=true' \
       . | tee /dev/stderr |
       yq '[.spec.template.spec.containers[] | select(.name=="consul") | .ports[] | select(.containerPort==8301)] | all(has("hostPort"))' |
@@ -1659,7 +1555,6 @@ local actual=$(echo $object |
   # Test that hostPath is set to null.
   local actual=$(helm template \
       -s templates/client-daemonset.yaml \
-      --set 'client.enabled=true' \
       . | tee /dev/stderr |
       yq '.spec.template.spec.volumes[] | select(.name == "data") | .hostPath == null' | tee /dev/stderr )
   [ "${actual}" = "true" ]
@@ -1667,7 +1562,6 @@ local actual=$(echo $object |
   # Test that emptyDir is set instead.
   local actual=$(helm template \
       -s templates/client-daemonset.yaml \
-      --set 'client.enabled=true' \
       . | tee /dev/stderr |
       yq '.spec.template.spec.volumes[] | select(.name == "data") | .emptyDir == {}' | tee /dev/stderr )
   [ "${actual}" = "true" ]
@@ -1677,7 +1571,6 @@ local actual=$(echo $object |
   cd `chart_dir`
   local actual=$(helm template \
       -s templates/client-daemonset.yaml \
-      --set 'client.enabled=true' \
       --set 'client.dataDirectoryHostPath=/opt/consul' \
       . | tee /dev/stderr |
       yq '.spec.template.spec.volumes[] | select(.name == "data") | .hostPath.path == "/opt/consul"' | tee /dev/stderr)
@@ -1691,7 +1584,6 @@ local actual=$(echo $object |
   cd `chart_dir`
   local actual=$(helm template \
       -s templates/client-daemonset.yaml \
-      --set 'client.enabled=true' \
       . | tee /dev/stderr |
       yq '.spec.template.spec.dnsPolicy == null' | tee /dev/stderr)
   [ "${actual}" = "true" ]
@@ -1701,13 +1593,34 @@ local actual=$(echo $object |
   cd `chart_dir`
   local actual=$(helm template \
       -s templates/client-daemonset.yaml \
-      --set 'client.enabled=true' \
       --set 'client.dnsPolicy=ClusterFirstWithHostNet' \
       . | tee /dev/stderr |
       yq '.spec.template.spec.dnsPolicy == "ClusterFirstWithHostNet"' | tee /dev/stderr)
   [ "${actual}" = "true" ]
 }
 
+#--------------------------------------------------------------------
+# DNS
+
+@test "client/DaemonSet: recursor flags are not set by default" {
+  cd `chart_dir`
+  local actual=$(helm template \
+      -s templates/client-daemonset.yaml \
+      . | tee /dev/stderr |
+      yq -c -r '.spec.template.spec.containers[0].command | join(" ") | contains("$recursor_flags")' | tee /dev/stderr)
+  [ "${actual}" = "false" ]
+}
+
+@test "client/DaemonSet: adds recursor flags when dns.enableRedirection is true" {
+  cd `chart_dir`
+  local actual=$(helm template \
+      -s templates/client-daemonset.yaml \
+      --set 'dns.enableRedirection=true' \
+      . | tee /dev/stderr |
+      yq -c -r '.spec.template.spec.containers[0].command | join(" ") | contains("$recursor_flags")' | tee /dev/stderr)
+  [ "${actual}" = "true" ]
+}
+
 #--------------------------------------------------------------------
 # hostNetwork
 
@@ -1715,7 +1628,6 @@ local actual=$(echo $object |
   cd `chart_dir`
   local actual=$(helm template \
       -s templates/client-daemonset.yaml \
-      --set 'client.enabled=true' \
       . | tee /dev/stderr |
      yq '.spec.template.spec.hostNetwork == null' | tee /dev/stderr)
   [ "${actual}" = "true" ]
@@ -1725,7 +1637,6 @@ local actual=$(echo $object |
   cd `chart_dir`
   local actual=$(helm template \
       -s templates/client-daemonset.yaml \
-      --set 'client.enabled=true' \
       --set 'client.hostNetwork=true' \
       . | tee /dev/stderr |
       yq '.spec.template.spec.hostNetwork == true' | tee /dev/stderr)
@@ -1738,7 +1649,6 @@ local actual=$(echo $object |
   cd `chart_dir`
   local actual=$(helm template \
       -s templates/client-daemonset.yaml \
-      --set 'client.enabled=true' \
       . | tee /dev/stderr | \
       yq '.spec.updateStrategy == null' | tee /dev/stderr)
   [ "${actual}" = "true" ]
@@ -1752,7 +1662,6 @@ rollingUpdate:
 "
   local actual=$(helm template \
       -s templates/client-daemonset.yaml \
-      --set 'client.enabled=true' \
       --set "client.updateStrategy=${updateStrategy}" \
       . | tee /dev/stderr | \
       yq -c '.spec.updateStrategy == {"type":"RollingUpdate","rollingUpdate":{"maxUnavailable":5}}' | tee /dev/stderr)
@@ -1766,7 +1675,6 @@ rollingUpdate:
   cd `chart_dir`
   local has_security_context=$(helm template \
       -s templates/client-daemonset.yaml \
-      --set 'client.enabled=true' \
       --set 'global.openshift.enabled=true' \
       . | tee /dev/stderr |
       yq -r '.spec.template.spec | has("securityContext")' | tee /dev/stderr)
@@ -1780,7 +1688,6 @@ rollingUpdate:
   cd `chart_dir`
   local security_context=$(helm template \
       -s templates/client-daemonset.yaml \
-      --set 'client.enabled=true' \
       . | tee /dev/stderr |
       yq -r '.spec.template.spec.securityContext' | tee /dev/stderr)
@@ -1801,7 +1708,6 @@ rollingUpdate:
   cd `chart_dir`
   local security_context=$(helm template \
       -s templates/client-daemonset.yaml \
-      --set 'client.enabled=true' \
       --set 'client.securityContext.runAsNonRoot=false' \
       --set 'client.securityContext.privileged=true' \
       . | tee /dev/stderr |
@@ -1821,7 +1727,6 @@ rollingUpdate:
   cd `chart_dir`
   local manifest=$(helm template \
       -s templates/client-daemonset.yaml \
-      --set 'client.enabled=true' \
       --set 'global.tls.enabled=true' \
       --set 'global.tls.enableAutoEncrypt=false' \
       --set 'global.acls.manageSystemACLs=true' \
@@ -1847,7 +1752,6 @@ rollingUpdate:
   cd `chart_dir`
   local manifest=$(helm template \
       -s templates/client-daemonset.yaml \
-      --set 'client.enabled=true' \
       --set 'global.openshift.enabled=true' \
       --set 'global.tls.enabled=true' \
       --set 'global.tls.enableAutoEncrypt=false' \
@@ -1874,7 +1778,6 @@ rollingUpdate:
   cd `chart_dir`
   local actual=$(helm template \
       -s templates/client-daemonset.yaml \
-      --set 'client.enabled=true' \
       --set 'global.enterpriseLicense.secretName=foo' \
       --set 'global.enterpriseLicense.secretKey=bar' \
       . | tee /dev/stderr |
@@ -1886,7 +1789,6 @@ rollingUpdate:
   cd `chart_dir`
   local actual=$(helm template \
       -s templates/client-daemonset.yaml \
-      --set 'client.enabled=true' \
       --set 'global.enterpriseLicense.secretName=foo' \
       --set 'global.enterpriseLicense.secretKey=bar' \
       . | tee /dev/stderr |
@@ -1898,7 +1800,6 @@ rollingUpdate:
   cd `chart_dir`
   local actual=$(helm template \
       -s templates/client-daemonset.yaml \
-      --set 'client.enabled=true' \
       --set 'global.enterpriseLicense.secretName=foo' \
       --set 'global.enterpriseLicense.secretKey=bar' \
       . | tee /dev/stderr |
@@ -1910,7 +1811,6 @@ rollingUpdate:
   cd `chart_dir`
   local actual=$(helm template \
       -s templates/client-daemonset.yaml \
-      --set 'client.enabled=true' \
       --set 'global.enterpriseLicense.secretName=foo' \
       --set 'global.enterpriseLicense.secretKey=bar' \
       --set 'global.acls.manageSystemACLs=true' \
@@ -1923,7 +1823,6 @@ rollingUpdate:
   cd `chart_dir`
   local actual=$(helm template \
       -s templates/client-daemonset.yaml \
-      --set 'client.enabled=true' \
       --set 'global.enterpriseLicense.secretName=foo' \
       --set 'global.enterpriseLicense.secretKey=bar' \
       --set 'global.acls.manageSystemACLs=true' \
@@ -1936,7 +1835,6 @@ rollingUpdate:
   cd `chart_dir`
   local actual=$(helm template \
       -s templates/client-daemonset.yaml \
-      --set 'client.enabled=true' \
       --set 'global.enterpriseLicense.secretName=foo' \
       --set 'global.enterpriseLicense.secretKey=bar' \
       --set 'global.acls.manageSystemACLs=true' \
@@ -1949,7 +1847,6 @@ rollingUpdate:
   cd `chart_dir`
   run helm template \
       -s templates/client-daemonset.yaml \
-      --set 'client.enabled=true' \
       --set 'global.enterpriseLicense.secretName=' \
       --set 'global.enterpriseLicense.secretKey=enterpriselicense' \
       .
@@ -1961,7 +1858,6 @@ rollingUpdate:
   cd `chart_dir`
   run helm template \
       -s templates/client-daemonset.yaml \
-      --set 'client.enabled=true' \
       --set 'global.enterpriseLicense.secretName=foo' \
       --set 'global.enterpriseLicense.secretKey=' \
       .
@@ -1975,7 +1871,6 @@ rollingUpdate:
   cd `chart_dir`
   local actual=$(helm template \
       -s templates/client-daemonset.yaml \
-      --set 'client.enabled=true' \
       --set 'global.recursors[0]=1.2.3.4' \
       . | tee /dev/stderr |
       yq -c -r '.spec.template.spec.containers[0].command | join(" ") | contains("-recursor=\"1.2.3.4\"")' | tee /dev/stderr)
@@ -1988,8 +1883,6 @@ rollingUpdate:
   cd `chart_dir`
   local actual=$(helm template \
       -s templates/client-daemonset.yaml \
-      --set 'client.enabled=true' \
-      --set 'global.enableConsulNamespaces=true' \
       --set 'global.adminPartitions.enabled=true' \
       . | tee /dev/stderr |
       yq -c -r '.spec.template.spec.containers[0].command | join(" ") | contains("partition = \"default\"")' | tee /dev/stderr)
@@ -2000,8 +1893,6 @@ rollingUpdate:
   cd `chart_dir`
   local actual=$(helm template \
       -s templates/client-daemonset.yaml \
-      --set 'client.enabled=true' \
-      --set 'global.enableConsulNamespaces=true' \
       --set 'global.adminPartitions.enabled=true' \
       --set 'global.adminPartitions.name=test' \
       --set 'server.enabled=false' \
@@ -2016,11 +1907,10 @@ rollingUpdate:
   cd `chart_dir`
   run helm template \
       -s templates/client-daemonset.yaml \
-      --set 'client.enabled=true' \
-      --set 'global.enableConsulNamespaces=true' \
       --set 'global.adminPartitions.enabled=true' \
       --set 'global.adminPartitions.name=test' \
       .
+
   [ "$status" -eq 1 ]
   [[ "$output" =~ "global.adminPartitions.name has to be \"default\" in the server cluster" ]]
 }
@@ -2029,10 +1919,10 @@ rollingUpdate:
   cd `chart_dir`
   run helm template \
       -s templates/client-daemonset.yaml \
-      --set 'client.enabled=true' \
       --set 'global.adminPartitions.enabled=true' \
       --set 'global.federation.enabled=true' \
       .
+
   [ "$status" -eq 1 ]
   [[ "$output" =~ "If global.federation.enabled is true, global.adminPartitions.enabled must be false because they are mutually exclusive" ]]
 }
@@ -2046,7 +1936,6 @@ rollingUpdate:
   # Test that it defines the extra container
   local object=$(helm template \
       -s templates/client-daemonset.yaml \
-      --set 'client.enabled=true' \
       --set 'client.extraContainers[0].image=test-image' \
       --set 'client.extraContainers[0].name=test-container' \
       --set 'client.extraContainers[0].ports[0].name=test-port' \
@@ -2092,7 +1981,6 @@ rollingUpdate:
 
   local object=$(helm template \
       -s templates/client-daemonset.yaml \
-      --set 'client.enabled=true' \
       --set 'client.extraContainers[0].image=test-image' \
       --set 'client.extraContainers[0].name=test-container' \
       --set 'client.extraContainers[1].image=test-image' \
@@ -2109,7 +1997,6 @@ rollingUpdate:
 
   local object=$(helm template \
       -s templates/client-daemonset.yaml \
-      --set 'client.enabled=true' \
       . | tee /dev/stderr |
       yq -r '.spec.template.spec.containers | length' | tee /dev/stderr)
@@ -2123,7 +2010,6 @@ rollingUpdate:
   cd `chart_dir`
   run helm template \
       -s templates/client-daemonset.yaml \
-      --set 'client.enabled=true' \
       --set 'global.secretsBackend.vault.enabled=true' \
       --set 'global.secretsBackend.vault.consulServerRole=test' \
       .
@@ -2135,7 +2021,6 @@ rollingUpdate:
   cd `chart_dir`
   run helm template \
       -s templates/client-daemonset.yaml \
-      --set 'client.enabled=true' \
       --set 'global.secretsBackend.vault.enabled=true' \
       --set 'global.secretsBackend.vault.consulServerRole=test' \
       --set 'global.secretsBackend.vault.consulClientRole=test' \
@@ -2149,7 +2034,6 @@ rollingUpdate:
   cd `chart_dir`
   run helm template \
       -s templates/client-daemonset.yaml \
-      --set 'client.enabled=true' \
       --set 'global.secretsBackend.vault.enabled=true' \
       --set 'global.secretsBackend.vault.consulClientRole=test' \
       --set 'global.secretsBackend.vault.consulServerRole=foo' \
@@ -2166,7 +2050,6 @@ rollingUpdate:
   cd `chart_dir`
   run helm template \
       -s templates/client-daemonset.yaml \
-      --set 'client.enabled=true' \
       --set 'global.secretsBackend.vault.enabled=true' \
       --set 'global.secretsBackend.vault.consulClientRole=test' \
       --set 'global.secretsBackend.vault.consulServerRole=test' \
@@ -2182,7 +2065,6 @@ rollingUpdate:
   cd `chart_dir`
   run helm template \
       -s templates/client-daemonset.yaml \
-      --set 'client.enabled=true' \
       --set 'global.secretsBackend.vault.enabled=true' \
       --set 'global.secretsBackend.vault.consulClientRole=test' \
       --set 'global.secretsBackend.vault.consulServerRole=test' \
@@ -2198,7 +2080,6 @@ rollingUpdate:
   cd `chart_dir`
   local object=$(helm template \
       -s templates/client-daemonset.yaml \
-      --set 'client.enabled=true' \
       . | tee /dev/stderr |
      yq -r '.spec.template.metadata' | tee /dev/stderr)
@@ -2214,7 +2095,6 @@ rollingUpdate:
   cd `chart_dir`
   local object=$(helm template \
       -s templates/client-daemonset.yaml \
-      --set 'client.enabled=true' \
       --set 'global.secretsBackend.vault.enabled=true' \
       --set 'global.secretsBackend.vault.consulClientRole=foo' \
       --set 'global.secretsBackend.vault.consulServerRole=test' \
@@ -2234,7 +2114,6 @@ rollingUpdate:
   cd `chart_dir`
   local object=$(helm template \
       -s templates/client-daemonset.yaml \
-      --set 'client.enabled=true' \
       --set 'global.secretsBackend.vault.enabled=true' \
       --set 'global.secretsBackend.vault.consulClientRole=test' \
       --set 'global.secretsBackend.vault.consulServerRole=foo' \
@@ -2257,7 +2136,6 @@ rollingUpdate:
   cd `chart_dir`
   local object=$(helm template \
       -s templates/client-daemonset.yaml \
-      --set 'client.enabled=true' \
       --set 'global.secretsBackend.vault.enabled=true' \
       --set 'global.secretsBackend.vault.consulClientRole=test' \
       --set 'global.secretsBackend.vault.consulServerRole=foo' \
@@ -2282,7 +2160,6 @@ rollingUpdate:
   cd `chart_dir`
   local object=$(helm template \
       -s templates/client-daemonset.yaml \
-      --set 'client.enabled=true' \
       --set 'global.secretsBackend.vault.enabled=true' \
       --set 'global.secretsBackend.vault.consulClientRole=foo' \
       --set 'global.secretsBackend.vault.consulServerRole=test' \
@@ -2299,7 +2176,6 @@ rollingUpdate:
   cd `chart_dir`
   local object=$(helm template \
       -s templates/client-daemonset.yaml \
-      --set 'client.enabled=true' \
       --set 'global.secretsBackend.vault.enabled=true' \
       --set 'global.secretsBackend.vault.consulClientRole=foo' \
       --set 'global.secretsBackend.vault.consulServerRole=test' \
@@ -2317,7 +2193,6 @@ rollingUpdate:
   cd `chart_dir`
   local object=$(helm template \
       -s templates/client-daemonset.yaml \
-      --set 'client.enabled=true' \
       --set 'global.secretsBackend.vault.enabled=true' \
       --set 'global.secretsBackend.vault.consulClientRole=foo' \
       --set 'global.secretsBackend.vault.consulServerRole=test' \
@@ -2335,7 +2210,6 @@ rollingUpdate:
   cd `chart_dir`
   local object=$(helm template \
       -s templates/client-daemonset.yaml \
-      --set 'client.enabled=true' \
       --set 'global.secretsBackend.vault.enabled=true' \
       --set 'global.secretsBackend.vault.consulClientRole=foo' \
       --set 'global.secretsBackend.vault.consulServerRole=test' \
@@ -2354,7 +2228,6 @@ rollingUpdate:
   cd `chart_dir`
   local object=$(helm template \
       -s templates/client-daemonset.yaml \
-      --set 'client.enabled=true' \
       --set 'global.secretsBackend.vault.enabled=true' \
       --set 'global.secretsBackend.vault.consulClientRole=test' \
       --set 'global.secretsBackend.vault.consulServerRole=foo' \
@@ -2380,7 +2253,6 @@ rollingUpdate:
   cd `chart_dir`
   local object=$(helm template \
       -s templates/client-daemonset.yaml \
-      --set 'client.enabled=true' \
       --set 'global.secretsBackend.vault.enabled=true' \
       --set 'global.secretsBackend.vault.consulClientRole=test' \
       --set 'global.secretsBackend.vault.consulServerRole=foo' \
@@ -2418,7 +2290,6 @@ rollingUpdate:
   cd `chart_dir`
   local object=$(helm template \
       -s templates/client-daemonset.yaml \
-      --set 'client.enabled=true' \
       --set 'global.secretsBackend.vault.enabled=true' \
       --set 'global.secretsBackend.vault.consulClientRole=foo' \
       --set 'global.secretsBackend.vault.consulServerRole=test' \
@@ -2441,7 +2312,6 @@ rollingUpdate:
   cd `chart_dir`
   local object=$(helm template \
       -s templates/client-daemonset.yaml \
-      --set 'client.enabled=true' \
       --set 'global.secretsBackend.vault.enabled=true' \
       --set 'global.secretsBackend.vault.consulClientRole=foo' \
       --set 'global.secretsBackend.vault.consulServerRole=test' \
@@ -2467,7 +2337,6 @@ rollingUpdate:
   cd `chart_dir`
   local env=$(helm template \
       -s templates/client-daemonset.yaml \
-      --set 'client.enabled=true' \
       --set 'global.secretsBackend.vault.enabled=true' \
       --set 'global.secretsBackend.vault.consulClientRole=foo' \
       --set 'global.secretsBackend.vault.consulServerRole=test' \
@@ -2486,7 +2355,6 @@ rollingUpdate:
   cd `chart_dir`
   local actual=$(helm template \
       -s templates/client-daemonset.yaml \
-      --set 'client.enabled=true' \
       --set 'global.secretsBackend.vault.enabled=true' \
       --set 'global.secretsBackend.vault.consulClientRole=foo' \
       --set 'global.secretsBackend.vault.consulServerRole=test' \
@@ -2501,7 +2369,6 @@ rollingUpdate:
   cd `chart_dir`
   local actual=$(helm template \
       -s templates/client-daemonset.yaml \
-      --set 'client.enabled=true' \
       --set 'global.secretsBackend.vault.enabled=true' \
       --set 'global.secretsBackend.vault.consulClientRole=foo' \
       --set 'global.secretsBackend.vault.consulServerRole=test' \
@@ -2516,7 +2383,6 @@ rollingUpdate:
   cd `chart_dir`
   local env=$(helm template \
       -s templates/client-daemonset.yaml \
-      --set 'client.enabled=true' \
       --set 'global.acls.manageSystemACLs=true' \
       --set 'global.secretsBackend.vault.manageSystemACLsRole=true' \
       --set 'global.acls.replicationToken.secretName=replication' \
@@ -2540,7 +2406,6 @@ rollingUpdate:
   cd `chart_dir`
   local object=$(helm template \
       -s templates/client-daemonset.yaml \
-      --set 'client.enabled=true' \
       --set 'global.acls.manageSystemACLs=true' \
       --set 'global.tls.enabled=true' \
       --set 'global.secretsBackend.vault.enabled=true' \
@@ -2564,7 +2429,6 @@ rollingUpdate:
   cd `chart_dir`
   local actual=$(helm template \
       -s templates/client-daemonset.yaml \
-      --set 'client.enabled=true' \
       --set 'global.secretsBackend.vault.enabled=true' \
       --set 'global.secretsBackend.vault.consulClientRole=test' \
       --set 'global.secretsBackend.vault.consulServerRole=foo' \
@@ -2577,7 +2441,6 @@ rollingUpdate:
   cd `chart_dir`
   local actual=$(helm template \
       -s templates/client-daemonset.yaml \
-      --set 'client.enabled=true' \
       --set 'global.secretsBackend.vault.enabled=true' \
       --set 'global.secretsBackend.vault.consulClientRole=test' \
       --set 'global.secretsBackend.vault.consulServerRole=foo' \
@@ -2594,230 +2457,9 @@ rollingUpdate:
   cd `chart_dir`
   run helm template \
       -s templates/client-daemonset.yaml \
-      --set 'client.enabled=true' \
       --set 'global.imageK8s=something' \
       .
-  [ "$status" -eq 1 ]
-  [[ "$output" =~ "global.imageK8s is not a valid key, use global.imageK8S (note the capital 'S')" ]]
-}
-
-#--------------------------------------------------------------------
-# global.cloud
-@test "client/DaemonSet: fails when global.cloud.enabled is true and global.cloud.clientId.secretName is not set but global.cloud.clientSecret.secretName and global.cloud.resourceId.secretName is set" {
-  cd `chart_dir`
-  run helm template \
-      -s templates/client-daemonset.yaml \
-      --set 'client.enabled=true' \
-      --set 'global.tls.enabled=true' \
-      --set 'global.tls.enableAutoEncrypt=true' \
-      --set 'global.datacenter=dc-foo' \
-      --set 'global.domain=bar' \
-      --set 'global.cloud.enabled=true' \
-      --set 'global.cloud.clientSecret.secretName=client-id-name' \
-      --set 'global.cloud.clientSecret.secretKey=client-id-key' \
-      --set 'global.cloud.resourceId.secretName=client-resource-id-name' \
-      --set 'global.cloud.resourceId.secretKey=client-resource-id-key' \
-      .
- [ "$status" -eq 1 ] - [[ "$output" =~ "When global.cloud.enabled is true, global.cloud.resourceId.secretName, global.cloud.clientId.secretName, and global.cloud.clientSecret.secretName must also be set." ]] -} - -@test "client/DaemonSet: fails when global.cloud.enabled is true and global.cloud.clientSecret.secretName is not set but global.cloud.clientId.secretName and global.cloud.resourceId.secretName is set" { - cd `chart_dir` - run helm template \ - -s templates/client-daemonset.yaml \ - --set 'client.enabled=true' \ - --set 'global.tls.enabled=true' \ - --set 'global.tls.enableAutoEncrypt=true' \ - --set 'global.datacenter=dc-foo' \ - --set 'global.domain=bar' \ - --set 'global.cloud.enabled=true' \ - --set 'global.cloud.clientId.secretName=client-id-name' \ - --set 'global.cloud.clientId.secretKey=client-id-key' \ - --set 'global.cloud.resourceId.secretName=resource-id-name' \ - --set 'global.cloud.resourceId.secretKey=resource-id-key' \ - . - [ "$status" -eq 1 ] - [[ "$output" =~ "When global.cloud.enabled is true, global.cloud.resourceId.secretName, global.cloud.clientId.secretName, and global.cloud.clientSecret.secretName must also be set." ]] -} - -@test "client/DaemonSet: fails when global.cloud.enabled is true and global.cloud.resourceId.secretName is not set but global.cloud.clientId.secretName and global.cloud.clientSecret.secretName is set" { - cd `chart_dir` - run helm template \ - -s templates/client-daemonset.yaml \ - --set 'apiGateway.enabled=true' \ - --set 'apiGateway.image=foo' \ - --set 'global.tls.enabled=true' \ - --set 'global.tls.enableAutoEncrypt=true' \ - --set 'global.datacenter=dc-foo' \ - --set 'global.domain=bar' \ - --set 'global.cloud.enabled=true' \ - --set 'global.cloud.clientId.secretName=client-id-name' \ - --set 'global.cloud.clientId.secretKey=client-id-key' \ - --set 'global.cloud.clientSecret.secretName=client-secret-id-name' \ - --set 'global.cloud.clientSecret.secretKey=client-secret-id-key' \ - . - [ "$status" -eq 1 ] - [[ "$output" =~ "When global.cloud.enabled is true, global.cloud.resourceId.secretName, global.cloud.clientId.secretName, and global.cloud.clientSecret.secretName must also be set." ]] -} -@test "client/DaemonSet: fails when global.cloud.resourceId.secretName is set but global.cloud.resourceId.secretKey is not set." { - cd `chart_dir` - run helm template \ - -s templates/client-daemonset.yaml \ - --set 'apiGateway.enabled=true' \ - --set 'apiGateway.image=foo' \ - --set 'global.tls.enabled=true' \ - --set 'global.tls.enableAutoEncrypt=true' \ - --set 'global.datacenter=dc-foo' \ - --set 'global.domain=bar' \ - --set 'global.cloud.enabled=true' \ - --set 'global.cloud.clientId.secretName=client-id-name' \ - --set 'global.cloud.clientId.secretKey=client-id-key' \ - --set 'global.cloud.clientSecret.secretName=client-secret-id-name' \ - --set 'global.cloud.clientSecret.secretKey=client-secret-id-key' \ - --set 'global.cloud.resourceId.secretName=resource-id-name' \ - . [ "$status" -eq 1 ] - [[ "$output" =~ "When either global.cloud.resourceId.secretName or global.cloud.resourceId.secretKey is defined, both must be set." ]] -} - -@test "client/DaemonSet: fails when global.cloud.authURL.secretName is set but global.cloud.authURL.secretKey is not set." 
{ - cd `chart_dir` - run helm template \ - -s templates/client-daemonset.yaml \ - --set 'apiGateway.enabled=true' \ - --set 'apiGateway.image=foo' \ - --set 'global.tls.enabled=true' \ - --set 'global.tls.enableAutoEncrypt=true' \ - --set 'global.datacenter=dc-foo' \ - --set 'global.domain=bar' \ - --set 'global.cloud.enabled=true' \ - --set 'global.cloud.clientId.secretName=client-id-name' \ - --set 'global.cloud.clientId.secretKey=client-id-key' \ - --set 'global.cloud.clientSecret.secretName=client-secret-id-name' \ - --set 'global.cloud.clientSecret.secretKey=client-secret-id-key' \ - --set 'global.cloud.resourceId.secretName=resource-id-name' \ - --set 'global.cloud.resourceId.secretKey=resource-id-key' \ - --set 'global.cloud.authUrl.secretName=auth-url-name' \ - . - [ "$status" -eq 1 ] - - [[ "$output" =~ "When either global.cloud.authUrl.secretName or global.cloud.authUrl.secretKey is defined, both must be set." ]] -} - -@test "client/DaemonSet: fails when global.cloud.authURL.secretKey is set but global.cloud.authURL.secretName is not set." { - cd `chart_dir` - run helm template \ - -s templates/client-daemonset.yaml \ - --set 'apiGateway.enabled=true' \ - --set 'apiGateway.image=foo' \ - --set 'global.tls.enabled=true' \ - --set 'global.tls.enableAutoEncrypt=true' \ - --set 'global.datacenter=dc-foo' \ - --set 'global.domain=bar' \ - --set 'global.cloud.enabled=true' \ - --set 'global.cloud.clientId.secretName=client-id-name' \ - --set 'global.cloud.clientId.secretKey=client-id-key' \ - --set 'global.cloud.clientSecret.secretName=client-secret-id-name' \ - --set 'global.cloud.clientSecret.secretKey=client-secret-id-key' \ - --set 'global.cloud.resourceId.secretName=resource-id-name' \ - --set 'global.cloud.resourceId.secretKey=resource-id-key' \ - --set 'global.cloud.authUrl.secretKey=auth-url-key' \ - . - [ "$status" -eq 1 ] - - [[ "$output" =~ "When either global.cloud.authUrl.secretName or global.cloud.authUrl.secretKey is defined, both must be set." ]] -} - -@test "client/DaemonSet: fails when global.cloud.apiHost.secretName is set but global.cloud.apiHost.secretKey is not set." { - cd `chart_dir` - run helm template \ - -s templates/api-gateway-controller-deployment.yaml \ - --set 'client.enabled=true' \ - --set 'global.tls.enabled=true' \ - --set 'global.tls.enableAutoEncrypt=true' \ - --set 'global.datacenter=dc-foo' \ - --set 'global.domain=bar' \ - --set 'global.cloud.enabled=true' \ - --set 'global.cloud.clientId.secretName=client-id-name' \ - --set 'global.cloud.clientId.secretKey=client-id-key' \ - --set 'global.cloud.clientSecret.secretName=client-secret-id-name' \ - --set 'global.cloud.clientSecret.secretKey=client-secret-id-key' \ - --set 'global.cloud.resourceId.secretName=resource-id-name' \ - --set 'global.cloud.resourceId.secretKey=resource-id-key' \ - --set 'global.cloud.apiHost.secretName=auth-url-name' \ - . - [ "$status" -eq 1 ] - - [[ "$output" =~ "When either global.cloud.apiHost.secretName or global.cloud.apiHost.secretKey is defined, both must be set." ]] -} - -@test "client/DaemonSet: fails when global.cloud.apiHost.secretKey is set but global.cloud.apiHost.secretName is not set." 
{ - cd `chart_dir` - run helm template \ - -s templates/client-daemonset.yaml \ - --set 'client.enabled=true' \ - --set 'global.tls.enabled=true' \ - --set 'global.tls.enableAutoEncrypt=true' \ - --set 'global.datacenter=dc-foo' \ - --set 'global.domain=bar' \ - --set 'global.cloud.enabled=true' \ - --set 'global.cloud.clientId.secretName=client-id-name' \ - --set 'global.cloud.clientId.secretKey=client-id-key' \ - --set 'global.cloud.clientSecret.secretName=client-secret-id-name' \ - --set 'global.cloud.clientSecret.secretKey=client-secret-id-key' \ - --set 'global.cloud.resourceId.secretName=resource-id-name' \ - --set 'global.cloud.resourceId.secretKey=resource-id-key' \ - --set 'global.cloud.apiHost.secretKey=auth-url-key' \ - . - [ "$status" -eq 1 ] - - [[ "$output" =~ "When either global.cloud.apiHost.secretName or global.cloud.apiHost.secretKey is defined, both must be set." ]] -} - -@test "client/DaemonSet: fails when global.cloud.scadaAddress.secretName is set but global.cloud.scadaAddress.secretKey is not set." { - cd `chart_dir` - run helm template \ - -s templates/client-daemonset.yaml \ - --set 'client.enabled=true' \ - --set 'global.tls.enabled=true' \ - --set 'global.tls.enableAutoEncrypt=true' \ - --set 'global.datacenter=dc-foo' \ - --set 'global.domain=bar' \ - --set 'global.cloud.enabled=true' \ - --set 'global.cloud.clientId.secretName=client-id-name' \ - --set 'global.cloud.clientId.secretKey=client-id-key' \ - --set 'global.cloud.clientSecret.secretName=client-secret-id-name' \ - --set 'global.cloud.clientSecret.secretKey=client-secret-id-key' \ - --set 'global.cloud.resourceId.secretName=resource-id-name' \ - --set 'global.cloud.resourceId.secretKey=resource-id-key' \ - --set 'global.cloud.scadaAddress.secretName=scada-address-name' \ - . - [ "$status" -eq 1 ] - - [[ "$output" =~ "When either global.cloud.scadaAddress.secretName or global.cloud.scadaAddress.secretKey is defined, both must be set." ]] -} - -@test "client/DaemonSet: fails when global.cloud.scadaAddress.secretKey is set but global.cloud.scadaAddress.secretName is not set." { - cd `chart_dir` - run helm template \ - -s templates/client-daemonset.yaml \ - --set 'client.enabled=true' \ - --set 'global.tls.enabled=true' \ - --set 'global.tls.enableAutoEncrypt=true' \ - --set 'global.datacenter=dc-foo' \ - --set 'global.domain=bar' \ - --set 'global.cloud.enabled=true' \ - --set 'global.cloud.clientId.secretName=client-id-name' \ - --set 'global.cloud.clientId.secretKey=client-id-key' \ - --set 'global.cloud.clientSecret.secretName=client-secret-id-name' \ - --set 'global.cloud.clientSecret.secretKey=client-secret-id-key' \ - --set 'global.cloud.resourceId.secretName=resource-id-name' \ - --set 'global.cloud.resourceId.secretKey=resource-id-key' \ - --set 'global.cloud.scadaAddress.secretKey=scada-address-key' \ - . - [ "$status" -eq 1 ] - - [[ "$output" =~ "When either global.cloud.scadaAddress.secretName or global.cloud.scadaAddress.secretKey is defined, both must be set." 
-  [[ "$output" =~ "When either global.cloud.scadaAddress.secretName or global.cloud.scadaAddress.secretKey is defined, both must be set." ]]
+  [[ "$output" =~ "global.imageK8s is not a valid key, use global.imageK8S (note the capital 'S')" ]]
 }
diff --git a/charts/consul/test/unit/client-podsecuritypolicy.bats b/charts/consul/test/unit/client-podsecuritypolicy.bats
index 3d7b628389..a37d4ec147 100644
--- a/charts/consul/test/unit/client-podsecuritypolicy.bats
+++ b/charts/consul/test/unit/client-podsecuritypolicy.bats
@@ -22,7 +22,6 @@ load _helpers
   cd `chart_dir`
   local actual=$(helm template \
       -s templates/client-podsecuritypolicy.yaml \
-      --set 'client.enabled=true' \
       --set 'global.enablePodSecurityPolicies=true' \
       . | tee /dev/stderr |
       yq -s 'length > 0' | tee /dev/stderr)
@@ -33,7 +32,6 @@ load _helpers
   cd `chart_dir`
   local actual=$(helm template \
       -s templates/client-podsecuritypolicy.yaml \
-      --set 'client.enabled=true' \
       --set 'global.enablePodSecurityPolicies=true' \
       . | tee /dev/stderr |
       yq -c '.spec.hostPorts' | tee /dev/stderr)
@@ -47,7 +45,6 @@ load _helpers
   cd `chart_dir`
   local actual=$(helm template \
       -s templates/client-podsecuritypolicy.yaml \
-      --set 'client.enabled=true' \
       --set 'global.enablePodSecurityPolicies=true' \
       --set 'client.grpc=false' \
       . | tee /dev/stderr |
@@ -62,7 +59,6 @@ load _helpers
   cd `chart_dir`
   local actual=$(helm template \
       -s templates/client-podsecuritypolicy.yaml \
-      --set 'client.enabled=true' \
       --set 'global.enablePodSecurityPolicies=true' \
       --set 'client.exposeGossipPorts=true' \
       . | tee /dev/stderr |
@@ -77,7 +73,6 @@ load _helpers
   cd `chart_dir`
   local actual=$(helm template \
       -s templates/client-podsecuritypolicy.yaml \
-      --set 'client.enabled=true' \
       --set 'global.enablePodSecurityPolicies=true' \
       . | tee /dev/stderr |
       yq '.spec.volumes | any(contains("hostPath"))' | tee /dev/stderr)
@@ -89,7 +84,6 @@ load _helpers
   # Test that hostPath is an allowed volume type.
   local actual=$(helm template \
       -s templates/client-podsecuritypolicy.yaml \
-      --set 'client.enabled=true' \
       --set 'global.enablePodSecurityPolicies=true' \
       --set 'client.dataDirectoryHostPath=/opt/consul' \
       . | tee /dev/stderr |
@@ -99,7 +93,6 @@ load _helpers
   # Test that the path we're allowed to write to is the right one.
   local actual=$(helm template \
       -s templates/client-podsecuritypolicy.yaml \
-      --set 'client.enabled=true' \
       --set 'global.enablePodSecurityPolicies=true' \
       --set 'client.dataDirectoryHostPath=/opt/consul' \
       . | tee /dev/stderr |
@@ -114,7 +107,6 @@ load _helpers
   cd `chart_dir`
   local actual=$(helm template \
       -s templates/client-podsecuritypolicy.yaml \
-      --set 'client.enabled=true' \
       --set 'global.enablePodSecurityPolicies=true' \
       --set 'global.tls.enabled=true' \
       . | tee /dev/stderr |
@@ -126,7 +118,6 @@ load _helpers
   cd `chart_dir`
   local actual=$(helm template \
       -s templates/client-podsecuritypolicy.yaml \
-      --set 'client.enabled=true' \
       --set 'global.enablePodSecurityPolicies=true' \
       --set 'global.tls.enabled=true' \
       --set 'global.tls.httpsOnly=true' \
@@ -142,7 +133,6 @@ load _helpers
   cd `chart_dir`
   local actual=$(helm template \
       -s templates/client-podsecuritypolicy.yaml \
-      --set 'client.enabled=true' \
       --set 'global.enablePodSecurityPolicies=true' \
       --set 'client.hostNetwork=true' \
       . | tee /dev/stderr |
@@ -155,7 +145,6 @@ load _helpers
   cd `chart_dir`
   local actual=$(helm template \
       -s templates/client-podsecuritypolicy.yaml \
-      --set 'client.enabled=true' \
       --set 'global.enablePodSecurityPolicies=true' \
       --set 'client.hostNetwork=true' \
       . | tee /dev/stderr |
@@ -170,7 +159,6 @@ load _helpers
   cd `chart_dir`
   local actual=$(helm template \
       -s templates/client-podsecuritypolicy.yaml \
-      --set 'client.enabled=true' \
       --set 'global.enablePodSecurityPolicies=true' \
       . | tee /dev/stderr |
       yq '.spec.hostNetwork == false' | tee /dev/stderr)
diff --git a/charts/consul/test/unit/client-role.bats b/charts/consul/test/unit/client-role.bats
index ad9bf86702..066e4ad98d 100644
--- a/charts/consul/test/unit/client-role.bats
+++ b/charts/consul/test/unit/client-role.bats
@@ -2,11 +2,13 @@ load _helpers
 
-@test "client/Role: disabled by default" {
+@test "client/Role: enabled by default" {
   cd `chart_dir`
-  assert_empty helm template \
+  local actual=$(helm template \
       -s templates/client-role.yaml \
-      .
+      . | tee /dev/stderr |
+      yq 'length > 0' | tee /dev/stderr)
+  [ "${actual}" = "true" ]
 }
 
 @test "client/Role: disabled with global.enabled=false" {
diff --git a/charts/consul/test/unit/client-rolebinding.bats b/charts/consul/test/unit/client-rolebinding.bats
index d2dd375f19..2c5912eda8 100644
--- a/charts/consul/test/unit/client-rolebinding.bats
+++ b/charts/consul/test/unit/client-rolebinding.bats
@@ -2,6 +2,15 @@ load _helpers
 
+@test "client/RoleBinding: enabled by default" {
+  cd `chart_dir`
+  local actual=$(helm template \
+      -s templates/client-rolebinding.yaml \
+      . | tee /dev/stderr |
+      yq 'length > 0' | tee /dev/stderr)
+  [ "${actual}" = "true" ]
+}
+
 @test "client/RoleBinding: disabled with global.enabled=false" {
   cd `chart_dir`
   assert_empty helm template \
diff --git a/charts/consul/test/unit/client-securitycontextconstraints.bats b/charts/consul/test/unit/client-securitycontextconstraints.bats
index 4efbda998b..c8901f7e43 100644
--- a/charts/consul/test/unit/client-securitycontextconstraints.bats
+++ b/charts/consul/test/unit/client-securitycontextconstraints.bats
@@ -22,7 +22,6 @@ load _helpers
   cd `chart_dir`
   local actual=$(helm template \
       -s templates/client-securitycontextconstraints.yaml \
-      --set 'client.enabled=true' \
       --set 'global.openshift.enabled=true' \
       . | tee /dev/stderr |
       yq -s 'length > 0' | tee /dev/stderr)
@@ -33,7 +32,6 @@ load _helpers
   cd `chart_dir`
   local actual=$(helm template \
       -s templates/client-securitycontextconstraints.yaml \
-      --set 'client.enabled=true' \
       --set 'global.openshift.enabled=true' \
       . | tee /dev/stderr |
       yq -c '.allowHostPorts' | tee /dev/stderr)
@@ -48,7 +46,6 @@ load _helpers
   cd `chart_dir`
   local actual=$(helm template \
       -s templates/client-securitycontextconstraints.yaml \
-      --set 'client.enabled=true' \
       --set 'global.openshift.enabled=true' \
       . | tee /dev/stderr |
       yq '.volumes | any(contains("hostPath"))' | tee /dev/stderr)
@@ -60,7 +57,6 @@ load _helpers
   # Test that hostPath is an allowed volume type.
   local actual=$(helm template \
       -s templates/client-securitycontextconstraints.yaml \
-      --set 'client.enabled=true' \
       --set 'global.openshift.enabled=true' \
       --set 'client.dataDirectoryHostPath=/opt/consul' \
       . | tee /dev/stderr |
@@ -70,7 +66,6 @@ load _helpers
   # Test that the path we're allowed to write to host path.
   local actual=$(helm template \
       -s templates/client-securitycontextconstraints.yaml \
-      --set 'client.enabled=true' \
       --set 'global.openshift.enabled=true' \
       --set 'client.dataDirectoryHostPath=/opt/consul' \
       . | tee /dev/stderr |
@@ -85,7 +80,6 @@ load _helpers
   cd `chart_dir`
   local actual=$(helm template \
       -s templates/client-securitycontextconstraints.yaml \
-      --set 'client.enabled=true' \
       --set 'global.openshift.enabled=true' \
       --set 'client.hostNetwork=true' \
       . | tee /dev/stderr |
@@ -97,7 +91,6 @@ load _helpers
   cd `chart_dir`
   local actual=$(helm template \
       -s templates/client-securitycontextconstraints.yaml \
-      --set 'client.enabled=true' \
       --set 'global.openshift.enabled=true' \
       . | tee /dev/stderr |
       yq '.allowHostNetwork == false' | tee /dev/stderr)
diff --git a/charts/consul/test/unit/client-serviceaccount.bats b/charts/consul/test/unit/client-serviceaccount.bats
index d8a717a95d..429470207a 100644
--- a/charts/consul/test/unit/client-serviceaccount.bats
+++ b/charts/consul/test/unit/client-serviceaccount.bats
@@ -2,6 +2,15 @@ load _helpers
 
+@test "client/ServiceAccount: enabled by default" {
+  cd `chart_dir`
+  local actual=$(helm template \
+      -s templates/client-serviceaccount.yaml \
+      . | tee /dev/stderr |
+      yq 'length > 0' | tee /dev/stderr)
+  [ "${actual}" = "true" ]
+}
+
 @test "client/ServiceAccount: disabled with global.enabled=false" {
   cd `chart_dir`
   assert_empty helm template \
@@ -46,7 +55,6 @@ load _helpers
   cd `chart_dir`
   local object=$(helm template \
       -s templates/client-serviceaccount.yaml \
-      --set 'client.enabled=true' \
       --set 'global.imagePullSecrets[0].name=my-secret' \
       --set 'global.imagePullSecrets[1].name=my-secret2' \
       . | tee /dev/stderr)
@@ -67,7 +75,6 @@ load _helpers
   cd `chart_dir`
   local actual=$(helm template \
       -s templates/client-serviceaccount.yaml \
-      --set 'client.enabled=true' \
       . | tee /dev/stderr |
       yq '.metadata.annotations | length > 0' | tee /dev/stderr)
   [ "${actual}" = "false" ]
@@ -77,7 +84,6 @@ load _helpers
   cd `chart_dir`
   local actual=$(helm template \
       -s templates/client-serviceaccount.yaml \
-      --set 'client.enabled=true' \
       --set "client.serviceAccount.annotations=foo: bar" \
       . | tee /dev/stderr |
       yq -r '.metadata.annotations.foo' | tee /dev/stderr)
diff --git a/charts/consul/test/unit/client-snapshot-agent-deployment.bats b/charts/consul/test/unit/client-snapshot-agent-deployment.bats
new file mode 100644
index 0000000000..166b1d45e7
--- /dev/null
+++ b/charts/consul/test/unit/client-snapshot-agent-deployment.bats
@@ -0,0 +1,1094 @@
+#!/usr/bin/env bats
+
+load _helpers
+
+@test "client/SnapshotAgentDeployment: disabled by default" {
+  cd `chart_dir`
+  assert_empty helm template \
+      -s templates/client-snapshot-agent-deployment.yaml \
+      .
+}
+
+@test "client/SnapshotAgentDeployment: enabled with client.snapshotAgent.enabled=true" {
+  cd `chart_dir`
+  local actual=$(helm template \
+      -s templates/client-snapshot-agent-deployment.yaml \
+      --set 'client.snapshotAgent.enabled=true' \
+      . | tee /dev/stderr |
+      yq 'length > 0' | tee /dev/stderr)
+  [ "${actual}" = "true" ]
+}
+
+@test "client/SnapshotAgentDeployment: enabled with client.enabled=true and client.snapshotAgent.enabled=true" {
+  cd `chart_dir`
+  local actual=$(helm template \
+      -s templates/client-snapshot-agent-deployment.yaml \
+      --set 'client.enabled=true' \
+      --set 'client.snapshotAgent.enabled=true' \
+      . | tee /dev/stderr |
+      yq 'length > 0' | tee /dev/stderr)
+  [ "${actual}" = "true" ]
+}
+
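+# The three checks above and the one below pin down the gating rule for this
+# template: it renders only when client.snapshotAgent.enabled=true and clients
+# are not explicitly disabled. A quick manual spot-check with the same flags:
+#
+#   helm template -s templates/client-snapshot-agent-deployment.yaml \
+#       --set 'client.snapshotAgent.enabled=true' .
+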
+@test "client/SnapshotAgentDeployment: disabled with client.enabled=false and client.snapshotAgent.enabled=true" {
+  cd `chart_dir`
+  assert_empty helm template \
+      -s templates/client-snapshot-agent-deployment.yaml \
+      --set 'client.snapshotAgent.enabled=true' \
+      --set 'client.enabled=false' \
+      .
+}
+
+@test "client/SnapshotAgentDeployment: when client.snapshotAgent.configSecret.secretKey!=null and client.snapshotAgent.configSecret.secretName=null, fail" {
+  cd `chart_dir`
+  run helm template \
+      -s templates/client-snapshot-agent-deployment.yaml \
+      --set 'client.snapshotAgent.enabled=true' \
+      --set 'client.snapshotAgent.configSecret.secretName=' \
+      --set 'client.snapshotAgent.configSecret.secretKey=bar' \
+      .
+  [ "$status" -eq 1 ]
+  [[ "$output" =~ "client.snapshotAgent.configSecret.secretKey and client.snapshotAgent.configSecret.secretName must both be specified." ]]
+}
+
+@test "client/SnapshotAgentDeployment: when client.snapshotAgent.configSecret.secretName!=null and client.snapshotAgent.configSecret.secretKey=null, fail" {
+  cd `chart_dir`
+  run helm template \
+      -s templates/client-snapshot-agent-deployment.yaml \
+      --set 'client.snapshotAgent.enabled=true' \
+      --set 'client.snapshotAgent.configSecret.secretName=foo' \
+      --set 'client.snapshotAgent.configSecret.secretKey=' \
+      .
+  [ "$status" -eq 1 ]
+  [[ "$output" =~ "client.snapshotAgent.configSecret.secretKey and client.snapshotAgent.configSecret.secretName must both be specified." ]]
+}
+
+@test "client/SnapshotAgentDeployment: adds volume for snapshot agent config secret when secret is configured" {
+  cd `chart_dir`
+  local vol=$(helm template \
+      -s templates/client-snapshot-agent-deployment.yaml \
+      --set 'client.snapshotAgent.enabled=true' \
+      --set 'client.snapshotAgent.configSecret.secretName=a/b/c/d' \
+      --set 'client.snapshotAgent.configSecret.secretKey=snapshot-agent-config' \
+      . | tee /dev/stderr |
+      yq -r -c '.spec.template.spec.volumes[] | select(.name == "snapshot-config")' | tee /dev/stderr)
+  local actual
+  actual=$(echo $vol | jq -r '. .name' | tee /dev/stderr)
+  [ "${actual}" = 'snapshot-config' ]
+
+  actual=$(echo $vol | jq -r '. .secret.secretName' | tee /dev/stderr)
+  [ "${actual}" = 'a/b/c/d' ]
+
+  actual=$(echo $vol | jq -r '. .secret.items[0].key' | tee /dev/stderr)
+  [ "${actual}" = 'snapshot-agent-config' ]
+
+  actual=$(echo $vol | jq -r '. .secret.items[0].path' | tee /dev/stderr)
+  [ "${actual}" = 'snapshot-config.json' ]
+}
+
+@test "client/SnapshotAgentDeployment: adds volume mount to snapshot container for snapshot agent config secret when secret is configured" {
+  cd `chart_dir`
+  local vol=$(helm template \
+      -s templates/client-snapshot-agent-deployment.yaml \
+      --set 'client.snapshotAgent.enabled=true' \
+      --set 'client.snapshotAgent.configSecret.secretName=a/b/c/d' \
+      --set 'client.snapshotAgent.configSecret.secretKey=snapshot-agent-config' \
+      . | tee /dev/stderr |
+      yq -r -c '.spec.template.spec.containers[0].volumeMounts[] | select(.name == "snapshot-config")' | tee /dev/stderr)
+  local actual
+  actual=$(echo $vol | jq -r '. .name' | tee /dev/stderr)
+  [ "${actual}" = 'snapshot-config' ]
+
+  actual=$(echo $vol | jq -r '. .readOnly' | tee /dev/stderr)
+  [ "${actual}" = 'true' ]
+
+  actual=$(echo $vol | jq -r '. .mountPath' | tee /dev/stderr)
+  [ "${actual}" = '/consul/config' ]
+}
+
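+# The configSecret tests above assume a pre-existing Kubernetes Secret that
+# holds the snapshot agent JSON config. A hypothetical way to create one (the
+# secret name and file below are illustrative, not part of the chart):
+#
+#   kubectl create secret generic snapshot-agent-config \
+#       --from-file=snapshot-agent-config=./snapshot-config.json
+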
+@test "client/SnapshotAgentDeployment: sets config-dir argument on snapshot agent command to volume mount" {
+  cd `chart_dir`
+  local actual=$(helm template \
+      -s templates/client-snapshot-agent-deployment.yaml \
+      --set 'client.snapshotAgent.enabled=true' \
+      --set 'client.snapshotAgent.configSecret.secretName=a/b/c/d' \
+      --set 'client.snapshotAgent.configSecret.secretKey=snapshot-agent-config' \
+      . | tee /dev/stderr |
+      yq -r '.spec.template.spec.containers[0].command[2] | contains("-config-dir=/consul/config")' | tee /dev/stderr)
+  [ "${actual}" = 'true' ]
+}
+
+#--------------------------------------------------------------------
+# tolerations
+
+@test "client/SnapshotAgentDeployment: no tolerations by default" {
+  cd `chart_dir`
+  local actual=$(helm template \
+      -s templates/client-snapshot-agent-deployment.yaml \
+      --set 'client.snapshotAgent.enabled=true' \
+      . | tee /dev/stderr |
+      yq '.spec.template.spec.tolerations | length > 0' | tee /dev/stderr)
+  [ "${actual}" = "false" ]
+}
+
+@test "client/SnapshotAgentDeployment: populates tolerations when client.tolerations is populated" {
+  cd `chart_dir`
+  local actual=$(helm template \
+      -s templates/client-snapshot-agent-deployment.yaml \
+      --set 'client.snapshotAgent.enabled=true' \
+      --set 'client.tolerations=allow' \
+      . | tee /dev/stderr |
+      yq '.spec.template.spec.tolerations | contains("allow")' | tee /dev/stderr)
+  [ "${actual}" = "true" ]
+}
+
+#--------------------------------------------------------------------
+# priorityClassName
+
+@test "client/SnapshotAgentDeployment: no priorityClassName by default" {
+  cd `chart_dir`
+  local actual=$(helm template \
+      -s templates/client-snapshot-agent-deployment.yaml \
+      --set 'client.snapshotAgent.enabled=true' \
+      . | tee /dev/stderr |
+      yq '.spec.template.spec.priorityClassName | length > 0' | tee /dev/stderr)
+  [ "${actual}" = "false" ]
+}
+
+@test "client/SnapshotAgentDeployment: populates priorityClassName when client.priorityClassName is populated" {
+  cd `chart_dir`
+  local actual=$(helm template \
+      -s templates/client-snapshot-agent-deployment.yaml \
+      --set 'client.snapshotAgent.enabled=true' \
+      --set 'client.priorityClassName=allow' \
+      . | tee /dev/stderr |
+      yq '.spec.template.spec.priorityClassName | contains("allow")' | tee /dev/stderr)
+  [ "${actual}" = "true" ]
+}
+
+#--------------------------------------------------------------------
+# global.acls.manageSystemACLs
+
+@test "clientSnapshotAgent/Deployment: consul-logout preStop hook is added when ACLs are enabled" {
+  cd `chart_dir`
+  local object=$(helm template \
+      -s templates/client-snapshot-agent-deployment.yaml \
+      --set 'client.snapshotAgent.enabled=true' \
+      --set 'global.acls.manageSystemACLs=true' \
+      . | tee /dev/stderr |
+      yq '[.spec.template.spec.containers[0].lifecycle.preStop.exec.command[2]] | any(contains("/bin/consul logout"))' | tee /dev/stderr)
+  [ "${object}" = "true" ]
+}
+
+@test "clientSnapshotAgent/Deployment: CONSUL_HTTP_TOKEN_FILE is not set when acls are disabled" {
+  cd `chart_dir`
+  local actual=$(helm template \
+      -s templates/client-snapshot-agent-deployment.yaml \
+      --set 'client.snapshotAgent.enabled=true' \
+      . | tee /dev/stderr |
+      yq '[.spec.template.spec.containers[0].env[1].name] | any(contains("CONSUL_HTTP_TOKEN_FILE"))' | tee /dev/stderr)
+  [ "${actual}" = "false" ]
+}
+
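+# Note: the env assertions in the ACL tests above and below index env entries
+# by position (env[1], env[2]), so they are sensitive to the order in which
+# the template emits environment variables.
+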
+@test "clientSnapshotAgent/Deployment: CONSUL_HTTP_TOKEN_FILE is set when acls are enabled" {
+  cd `chart_dir`
+  local actual=$(helm template \
+      -s templates/client-snapshot-agent-deployment.yaml \
+      --set 'client.snapshotAgent.enabled=true' \
+      --set 'global.acls.manageSystemACLs=true' \
+      . | tee /dev/stderr |
+      yq '[.spec.template.spec.containers[0].env[2].name] | any(contains("CONSUL_HTTP_TOKEN_FILE"))' | tee /dev/stderr)
+  [ "${actual}" = "true" ]
+}
+
+@test "clientSnapshotAgent/Deployment: init container is created when global.acls.manageSystemACLs=true" {
+  cd `chart_dir`
+  local object=$(helm template \
+      -s templates/client-snapshot-agent-deployment.yaml \
+      --set 'client.snapshotAgent.enabled=true' \
+      --set 'global.acls.manageSystemACLs=true' \
+      . | tee /dev/stderr |
+      yq '.spec.template.spec.initContainers[0]' | tee /dev/stderr)
+
+  local actual=$(echo $object |
+      yq -r '.name' | tee /dev/stderr)
+  [ "${actual}" = "snapshot-agent-acl-init" ]
+
+  local actual=$(echo $object |
+      yq -r '.command | any(contains("consul-k8s-control-plane acl-init"))' | tee /dev/stderr)
+  [ "${actual}" = "true" ]
+
+  local actual=$(echo $object |
+      yq '[.env[1].name] | any(contains("CONSUL_HTTP_ADDR"))' | tee /dev/stderr)
+  [ "${actual}" = "true" ]
+
+  local actual=$(echo $object |
+      yq '[.env[1].value] | any(contains("http://$(HOST_IP):8500"))' | tee /dev/stderr)
+  echo $actual
+  [ "${actual}" = "true" ]
+
+  local actual=$(echo $object |
+      yq -r '.command | any(contains("-consul-api-timeout=5s"))' | tee /dev/stderr)
+  [ "${actual}" = "true" ]
+}
+
+@test "clientSnapshotAgent/Deployment: init container is created when global.acls.manageSystemACLs=true and has correct command and environment with tls enabled" {
+  cd `chart_dir`
+  local object=$(helm template \
+      -s templates/client-snapshot-agent-deployment.yaml \
+      --set 'client.snapshotAgent.enabled=true' \
+      --set 'global.tls.enabled=true' \
+      --set 'global.acls.manageSystemACLs=true' \
+      . | tee /dev/stderr |
+      yq '.spec.template.spec.initContainers[] | select(.name == "snapshot-agent-acl-init")' | tee /dev/stderr)
+
+  local actual=$(echo $object |
+      yq -r '.command | any(contains("consul-k8s-control-plane acl-init"))' | tee /dev/stderr)
+  [ "${actual}" = "true" ]
+
+  local actual=$(echo $object |
+      yq '[.env[1].name] | any(contains("CONSUL_CACERT"))' | tee /dev/stderr)
+  [ "${actual}" = "true" ]
+
+  local actual=$(echo $object |
+      yq '[.env[2].name] | any(contains("CONSUL_HTTP_ADDR"))' | tee /dev/stderr)
+  [ "${actual}" = "true" ]
+
+  local actual=$(echo $object |
+      yq '[.env[2].value] | any(contains("https://$(HOST_IP):8501"))' | tee /dev/stderr)
+  echo $actual
+  [ "${actual}" = "true" ]
+
+  local actual=$(echo $object |
+      yq '.volumeMounts[1] | any(contains("consul-ca-cert"))' | tee /dev/stderr)
+  [ "${actual}" = "true" ]
+
+  local actual=$(echo $object |
+      yq -r '.command | any(contains("-consul-api-timeout=5s"))' | tee /dev/stderr)
+  [ "${actual}" = "true" ]
+}
+
+@test "clientSnapshotAgent/Deployment: init container is created when global.acls.manageSystemACLs=true and has correct command with Partitions enabled" {
+  cd `chart_dir`
+  local object=$(helm template \
+      -s templates/client-snapshot-agent-deployment.yaml \
+      --set 'client.snapshotAgent.enabled=true' \
+      --set 'global.tls.enabled=true' \
+      --set 'global.enableConsulNamespaces=true' \
+      --set 'global.adminPartitions.enabled=true' \
+      --set 'global.adminPartitions.name=default' \
+      --set 'global.acls.manageSystemACLs=true' \
+      . | tee /dev/stderr |
+      yq '.spec.template.spec.initContainers[] | select(.name == "snapshot-agent-acl-init")' | tee /dev/stderr)
+
+  local actual=$(echo $object |
+      yq -r '.command | any(contains("consul-k8s-control-plane acl-init"))' | tee /dev/stderr)
+  [ "${actual}" = "true" ]
+
+  local actual=$(echo $object |
+      yq -r '.command | any(contains("-acl-auth-method=release-name-consul-k8s-component-auth-method"))' | tee /dev/stderr)
+  [ "${actual}" = "true" ]
+
+  local actual=$(echo $object |
+      yq -r '.command | any(contains("-partition=default"))' | tee /dev/stderr)
+  [ "${actual}" = "true" ]
+
+  local actual=$(echo $object |
+      yq '[.env[1].name] | any(contains("CONSUL_CACERT"))' | tee /dev/stderr)
+  [ "${actual}" = "true" ]
+
+  local actual=$(echo $object |
+      yq '[.env[2].name] | any(contains("CONSUL_HTTP_ADDR"))' | tee /dev/stderr)
+  [ "${actual}" = "true" ]
+
+  local actual=$(echo $object |
+      yq '[.env[2].value] | any(contains("https://$(HOST_IP):8501"))' | tee /dev/stderr)
+  echo $actual
+  [ "${actual}" = "true" ]
+
+  local actual=$(echo $object |
+      yq '.volumeMounts[1] | any(contains("consul-ca-cert"))' | tee /dev/stderr)
+  [ "${actual}" = "true" ]
+
+  local actual=$(echo $object |
+      yq -r '.command | any(contains("-consul-api-timeout=5s"))' | tee /dev/stderr)
+  [ "${actual}" = "true" ]
+}
+
+@test "clientSnapshotAgent/Deployment: init container is created when global.acls.manageSystemACLs=true and has correct command and environment with tls enabled and autoencrypt enabled" {
+  cd `chart_dir`
+  local object=$(helm template \
+      -s templates/client-snapshot-agent-deployment.yaml \
+      --set 'client.snapshotAgent.enabled=true' \
+      --set 'global.tls.enabled=true' \
+      --set 'global.tls.enableAutoEncrypt=true' \
+      --set 'global.acls.manageSystemACLs=true' \
+      . | tee /dev/stderr |
+      yq '.spec.template.spec.initContainers[] | select(.name == "snapshot-agent-acl-init")' | tee /dev/stderr)
+
+  local actual=$(echo $object |
+      yq -r '.command | any(contains("consul-k8s-control-plane acl-init"))' | tee /dev/stderr)
+  [ "${actual}" = "true" ]
+
+  local actual=$(echo $object |
+      yq '[.env[1].name] | any(contains("CONSUL_CACERT"))' | tee /dev/stderr)
+  [ "${actual}" = "true" ]
+
+  local actual=$(echo $object |
+      yq '[.env[2].name] | any(contains("CONSUL_HTTP_ADDR"))' | tee /dev/stderr)
+  [ "${actual}" = "true" ]
+
+  local actual=$(echo $object |
+      yq '[.env[2].value] | any(contains("https://$(HOST_IP):8501"))' | tee /dev/stderr)
+  echo $actual
+  [ "${actual}" = "true" ]
+
+  local actual=$(echo $object |
+      yq '.volumeMounts[1] | any(contains("consul-auto-encrypt-ca-cert"))' | tee /dev/stderr)
+  [ "${actual}" = "true" ]
+
+  local actual=$(echo $object |
+      yq -r '.command | any(contains("-consul-api-timeout=5s"))' | tee /dev/stderr)
+  [ "${actual}" = "true" ]
+}
+
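+# Ordering matters with auto-encrypt: the CA consumed by the acl-init
+# container is fetched by get-auto-encrypt-client-ca, so the test below
+# asserts that it runs as the first init container in the Pod spec.
+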
+@test "clientSnapshotAgent/Deployment: auto-encrypt init container is created and is the first init-container when global.acls.manageSystemACLs=true and has correct command and environment with tls enabled and autoencrypt enabled" {
+  cd `chart_dir`
+  local object=$(helm template \
+      -s templates/client-snapshot-agent-deployment.yaml \
+      --set 'client.snapshotAgent.enabled=true' \
+      --set 'global.tls.enabled=true' \
+      --set 'global.tls.enableAutoEncrypt=true' \
+      --set 'global.acls.manageSystemACLs=true' \
+      . | tee /dev/stderr |
+      yq '.spec.template.spec.initContainers[0]' | tee /dev/stderr)
+
+  local actual=$(echo $object |
+      yq -r '.name' | tee /dev/stderr)
+  [ "${actual}" = "get-auto-encrypt-client-ca" ]
+}
+
+#--------------------------------------------------------------------
+# nodeSelector
+
+@test "client/SnapshotAgentDeployment: no nodeSelector by default" {
+  cd `chart_dir`
+  local actual=$(helm template \
+      -s templates/client-snapshot-agent-deployment.yaml \
+      --set 'client.snapshotAgent.enabled=true' \
+      . | tee /dev/stderr |
+      yq '.spec.template.spec.nodeSelector | length > 0' | tee /dev/stderr)
+  [ "${actual}" = "false" ]
+}
+
+@test "client/SnapshotAgentDeployment: populates nodeSelector when client.nodeSelector is populated" {
+  cd `chart_dir`
+  local actual=$(helm template \
+      -s templates/client-snapshot-agent-deployment.yaml \
+      --set 'client.snapshotAgent.enabled=true' \
+      --set 'client.nodeSelector=allow' \
+      . | tee /dev/stderr |
+      yq '.spec.template.spec.nodeSelector | contains("allow")' | tee /dev/stderr)
+  [ "${actual}" = "true" ]
+}
+
+#--------------------------------------------------------------------
+# global.tls.enabled
+
+@test "client/SnapshotAgentDeployment: sets TLS env vars when global.tls.enabled" {
+  cd `chart_dir`
+  local env=$(helm template \
+      -s templates/client-snapshot-agent-deployment.yaml \
+      --set 'client.snapshotAgent.enabled=true' \
+      --set 'global.tls.enabled=true' \
+      . | tee /dev/stderr |
+      yq -r '.spec.template.spec.containers[0].env[]' | tee /dev/stderr)
+
+  local actual
+  actual=$(echo $env | jq -r '. | select(.name == "CONSUL_HTTP_ADDR") | .value' | tee /dev/stderr)
+  [ "${actual}" = 'https://$(HOST_IP):8501' ]
+
+  actual=$(echo $env | jq -r '. | select(.name == "CONSUL_CACERT") | .value' | tee /dev/stderr)
+  [ "${actual}" = "/consul/tls/ca/tls.crt" ]
+}
+
+@test "client/SnapshotAgentDeployment: populates volumes when global.tls.enabled is true" {
+  cd `chart_dir`
+  local actual=$(helm template \
+      -s templates/client-snapshot-agent-deployment.yaml \
+      --set 'client.snapshotAgent.enabled=true' \
+      --set 'global.tls.enabled=true' \
+      . | tee /dev/stderr |
+      yq '.spec.template.spec.volumes | length > 0' | tee /dev/stderr)
+  [ "${actual}" = "true" ]
+}
+
+@test "client/SnapshotAgentDeployment: populates container volumeMounts when global.tls.enabled is true" {
+  cd `chart_dir`
+  local actual=$(helm template \
+      -s templates/client-snapshot-agent-deployment.yaml \
+      --set 'client.snapshotAgent.enabled=true' \
+      --set 'global.tls.enabled=true' \
+      . | tee /dev/stderr |
+      yq '.spec.template.spec.containers[0].volumeMounts | length > 0' | tee /dev/stderr)
+  [ "${actual}" = "true" ]
+}
+
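+# Taken together, the TLS assertions above imply a rendered container env of
+# roughly this shape (a sketch derived from the expected values, not from the
+# template itself):
+#
+#   - name: CONSUL_HTTP_ADDR
+#     value: https://$(HOST_IP):8501
+#   - name: CONSUL_CACERT
+#     value: /consul/tls/ca/tls.crt
+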
+@test "client/SnapshotAgentDeployment: can overwrite CA with the provided secret" {
+  cd `chart_dir`
+  local ca_cert_volume=$(helm template \
+      -s templates/client-snapshot-agent-deployment.yaml \
+      --set 'client.snapshotAgent.enabled=true' \
+      --set 'global.tls.enabled=true' \
+      --set 'global.tls.caCert.secretName=foo-ca-cert' \
+      --set 'global.tls.caCert.secretKey=key' \
+      --set 'global.tls.caKey.secretName=foo-ca-key' \
+      --set 'global.tls.caKey.secretKey=key' \
+      . | tee /dev/stderr |
+      yq '.spec.template.spec.volumes[] | select(.name=="consul-ca-cert")' | tee /dev/stderr)
+
+  # check that the provided ca cert secret is attached as a volume
+  local actual
+  actual=$(echo $ca_cert_volume | jq -r '.secret.secretName' | tee /dev/stderr)
+  [ "${actual}" = "foo-ca-cert" ]
+
+  # check that it uses the provided secret key
+  actual=$(echo $ca_cert_volume | jq -r '.secret.items[0].key' | tee /dev/stderr)
+  [ "${actual}" = "key" ]
+}
+
+#--------------------------------------------------------------------
+# global.tls.enableAutoEncrypt
+
+@test "client/SnapshotAgentDeployment: consul-auto-encrypt-ca-cert volume is added when TLS with auto-encrypt is enabled" {
+  cd `chart_dir`
+  local actual=$(helm template \
+      -s templates/client-snapshot-agent-deployment.yaml \
+      --set 'client.snapshotAgent.enabled=true' \
+      --set 'global.tls.enabled=true' \
+      --set 'global.tls.enableAutoEncrypt=true' \
+      . | tee /dev/stderr |
+      yq '.spec.template.spec.volumes[] | select(.name == "consul-auto-encrypt-ca-cert") | length > 0' | tee /dev/stderr)
+  [ "${actual}" = "true" ]
+}
+
+@test "client/SnapshotAgentDeployment: consul-auto-encrypt-ca-cert volumeMount is added when TLS with auto-encrypt is enabled" {
+  cd `chart_dir`
+  local actual=$(helm template \
+      -s templates/client-snapshot-agent-deployment.yaml \
+      --set 'client.snapshotAgent.enabled=true' \
+      --set 'global.tls.enabled=true' \
+      --set 'global.tls.enableAutoEncrypt=true' \
+      . | tee /dev/stderr |
+      yq '.spec.template.spec.containers[0].volumeMounts[] | select(.name == "consul-auto-encrypt-ca-cert") | length > 0' | tee /dev/stderr)
+  [ "${actual}" = "true" ]
+}
+
+@test "client/SnapshotAgentDeployment: get-auto-encrypt-client-ca init container is created when TLS with auto-encrypt is enabled" {
+  cd `chart_dir`
+  local actual=$(helm template \
+      -s templates/client-snapshot-agent-deployment.yaml \
+      --set 'client.snapshotAgent.enabled=true' \
+      --set 'global.tls.enabled=true' \
+      --set 'global.tls.enableAutoEncrypt=true' \
+      . | tee /dev/stderr |
+      yq '.spec.template.spec.initContainers[] | select(.name == "get-auto-encrypt-client-ca") | length > 0' | tee /dev/stderr)
+  [ "${actual}" = "true" ]
+}
+
+@test "client/SnapshotAgentDeployment: adds both init containers when TLS with auto-encrypt and ACLs are enabled" {
+  cd `chart_dir`
+  local actual=$(helm template \
+      -s templates/client-snapshot-agent-deployment.yaml \
+      --set 'client.snapshotAgent.enabled=true' \
+      --set 'global.acls.manageSystemACLs=true' \
+      --set 'global.tls.enabled=true' \
+      --set 'global.tls.enableAutoEncrypt=true' \
+      . | tee /dev/stderr |
+      yq '.spec.template.spec.initContainers | length == 2' | tee /dev/stderr)
+  [ "${actual}" = "true" ]
+}
+
+@test "client/SnapshotAgentDeployment: consul-ca-cert volume is not added if externalServers.enabled=true and externalServers.useSystemRoots=true" {
+  cd `chart_dir`
+  local actual=$(helm template \
+      -s templates/client-snapshot-agent-deployment.yaml \
+      --set 'client.snapshotAgent.enabled=true' \
+      --set 'global.tls.enabled=true' \
+      --set 'externalServers.enabled=true' \
+      --set 'externalServers.hosts[0]=foo.com' \
+      --set 'externalServers.useSystemRoots=true' \
+      . | tee /dev/stderr |
| tee /dev/stderr | + yq '.spec.template.spec.volumes[] | select(.name == "consul-ca-cert")' | tee /dev/stderr) + [ "${actual}" = "" ] +} + +#-------------------------------------------------------------------- +# resources + +@test "client/SnapshotAgentDeployment: default resources" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/client-snapshot-agent-deployment.yaml \ + --set 'client.snapshotAgent.enabled=true' \ + . | tee /dev/stderr | + yq -rc '.spec.template.spec.containers[0].resources' | tee /dev/stderr) + [ "${actual}" = '{"limits":{"cpu":"50m","memory":"50Mi"},"requests":{"cpu":"50m","memory":"50Mi"}}' ] +} + +@test "client/SnapshotAgentDeployment: can set resources" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/client-snapshot-agent-deployment.yaml \ + --set 'client.snapshotAgent.enabled=true' \ + --set 'client.snapshotAgent.resources.requests.memory=100Mi' \ + --set 'client.snapshotAgent.resources.requests.cpu=100m' \ + --set 'client.snapshotAgent.resources.limits.memory=200Mi' \ + --set 'client.snapshotAgent.resources.limits.cpu=200m' \ + . | tee /dev/stderr | + yq -rc '.spec.template.spec.containers[0].resources' | tee /dev/stderr) + [ "${actual}" = '{"limits":{"cpu":"200m","memory":"200Mi"},"requests":{"cpu":"100m","memory":"100Mi"}}' ] +} + +#-------------------------------------------------------------------- +# client.snapshotAgent.caCert + +@test "client/SnapshotAgentDeployment: if caCert is set command is modified correctly" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/client-snapshot-agent-deployment.yaml \ + --set 'client.snapshotAgent.enabled=true' \ + --set 'client.snapshotAgent.caCert=-----BEGIN CERTIFICATE----- +MIICFjCCAZsCCQCdwLtdjbzlYzAKBggqhkjOPQQDAjB0MQswCQYDVQQGEwJDQTEL' \ + . | tee /dev/stderr | + yq -r '.spec.template.spec.containers[0].command[2] | contains("cat < /extra-ssl-certs/custom-ca.pem")' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "client/SnapshotAgentDeployment: if caCert is set extra-ssl-certs volumeMount is added" { + cd `chart_dir` + local object=$(helm template \ + -s templates/client-snapshot-agent-deployment.yaml \ + --set 'client.snapshotAgent.enabled=true' \ + --set 'client.snapshotAgent.caCert=-----BEGIN CERTIFICATE----- +MIICFjCCAZsCCQCdwLtdjbzlYzAKBggqhkjOPQQDAjB0MQswCQYDVQQGEwJDQTEL' \ + . | tee /dev/stderr | yq -r '.spec.template.spec' | tee /dev/stderr) + + local actual=$(echo $object | jq -r '.volumes[0].name' | tee /dev/stderr) + [ "${actual}" = "extra-ssl-certs" ] + + local actual=$(echo $object | jq -r '.containers[0].volumeMounts[0].name' | tee /dev/stderr) + [ "${actual}" = "extra-ssl-certs" ] +} + +@test "client/SnapshotAgentDeployment: if caCert is set SSL_CERT_DIR env var is set" { + cd `chart_dir` + local object=$(helm template \ + -s templates/client-snapshot-agent-deployment.yaml \ + --set 'client.snapshotAgent.enabled=true' \ + --set 'client.snapshotAgent.caCert=-----BEGIN CERTIFICATE----- +MIICFjCCAZsCCQCdwLtdjbzlYzAKBggqhkjOPQQDAjB0MQswCQYDVQQGEwJDQTEL' \ + . 
| tee /dev/stderr | yq -r '.spec.template.spec.containers[0].env[0]' | tee /dev/stderr) + + local actual=$(echo $object | jq -r '.name' | tee /dev/stderr) + [ "${actual}" = "SSL_CERT_DIR" ] + local actual=$(echo $object | jq -r '.value' | tee /dev/stderr) + [ "${actual}" = "/etc/ssl/certs:/extra-ssl-certs" ] +} + +#-------------------------------------------------------------------- +# license-autoload + +@test "client/SnapshotAgentDeployment: adds volume for license secret when enterprise license secret name and key are provided" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/client-snapshot-agent-deployment.yaml \ + --set 'client.snapshotAgent.enabled=true' \ + --set 'global.enterpriseLicense.secretName=foo' \ + --set 'global.enterpriseLicense.secretKey=bar' \ + . | tee /dev/stderr | + yq -r -c '.spec.template.spec.volumes[] | select(.name == "consul-license")' | tee /dev/stderr) + [ "${actual}" = '{"name":"consul-license","secret":{"secretName":"foo"}}' ] +} + +@test "client/SnapshotAgentDeployment: adds volume mount for license secret when enterprise license secret name and key are provided" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/client-snapshot-agent-deployment.yaml \ + --set 'client.snapshotAgent.enabled=true' \ + --set 'global.enterpriseLicense.secretName=foo' \ + --set 'global.enterpriseLicense.secretKey=bar' \ + . | tee /dev/stderr | + yq -r -c '.spec.template.spec.containers[0].volumeMounts[] | select(.name == "consul-license")' | tee /dev/stderr) + [ "${actual}" = '{"name":"consul-license","mountPath":"/consul/license","readOnly":true}' ] +} + +@test "client/SnapshotAgentDeployment: adds env var for license path when enterprise license secret name and key are provided" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/client-snapshot-agent-deployment.yaml \ + --set 'client.snapshotAgent.enabled=true' \ + --set 'global.enterpriseLicense.secretName=foo' \ + --set 'global.enterpriseLicense.secretKey=bar' \ + . | tee /dev/stderr | + yq -r -c '.spec.template.spec.containers[0].env[] | select(.name == "CONSUL_LICENSE_PATH")' | tee /dev/stderr) + [ "${actual}" = '{"name":"CONSUL_LICENSE_PATH","value":"/consul/license/bar"}' ] +} + +@test "client/SnapshotAgentDeployment: does not add license secret volume if manageSystemACLs is enabled" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/client-snapshot-agent-deployment.yaml \ + --set 'client.snapshotAgent.enabled=true' \ + --set 'global.enterpriseLicense.secretName=foo' \ + --set 'global.enterpriseLicense.secretKey=bar' \ + --set 'global.acls.manageSystemACLs=true' \ + . | tee /dev/stderr | + yq -r -c '.spec.template.spec.volumes[] | select(.name == "consul-license")' | tee /dev/stderr) + [ "${actual}" = "" ] +} + +@test "client/SnapshotAgentDeployment: does not add license secret volume mount if manageSystemACLs is enabled" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/client-snapshot-agent-deployment.yaml \ + --set 'client.snapshotAgent.enabled=true' \ + --set 'global.enterpriseLicense.secretName=foo' \ + --set 'global.enterpriseLicense.secretKey=bar' \ + --set 'global.acls.manageSystemACLs=true' \ + . 
| tee /dev/stderr | + yq -r -c '.spec.template.spec.containers[0].volumeMounts[] | select(.name == "consul-license")' | tee /dev/stderr) + [ "${actual}" = "" ] +} + +@test "client/SnapshotAgentDeployment: does not add license env if manageSystemACLs is enabled" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/client-snapshot-agent-deployment.yaml \ + --set 'client.snapshotAgent.enabled=true' \ + --set 'global.enterpriseLicense.secretName=foo' \ + --set 'global.enterpriseLicense.secretKey=bar' \ + --set 'global.acls.manageSystemACLs=true' \ + . | tee /dev/stderr | + yq -r -c '.spec.template.spec.containers[0].env[] | select(.name == "CONSUL_LICENSE_PATH")' | tee /dev/stderr) + [ "${actual}" = "" ] +} + +#-------------------------------------------------------------------- +# get-auto-encrypt-client-ca + +@test "client/SnapshotAgentDeployment: get-auto-encrypt-client-ca uses server's stateful set address by default and passes ca cert" { + cd `chart_dir` + local command=$(helm template \ + -s templates/client-snapshot-agent-deployment.yaml \ + --set 'client.snapshotAgent.enabled=true' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ + . | tee /dev/stderr | + yq '.spec.template.spec.initContainers[] | select(.name == "get-auto-encrypt-client-ca").command | join(" ")' | tee /dev/stderr) + + # check server address + actual=$(echo $command | jq ' . | contains("-server-addr=release-name-consul-server")') + [ "${actual}" = "true" ] + + # check server port + actual=$(echo $command | jq ' . | contains("-server-port=8501")') + [ "${actual}" = "true" ] + + # check server's CA cert + actual=$(echo $command | jq ' . | contains("-ca-file=/consul/tls/ca/tls.crt")') + [ "${actual}" = "true" ] + + # check consul-api-timeout + actual=$(echo $command | jq ' . | contains("-consul-api-timeout=5s")') + [ "${actual}" = "true" ] +} + +#-------------------------------------------------------------------- +# Vault + +@test "client/SnapshotAgentDeployment: configures server CA to come from vault when vault is enabled" { + cd `chart_dir` + local object=$(helm template \ + -s templates/client-snapshot-agent-deployment.yaml \ + --set 'client.snapshotAgent.enabled=true' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ + --set 'global.tls.caCert.secretName=foo' \ + --set 'global.secretsBackend.vault.enabled=true' \ + --set 'global.secretsBackend.vault.consulClientRole=test' \ + --set 'global.secretsBackend.vault.consulServerRole=foo' \ + --set 'global.secretsBackend.vault.consulCARole=carole' \ + . 
| tee /dev/stderr | + yq -r '.spec.template' | tee /dev/stderr) + + # Check annotations + local actual + actual=$(echo $object | jq -r '.metadata.annotations["vault.hashicorp.com/agent-init-first"]' | tee /dev/stderr) + [ "${actual}" = "true" ] + local actual + actual=$(echo $object | jq -r '.metadata.annotations["vault.hashicorp.com/agent-inject"]' | tee /dev/stderr) + [ "${actual}" = "true" ] + local actual + actual=$(echo $object | jq -r '.metadata.annotations["vault.hashicorp.com/role"]' | tee /dev/stderr) + [ "${actual}" = "carole" ] + local actual + actual=$(echo $object | jq -r '.metadata.annotations["vault.hashicorp.com/agent-inject-secret-serverca.crt"]' | tee /dev/stderr) + [ "${actual}" = "foo" ] + local actual + actual=$(echo $object | jq -r '.metadata.annotations["vault.hashicorp.com/agent-inject-template-serverca.crt"]' | tee /dev/stderr) + [ "${actual}" = $'{{- with secret \"foo\" -}}\n{{- .Data.certificate -}}\n{{- end -}}' ] +} + +@test "client/SnapshotAgentDeployment: vault CA is not configured by default" { + cd `chart_dir` + local object=$(helm template \ + -s templates/client-snapshot-agent-deployment.yaml \ + --set 'client.snapshotAgent.enabled=true' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ + --set 'global.tls.caCert.secretName=foo' \ + --set 'global.secretsBackend.vault.enabled=true' \ + --set 'global.secretsBackend.vault.consulClientRole=foo' \ + --set 'global.secretsBackend.vault.consulServerRole=test' \ + --set 'global.secretsBackend.vault.consulCARole=carole' \ + . | tee /dev/stderr | + yq -r '.spec.template' | tee /dev/stderr) + + local actual=$(echo $object | yq -r '.metadata.annotations | has("vault.hashicorp.com/agent-extra-secret")') + [ "${actual}" = "false" ] + local actual=$(echo $object | yq -r '.metadata.annotations | has("vault.hashicorp.com/ca-cert")') + [ "${actual}" = "false" ] +} + +@test "client/SnapshotAgentDeployment: vault CA is not configured when secretName is set but secretKey is not" { + cd `chart_dir` + local object=$(helm template \ + -s templates/client-snapshot-agent-deployment.yaml \ + --set 'client.snapshotAgent.enabled=true' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ + --set 'global.tls.caCert.secretName=foo' \ + --set 'global.secretsBackend.vault.enabled=true' \ + --set 'global.secretsBackend.vault.consulClientRole=foo' \ + --set 'global.secretsBackend.vault.consulServerRole=test' \ + --set 'global.secretsBackend.vault.consulCARole=carole' \ + --set 'global.secretsBackend.vault.ca.secretName=ca' \ + . 
| tee /dev/stderr | + yq -r '.spec.template' | tee /dev/stderr) + + local actual=$(echo $object | yq -r '.metadata.annotations | has("vault.hashicorp.com/agent-extra-secret")') + [ "${actual}" = "false" ] + local actual=$(echo $object | yq -r '.metadata.annotations | has("vault.hashicorp.com/ca-cert")') + [ "${actual}" = "false" ] +} + +@test "client/SnapshotAgentDeployment: vault CA is not configured when secretKey is set but secretName is not" { + cd `chart_dir` + local object=$(helm template \ + -s templates/client-snapshot-agent-deployment.yaml \ + --set 'client.snapshotAgent.enabled=true' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ + --set 'global.tls.caCert.secretName=foo' \ + --set 'global.secretsBackend.vault.enabled=true' \ + --set 'global.secretsBackend.vault.consulClientRole=foo' \ + --set 'global.secretsBackend.vault.consulServerRole=test' \ + --set 'global.secretsBackend.vault.consulCARole=carole' \ + --set 'global.secretsBackend.vault.ca.secretKey=tls.crt' \ + . | tee /dev/stderr | + yq -r '.spec.template' | tee /dev/stderr) + + local actual=$(echo $object | yq -r '.metadata.annotations | has("vault.hashicorp.com/agent-extra-secret")') + [ "${actual}" = "false" ] + local actual=$(echo $object | yq -r '.metadata.annotations | has("vault.hashicorp.com/ca-cert")') + [ "${actual}" = "false" ] +} + +@test "client/SnapshotAgentDeployment: vault CA is configured when both secretName and secretKey are set" { + cd `chart_dir` + local object=$(helm template \ + -s templates/client-snapshot-agent-deployment.yaml \ + --set 'client.snapshotAgent.enabled=true' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ + --set 'global.tls.caCert.secretName=foo' \ + --set 'global.secretsBackend.vault.enabled=true' \ + --set 'global.secretsBackend.vault.consulClientRole=foo' \ + --set 'global.secretsBackend.vault.consulServerRole=test' \ + --set 'global.secretsBackend.vault.consulCARole=carole' \ + --set 'global.secretsBackend.vault.ca.secretName=ca' \ + --set 'global.secretsBackend.vault.ca.secretKey=tls.crt' \ + . | tee /dev/stderr | + yq -r '.spec.template' | tee /dev/stderr) + + local actual=$(echo $object | yq -r '.metadata.annotations."vault.hashicorp.com/agent-extra-secret"') + [ "${actual}" = "ca" ] + local actual=$(echo $object | yq -r '.metadata.annotations."vault.hashicorp.com/ca-cert"') + [ "${actual}" = "/vault/custom/tls.crt" ] +} + +@test "client/SnapshotAgentDeployment: vault enterprise license annotations are correct when enabled" { + cd `chart_dir` + local object=$(helm template \ + -s templates/client-snapshot-agent-deployment.yaml \ + --set 'client.snapshotAgent.enabled=true' \ + --set 'global.secretsBackend.vault.enabled=true' \ + --set 'global.secretsBackend.vault.consulClientRole=foo' \ + --set 'global.secretsBackend.vault.consulServerRole=test' \ + --set 'global.enterpriseLicense.secretName=path/to/secret' \ + --set 'global.enterpriseLicense.secretKey=enterpriselicense' \ + . 
| tee /dev/stderr | + yq -r '.spec.template.metadata' | tee /dev/stderr) + + local actual=$(echo $object | + yq -r '.annotations["vault.hashicorp.com/agent-inject-secret-enterpriselicense.txt"]' | tee /dev/stderr) + [ "${actual}" = "path/to/secret" ] + local actual="$(echo $object | + yq -r '.annotations["vault.hashicorp.com/agent-inject-template-enterpriselicense.txt"]' | tee /dev/stderr)" + local expected=$'{{- with secret \"path/to/secret\" -}}\n{{- .Data.data.enterpriselicense -}}\n{{- end -}}' + [ "${actual}" = "${expected}" ] +} + +@test "client/SnapshotAgentDeployment: vault CONSUL_LICENSE_PATH is set to /vault/secrets/enterpriselicense.txt" { + cd `chart_dir` + local env=$(helm template \ + -s templates/client-snapshot-agent-deployment.yaml \ + --set 'client.snapshotAgent.enabled=true' \ + --set 'global.secretsBackend.vault.enabled=true' \ + --set 'global.secretsBackend.vault.consulClientRole=foo' \ + --set 'global.secretsBackend.vault.consulServerRole=test' \ + --set 'global.enterpriseLicense.secretName=a/b/c/d' \ + --set 'global.enterpriseLicense.secretKey=enterpriselicense' \ + . | tee /dev/stderr | + yq -r '.spec.template.spec.containers[0].env[]' | tee /dev/stderr) + + local actual=$(echo $env | jq -r '. | select(.name == "CONSUL_LICENSE_PATH") | .value' | tee /dev/stderr) + [ "${actual}" = "/vault/secrets/enterpriselicense.txt" ] +} + +@test "client/SnapshotAgentDeployment: vault does not add volume for license secret" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/client-snapshot-agent-deployment.yaml \ + --set 'client.snapshotAgent.enabled=true' \ + --set 'global.secretsBackend.vault.enabled=true' \ + --set 'global.secretsBackend.vault.consulClientRole=foo' \ + --set 'global.secretsBackend.vault.consulServerRole=test' \ + --set 'global.enterpriseLicense.secretName=a/b/c/d' \ + --set 'global.enterpriseLicense.secretKey=enterpriselicense' \ + . | tee /dev/stderr | + yq -r -c '.spec.template.spec.volumes[] | select(.name == "consul-license")' | tee /dev/stderr) + [ "${actual}" = "" ] +} + +@test "client/SnapshotAgentDeployment: vault does not add volume mount for license secret" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/client-snapshot-agent-deployment.yaml \ + --set 'client.snapshotAgent.enabled=true' \ + --set 'global.secretsBackend.vault.enabled=true' \ + --set 'global.secretsBackend.vault.consulClientRole=foo' \ + --set 'global.secretsBackend.vault.consulServerRole=test' \ + --set 'global.enterpriseLicense.secretName=a/b/c/d' \ + --set 'global.enterpriseLicense.secretKey=enterpriselicense' \ + . 
| tee /dev/stderr | + yq -r -c '.spec.template.spec.containers[0].volumeMounts[] | select(.name == "consul-license")' | tee /dev/stderr) + [ "${actual}" = "" ] +} + +@test "client/SnapshotAgentDeployment: vault snapshot agent config annotations are correct when enabled" { + cd `chart_dir` + local object=$(helm template \ + -s templates/client-snapshot-agent-deployment.yaml \ + --set 'global.secretsBackend.vault.enabled=true' \ + --set 'global.secretsBackend.vault.consulClientRole=foo' \ + --set 'global.secretsBackend.vault.consulServerRole=test' \ + --set 'global.secretsBackend.vault.consulSnapshotAgentRole=bar' \ + --set 'client.snapshotAgent.enabled=true' \ + --set 'client.snapshotAgent.configSecret.secretName=path/to/secret' \ + --set 'client.snapshotAgent.configSecret.secretKey=config' \ + . | tee /dev/stderr | + yq -r '.spec.template.metadata' | tee /dev/stderr) + + local actual=$(echo $object | + yq -r '.annotations["vault.hashicorp.com/agent-inject-secret-snapshot-agent-config.json"]' | tee /dev/stderr) + [ "${actual}" = "path/to/secret" ] + + actual=$(echo $object | + yq -r '.annotations["vault.hashicorp.com/agent-inject-template-snapshot-agent-config.json"]' | tee /dev/stderr) + local expected=$'{{- with secret \"path/to/secret\" -}}\n{{- .Data.data.config -}}\n{{- end -}}' + [ "${actual}" = "${expected}" ] + + actual=$(echo $object | jq -r '.annotations["vault.hashicorp.com/role"]' | tee /dev/stderr) + [ "${actual}" = "bar" ] +} + +@test "client/SnapshotAgentDeployment: vault does not add volume for snapshot agent config secret" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/client-snapshot-agent-deployment.yaml \ + --set 'global.secretsBackend.vault.enabled=true' \ + --set 'global.secretsBackend.vault.consulClientRole=foo' \ + --set 'global.secretsBackend.vault.consulServerRole=test' \ + --set 'client.snapshotAgent.enabled=true' \ + --set 'client.snapshotAgent.configSecret.secretName=a/b/c/d' \ + --set 'client.snapshotAgent.configSecret.secretKey=snapshot-agent-config' \ + . | tee /dev/stderr | + yq -r -c '.spec.template.spec.volumes[] | select(.name == "snapshot-config")' | tee /dev/stderr) + [ "${actual}" = "" ] +} + +@test "client/SnapshotAgentDeployment: vault does not add volume mount for snapshot agent config secret" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/client-snapshot-agent-deployment.yaml \ + --set 'global.secretsBackend.vault.enabled=true' \ + --set 'global.secretsBackend.vault.consulClientRole=foo' \ + --set 'global.secretsBackend.vault.consulServerRole=test' \ + --set 'client.snapshotAgent.enabled=true' \ + --set 'client.snapshotAgent.configSecret.secretName=a/b/c/d' \ + --set 'client.snapshotAgent.configSecret.secretKey=snapshot-agent-config' \ + . 
| tee /dev/stderr | + yq -r -c '.spec.template.spec.containers[0].volumeMounts[] | select(.name == "snapshot-config")' | tee /dev/stderr) + [ "${actual}" = "" ] +} + +@test "client/SnapshotAgentDeployment: vault sets config-file argument on snapshot agent command to config downloaded by vault agent injector" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/client-snapshot-agent-deployment.yaml \ + --set 'global.secretsBackend.vault.enabled=true' \ + --set 'global.secretsBackend.vault.consulClientRole=foo' \ + --set 'global.secretsBackend.vault.consulServerRole=test' \ + --set 'client.snapshotAgent.enabled=true' \ + --set 'client.snapshotAgent.configSecret.secretName=a/b/c/d' \ + --set 'client.snapshotAgent.configSecret.secretKey=snapshot-agent-config' \ + . | tee /dev/stderr | + yq -r '.spec.template.spec.containers[0].command[2] | contains("-config-file=/vault/secrets/snapshot-agent-config.json")' | tee /dev/stderr) + [ "${actual}" = 'true' ] +} + +#-------------------------------------------------------------------- +# Vault agent annotations + +@test "client/SnapshotAgentDeployment: no vault agent annotations defined by default" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/client-snapshot-agent-deployment.yaml \ + --set 'client.snapshotAgent.enabled=true' \ + --set 'global.secretsBackend.vault.enabled=true' \ + --set 'global.secretsBackend.vault.consulClientRole=test' \ + --set 'global.secretsBackend.vault.consulServerRole=foo' \ + --set 'global.tls.caCert.secretName=foo' \ + --set 'global.secretsBackend.vault.consulCARole=carole' \ + . | tee /dev/stderr | + yq -r '.spec.template.metadata.annotations | del(."consul.hashicorp.com/connect-inject") | del(."vault.hashicorp.com/agent-inject") | del(."vault.hashicorp.com/role")' | tee /dev/stderr) + [ "${actual}" = "{}" ] +} + +@test "client/SnapshotAgentDeployment: vault agent annotations can be set" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/client-snapshot-agent-deployment.yaml \ + --set 'client.snapshotAgent.enabled=true' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ + --set 'global.secretsBackend.vault.enabled=true' \ + --set 'global.secretsBackend.vault.consulClientRole=test' \ + --set 'global.secretsBackend.vault.consulServerRole=foo' \ + --set 'global.tls.caCert.secretName=foo' \ + --set 'global.secretsBackend.vault.consulCARole=carole' \ + --set 'global.secretsBackend.vault.agentAnnotations=foo: bar' \ + . | tee /dev/stderr | + yq -r '.spec.template.metadata.annotations.foo' | tee /dev/stderr) + [ "${actual}" = "bar" ] +} + + +@test "client/SnapshotAgentDeployment: vault properly sets vault role when global.secretsBackend.vault.consulCARole is set but global.secretsBackend.vault.consulSnapshotAgentRole is not set" { + cd `chart_dir` + local object=$(helm template \ + -s templates/client-snapshot-agent-deployment.yaml \ + --set 'client.snapshotAgent.enabled=true' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ + --set 'global.tls.caCert.secretName=foo' \ + --set 'global.secretsBackend.vault.enabled=true' \ + --set 'global.secretsBackend.vault.consulClientRole=test' \ + --set 'global.secretsBackend.vault.consulServerRole=foo' \ + --set 'global.secretsBackend.vault.consulCARole=ca-role' \ + . 
| tee /dev/stderr | + yq -r '.spec.template' | tee /dev/stderr) + + local actual + actual=$(echo $object | jq -r '.metadata.annotations["vault.hashicorp.com/role"]' | tee /dev/stderr) + [ "${actual}" = "ca-role" ] +} + +@test "client/SnapshotAgentDeployment: vault properly sets vault role when global.secretsBackend.vault.consulSnapshotAgentRole is set but global.secretsBackend.vault.consulCARole is not set" { + cd `chart_dir` + local object=$(helm template \ + -s templates/client-snapshot-agent-deployment.yaml \ + --set 'client.snapshotAgent.enabled=true' \ + --set 'global.secretsBackend.vault.enabled=true' \ + --set 'global.secretsBackend.vault.consulClientRole=test' \ + --set 'global.secretsBackend.vault.consulServerRole=foo' \ + --set 'global.secretsBackend.vault.consulSnapshotAgentRole=sa-role' \ + --set 'client.snapshotAgent.configSecret.secretName=a/b/c/d' \ + --set 'client.snapshotAgent.configSecret.secretKey=snapshot-agent-config' \ + . | tee /dev/stderr | + yq -r '.spec.template' | tee /dev/stderr) + + local actual + actual=$(echo $object | jq -r '.metadata.annotations["vault.hashicorp.com/role"]' | tee /dev/stderr) + [ "${actual}" = "sa-role" ] +} + +@test "client/SnapshotAgentDeployment: vault properly sets vault role to global.secretsBackend.vault.consulSnapshotAgentRole value when both global.secretsBackend.vault.consulSnapshotAgentRole and global.secretsBackend.vault.consulCARole are set" { + cd `chart_dir` + local object=$(helm template \ + -s templates/client-snapshot-agent-deployment.yaml \ + --set 'client.snapshotAgent.enabled=true' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ + --set 'global.tls.caCert.secretName=foo' \ + --set 'global.secretsBackend.vault.enabled=true' \ + --set 'global.secretsBackend.vault.consulClientRole=test' \ + --set 'global.secretsBackend.vault.consulServerRole=foo' \ + --set 'global.secretsBackend.vault.consulSnapshotAgentRole=sa-role' \ + --set 'client.snapshotAgent.configSecret.secretName=a/b/c/d' \ + --set 'client.snapshotAgent.configSecret.secretKey=snapshot-agent-config' \ + --set 'global.secretsBackend.vault.consulCARole=ca-role' \ + . | tee /dev/stderr | + yq -r '.spec.template' | tee /dev/stderr) + + local actual + actual=$(echo $object | jq -r '.metadata.annotations["vault.hashicorp.com/role"]' | tee /dev/stderr) + [ "${actual}" = "sa-role" ] +} + +@test "client/SnapshotAgentDeployment: interval defaults to 1h" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/client-snapshot-agent-deployment.yaml \ + --set 'client.snapshotAgent.enabled=true' \ + . | tee /dev/stderr | + yq -r '.spec.template.spec.containers[0].command[2] | contains("-interval=1h")' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "client/SnapshotAgentDeployment: interval can be set" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/client-snapshot-agent-deployment.yaml \ + --set 'client.snapshotAgent.enabled=true' \ + --set 'client.snapshotAgent.interval=10h34m5s' \ + . 
| tee /dev/stderr | + yq -r '.spec.template.spec.containers[0].command[2] | contains("-interval=10h34m5s")' | tee /dev/stderr) + [ "${actual}" = "true" ] +} diff --git a/charts/consul/test/unit/client-snapshot-agent-podsecuritypolicy.bats b/charts/consul/test/unit/client-snapshot-agent-podsecuritypolicy.bats new file mode 100644 index 0000000000..21c55af314 --- /dev/null +++ b/charts/consul/test/unit/client-snapshot-agent-podsecuritypolicy.bats @@ -0,0 +1,30 @@ +#!/usr/bin/env bats + +load _helpers + +@test "client/SnapshotAgentPodSecurityPolicy: disabled by default" { + cd `chart_dir` + assert_empty helm template \ + -s templates/client-snapshot-agent-podsecuritypolicy.yaml \ + . +} + +@test "client/SnapshotAgentPodSecurityPolicy: disabled with snapshot agent disabled and global.enablePodSecurityPolicies=true" { + cd `chart_dir` + assert_empty helm template \ + -s templates/client-snapshot-agent-podsecuritypolicy.yaml \ + --set 'client.snapshotAgent.enabled=false' \ + --set 'global.enablePodSecurityPolicies=true' \ + . +} + +@test "client/SnapshotAgentPodSecurityPolicy: enabled with snapshot agent enabled global.enablePodSecurityPolicies=true" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/client-snapshot-agent-podsecuritypolicy.yaml \ + --set 'client.snapshotAgent.enabled=true' \ + --set 'global.enablePodSecurityPolicies=true' \ + . | tee /dev/stderr | + yq -s 'length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} diff --git a/charts/consul/test/unit/client-snapshot-agent-role.bats b/charts/consul/test/unit/client-snapshot-agent-role.bats new file mode 100644 index 0000000000..86aaaf3880 --- /dev/null +++ b/charts/consul/test/unit/client-snapshot-agent-role.bats @@ -0,0 +1,55 @@ +#!/usr/bin/env bats + +load _helpers + +@test "client/SnapshotAgentRole: disabled by default" { + cd `chart_dir` + assert_empty helm template \ + -s templates/client-snapshot-agent-role.yaml \ + . +} + +@test "client/SnapshotAgentRole: enabled with client.snapshotAgent.enabled=true" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/client-snapshot-agent-role.yaml \ + --set 'client.snapshotAgent.enabled=true' \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "client/SnapshotAgentRole: enabled with client.enabled=true and client.snapshotAgent.enabled=true" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/client-snapshot-agent-role.yaml \ + --set 'client.enabled=true' \ + --set 'client.snapshotAgent.enabled=true' \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "client/SnapshotAgentRole: disabled with client=false and client.snapshotAgent.enabled=true" { + cd `chart_dir` + assert_empty helm template \ + -s templates/client-snapshot-agent-role.yaml \ + --set 'client.snapshotAgent.enabled=true' \ + --set 'client.enabled=false' \ + . +} + +#-------------------------------------------------------------------- +# global.enablePodSecurityPolicies + +@test "client/SnapshotAgentRole: allows podsecuritypolicies access with global.enablePodSecurityPolicies=true" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/client-snapshot-agent-role.yaml \ + --set 'client.snapshotAgent.enabled=true' \ + --set 'client.enabled=true' \ + --set 'global.enablePodSecurityPolicies=true' \ + . 
| tee /dev/stderr | + yq -r '.rules[0].resources[0]' | tee /dev/stderr) + [ "${actual}" = "podsecuritypolicies" ] +} diff --git a/charts/consul/test/unit/client-snapshot-agent-rolebinding.bats b/charts/consul/test/unit/client-snapshot-agent-rolebinding.bats new file mode 100644 index 0000000000..f061610955 --- /dev/null +++ b/charts/consul/test/unit/client-snapshot-agent-rolebinding.bats @@ -0,0 +1,40 @@ +#!/usr/bin/env bats + +load _helpers + +@test "client/SnapshotAgentRoleBinding: disabled by default" { + cd `chart_dir` + assert_empty helm template \ + -s templates/client-snapshot-agent-rolebinding.yaml \ + . +} + +@test "client/SnapshotAgentRoleBinding: enabled with client.snapshotAgent.enabled=true" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/client-snapshot-agent-rolebinding.yaml \ + --set 'client.snapshotAgent.enabled=true' \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "client/SnapshotAgentRoleBinding: enabled with client.enabled=true and client.snapshotAgent.enabled=true" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/client-snapshot-agent-rolebinding.yaml \ + --set 'client.enabled=true' \ + --set 'client.snapshotAgent.enabled=true' \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "client/SnapshotAgentRoleBinding: disabled with client=false and client.snapshotAgent.enabled=true" { + cd `chart_dir` + assert_empty helm template \ + -s templates/client-snapshot-agent-rolebinding.yaml \ + --set 'client.snapshotAgent.enabled=true' \ + --set 'client.enabled=false' \ + . +} diff --git a/charts/consul/test/unit/client-snapshot-agent-serviceaccount.bats b/charts/consul/test/unit/client-snapshot-agent-serviceaccount.bats new file mode 100644 index 0000000000..30d7ada58b --- /dev/null +++ b/charts/consul/test/unit/client-snapshot-agent-serviceaccount.bats @@ -0,0 +1,83 @@ +#!/usr/bin/env bats + +load _helpers + +@test "client/SnapshotAgentServiceAccount: disabled by default" { + cd `chart_dir` + assert_empty helm template -s templates/client-snapshot-agent-serviceaccount.yaml . +} + +@test "client/SnapshotAgentServiceAccount: enabled with client.snapshotAgent.enabled=true" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/client-snapshot-agent-serviceaccount.yaml \ + --set 'client.snapshotAgent.enabled=true' \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "client/SnapshotAgentServiceAccount: enabled with client.enabled=true and client.snapshotAgent.enabled=true" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/client-snapshot-agent-serviceaccount.yaml \ + --set 'client.enabled=true' \ + --set 'client.snapshotAgent.enabled=true' \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "client/SnapshotAgentServiceAccount: disabled with client=false and client.snapshotAgent.enabled=true" { + cd `chart_dir` + assert_empty helm template \ + -s templates/client-snapshot-agent-serviceaccount.yaml \ + --set 'client.snapshotAgent.enabled=true' \ + --set 'client.enabled=false' \ + . 
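+ + # Rough sketch of the assert_empty contract assumed by these tests (an assumption + # about the _helpers helper, not a statement of its internals): it passes when helm + # renders no manifest for the selected template, approximately: + # out=$(helm template -s templates/client-snapshot-agent-serviceaccount.yaml --set 'client.enabled=false' --set 'client.snapshotAgent.enabled=true' . 2>&1 || true) + # [ -z "$out" ] || [[ "$out" =~ "could not find template" ]]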
+} + +#-------------------------------------------------------------------- +# global.imagePullSecrets + +@test "client/SnapshotAgentServiceAccount: can set image pull secrets" { + cd `chart_dir` + local object=$(helm template \ + -s templates/client-snapshot-agent-serviceaccount.yaml \ + --set 'client.snapshotAgent.enabled=true' \ + --set 'global.imagePullSecrets[0].name=my-secret' \ + --set 'global.imagePullSecrets[1].name=my-secret2' \ + . | tee /dev/stderr) + + local actual=$(echo "$object" | + yq -r '.imagePullSecrets[0].name' | tee /dev/stderr) + [ "${actual}" = "my-secret" ] + + local actual=$(echo "$object" | + yq -r '.imagePullSecrets[1].name' | tee /dev/stderr) + [ "${actual}" = "my-secret2" ] +} + +#-------------------------------------------------------------------- +# client.snapshotAgent.serviceAccount.annotations + +@test "client/SnapshotAgentServiceAccount: no annotations by default" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/client-snapshot-agent-serviceaccount.yaml \ + --set 'client.snapshotAgent.enabled=true' \ + . | tee /dev/stderr | + yq '.metadata.annotations | length > 0' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "client/SnapshotAgentServiceAccount: annotations when enabled" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/client-snapshot-agent-serviceaccount.yaml \ + --set 'client.snapshotAgent.enabled=true' \ + --set "client.snapshotAgent.serviceAccount.annotations=foo: bar" \ + . | tee /dev/stderr | + yq -r '.metadata.annotations.foo' | tee /dev/stderr) + [ "${actual}" = "bar" ] +} diff --git a/charts/consul/test/unit/cni-daemonset.bats b/charts/consul/test/unit/cni-daemonset.bats index 17c80d2da0..0c423abfec 100644 --- a/charts/consul/test/unit/cni-daemonset.bats +++ b/charts/consul/test/unit/cni-daemonset.bats @@ -37,6 +37,7 @@ load _helpers --set 'connectInject.enabled=false' \ -s templates/cni-daemonset.yaml \ . + [ "$status" -eq 1 ] [[ "$output" =~ "connectInject.enabled must be true if connectInject.cni.enabled is true" ]] } diff --git a/charts/consul/test/unit/connect-inject-clusterrole.bats b/charts/consul/test/unit/connect-inject-clusterrole.bats index ce161d3141..d9dd5933ca 100644 --- a/charts/consul/test/unit/connect-inject-clusterrole.bats +++ b/charts/consul/test/unit/connect-inject-clusterrole.bats @@ -2,13 +2,11 @@ load _helpers -@test "connectInject/ClusterRole: enabled by default" { +@test "connectInject/ClusterRole: disabled by default" { cd `chart_dir` - local actual=$(helm template \ + assert_empty helm template \ -s templates/connect-inject-clusterrole.yaml \ - . | tee /dev/stderr | - yq 'length > 0' | tee /dev/stderr) - [ "${actual}" = "true" ] + . } @test "connectInject/ClusterRole: enabled with global.enabled false" { diff --git a/charts/consul/test/unit/connect-inject-clusterrolebinding.bats b/charts/consul/test/unit/connect-inject-clusterrolebinding.bats index 28921d31f2..ccf30083f9 100644 --- a/charts/consul/test/unit/connect-inject-clusterrolebinding.bats +++ b/charts/consul/test/unit/connect-inject-clusterrolebinding.bats @@ -2,13 +2,11 @@ load _helpers -@test "connectInject/ClusterRoleBinding: enabled by default" { +@test "connectInject/ClusterRoleBinding: disabled by default" { cd `chart_dir` - local actual=$(helm template \ + assert_empty helm template \ -s templates/connect-inject-clusterrolebinding.yaml \ - . | tee /dev/stderr | - yq 'length > 0' | tee /dev/stderr) - [ "${actual}" = "true" ] + . 
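+ + # The render checks in these files share one pattern (a sketch, assuming the + # jq-wrapper build of yq): pipe the rendered manifest into a boolean filter and + # compare against the printed "true"/"false", for example: + # helm template -s templates/connect-inject-clusterrolebinding.yaml --set 'connectInject.enabled=true' . | yq 'length > 0'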
} @test "connectInject/ClusterRoleBinding: enabled with global.enabled false" { @@ -29,4 +27,4 @@ load _helpers -s templates/connect-inject-clusterrolebinding.yaml \ --set 'connectInject.enabled=false' \ . -} +} \ No newline at end of file diff --git a/charts/consul/test/unit/connect-inject-deployment.bats b/charts/consul/test/unit/connect-inject-deployment.bats index 79238010ab..8ab6c3855f 100755 --- a/charts/consul/test/unit/connect-inject-deployment.bats +++ b/charts/consul/test/unit/connect-inject-deployment.bats @@ -2,13 +2,11 @@ load _helpers -@test "connectInject/Deployment: enabled by default" { +@test "connectInject/Deployment: disabled by default" { cd `chart_dir` - local actual=$(helm template \ + assert_empty helm template \ -s templates/connect-inject-deployment.yaml \ - . | tee /dev/stderr | - yq 'length > 0' | tee /dev/stderr) - [ "${actual}" = "true" ] + . } @test "connectInject/Deployment: enable with global.enabled false, client.enabled true" { @@ -35,38 +33,139 @@ load _helpers cd `chart_dir` assert_empty helm template \ -s templates/connect-inject-deployment.yaml \ - --set 'connectInject.enabled=-' \ --set 'global.enabled=false' \ . } -@test "connectInject/Deployment: consul env defaults" { +@test "connectInject/Deployment: fails if global.enabled=false" { + cd `chart_dir` + run helm template \ + -s templates/connect-inject-deployment.yaml \ + --set 'global.enabled=false' \ + --set 'connectInject.enabled=true' . + [ "$status" -eq 1 ] + [[ "$output" =~ "clients must be enabled for connect injection" ]] +} + +@test "connectInject/Deployment: fails if global.enabled=true and client.enabled=false" { + cd `chart_dir` + run helm template \ + -s templates/connect-inject-deployment.yaml \ + --set 'global.enabled=true' \ + --set 'client.enabled=false' \ + --set 'connectInject.enabled=true' . + [ "$status" -eq 1 ] + [[ "$output" =~ "clients must be enabled for connect injection" ]] +} + +@test "connectInject/Deployment: fails if global.enabled=false and client.enabled=false" { + cd `chart_dir` + run helm template \ + -s templates/connect-inject-deployment.yaml \ + --set 'global.enabled=false' \ + --set 'client.enabled=false' \ + --set 'connectInject.enabled=true' . + [ "$status" -eq 1 ] + [[ "$output" =~ "clients must be enabled for connect injection" ]] +} + +@test "connectInject/Deployment: fails if client.grpc=false" { + cd `chart_dir` + run helm template \ + -s templates/connect-inject-deployment.yaml \ + --set 'client.grpc=false' \ + --set 'connectInject.enabled=true' . + [ "$status" -eq 1 ] + [[ "$output" =~ "client.grpc must be true for connect injection" ]] +} + +@test "connectInject/Deployment: command defaults" { cd `chart_dir` - local env=$(helm template \ + local cmd=$(helm template \ -s templates/connect-inject-deployment.yaml \ --set 'connectInject.enabled=true' \ . 
| tee /dev/stderr | - yq '.spec.template.spec.containers[0].env[]' | tee /dev/stderr) + yq '.spec.template.spec.containers[0].command' | tee /dev/stderr) + + local actual=$(echo "$cmd" | + yq 'any(contains("consul-k8s-control-plane inject-connect"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo "$cmd" | + yq 'any(contains("-consul-api-timeout=5s"))' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +#-------------------------------------------------------------------- +# connectInject.centralConfig [DEPRECATED] + +@test "connectInject/Deployment: fails if connectInject.centralConfig.enabled is set to false" { + cd `chart_dir` + run helm template \ + -s templates/connect-inject-deployment.yaml \ + --set 'connectInject.enabled=true' \ + --set 'connectInject.centralConfig.enabled=false' . + [ "$status" -eq 1 ] + [[ "$output" =~ "connectInject.centralConfig.enabled cannot be set to false; to disable, set enable_central_service_config to false in server.extraConfig and client.extraConfig" ]] +} + +@test "connectInject/Deployment: fails if connectInject.centralConfig.defaultProtocol is set" { + cd `chart_dir` + run helm template \ + -s templates/connect-inject-deployment.yaml \ + --set 'connectInject.enabled=true' \ + --set 'connectInject.centralConfig.defaultProtocol=http' . + [ "$status" -eq 1 ] + [[ "$output" =~ "connectInject.centralConfig.defaultProtocol is no longer supported; instead you must migrate to CRDs (see www.consul.io/docs/k8s/crds/upgrade-to-crds)" ]] +} - local actual=$(echo "$env" | - jq -r '. | select( .name == "CONSUL_ADDRESSES").value' | tee /dev/stderr) - [ "${actual}" = "release-name-consul-server.default.svc" ] +@test "connectInject/Deployment: fails if connectInject.centralConfig.proxyDefaults is used" { + cd `chart_dir` + run helm template \ + -s templates/connect-inject-deployment.yaml \ + --set 'connectInject.enabled=true' \ + --set 'connectInject.centralConfig.proxyDefaults="{\"key\":\"value\"}"' . + [ "$status" -eq 1 ] + [[ "$output" =~ "connectInject.centralConfig.proxyDefaults is no longer supported; instead you must migrate to CRDs (see www.consul.io/docs/k8s/crds/upgrade-to-crds)" ]] +} - local actual=$(echo "$env" | - jq -r '. | select( .name == "CONSUL_GRPC_PORT").value' | tee /dev/stderr) - [ "${actual}" = "8502" ] +@test "connectInject/Deployment: does not fail if connectInject.centralConfig.enabled is set to true" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/connect-inject-deployment.yaml \ + --set 'connectInject.enabled=true' \ + --set 'connectInject.centralConfig.enabled=true' \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} - local actual=$(echo "$env" | - jq -r '. | select( .name == "CONSUL_HTTP_PORT").value' | tee /dev/stderr) - [ "${actual}" = "8500" ] +@test "connectInject/Deployment: does not fail if connectInject.centralConfig.proxyDefaults is set to {}" { + cd `chart_dir` - local actual=$(echo "$env" | - jq -r '. | select( .name == "CONSUL_DATACENTER").value' | tee /dev/stderr) - [ "${actual}" = "dc1" ] + # We have to actually create a values file for this test because the + # --set and --set-string flags were passing {} as a YAML object rather + # than a string. + # Previously this was the default in the values.yaml so this test is testing + # that if someone had copied this into their values.yaml then nothing would + # break. We no longer use this value, but that's okay because the default + # empty object had no effect. 
+ temp_file=$(mktemp) + cat <<EOF > "$temp_file" +connectInject: + enabled: true + centralConfig: + proxyDefaults: | + {} +EOF - local actual=$(echo "$env" | - jq -r '. | select( .name == "CONSUL_API_TIMEOUT").value' | tee /dev/stderr) - [ "${actual}" = "5s" ] + local actual=$(helm template \ + -s templates/connect-inject-deployment.yaml \ + -f "$temp_file" \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] + rm -f "$temp_file" } #-------------------------------------------------------------------- @@ -212,7 +311,7 @@ load _helpers } #-------------------------------------------------------------------- -# consul and consul-dataplane images +# consul and envoy images @test "connectInject/Deployment: container image is global default" { cd `chart_dir` local actual=$(helm template \ -s templates/connect-inject-deployment.yaml \ @@ -260,17 +359,28 @@ load _helpers [ "${actual}" = "true" ] } -@test "connectInject/Deployment: consul-dataplane-image can be set via global" { +@test "connectInject/Deployment: envoy-image can be set via global" { cd `chart_dir` local actual=$(helm template \ -s templates/connect-inject-deployment.yaml \ --set 'connectInject.enabled=true' \ - --set 'global.imageConsulDataplane=foo' \ + --set 'global.imageEnvoy=foo' \ . | tee /dev/stderr | - yq '.spec.template.spec.containers[0].command | any(contains("-consul-dataplane-image=\"foo\""))' | tee /dev/stderr) + yq '.spec.template.spec.containers[0].command | any(contains("-envoy-image=\"foo\""))' | tee /dev/stderr) [ "${actual}" = "true" ] } +@test "connectInject/Deployment: setting connectInject.imageEnvoy fails" { + cd `chart_dir` + run helm template \ + -s templates/connect-inject-deployment.yaml \ + --set 'connectInject.enabled=true' \ + --set 'connectInject.imageEnvoy=new/image' . + [ "$status" -eq 1 ] + [[ "$output" =~ "connectInject.imageEnvoy must be specified in global" ]] +} + + #-------------------------------------------------------------------- # extra envoy args @@ -462,7 +572,7 @@ load _helpers #-------------------------------------------------------------------- # global.tls.enabled -@test "connectInject/Deployment: Adds consul-ca-cert volume when global.tls.enabled is true" { +@test "connectInject/Deployment: Adds tls-ca-cert volume when global.tls.enabled is true" { cd `chart_dir` local actual=$(helm template \ -s templates/connect-inject-deployment.yaml \ @@ -473,7 +583,7 @@ load _helpers [ "${actual}" != "" ] } -@test "connectInject/Deployment: Adds consul-ca-cert volumeMount when global.tls.enabled is true" { +@test "connectInject/Deployment: Adds tls-ca-cert volumeMounts when global.tls.enabled is true" { cd `chart_dir` local actual=$(helm template \ -s templates/connect-inject-deployment.yaml \ @@ -507,26 +617,83 @@ load _helpers [ "${actual}" = "key" ] } -@test "connectInject/Deployment: consul env vars when global.tls.enabled is true" { +@test "connectInject/Deployment: Adds -tls-cert-dir=/etc/connect-injector/certs to command" { cd `chart_dir` - local env=$(helm template \ - -s templates/connect-inject-deployment.yaml \ + local actual=$(helm template \ + -s templates/connect-inject-deployment.yaml \ + --set 'connectInject.enabled=true' \ + --set 'global.tls.enabled=true' \ + . 
| tee /dev/stderr | + yq '.spec.template.spec.containers[0].command | any(contains("-tls-cert-dir=/etc/connect-injector/certs"))' | tee /dev/stderr) + [ "${actual}" != "" ] +} + +#-------------------------------------------------------------------- +# global.tls.enableAutoEncrypt + +@test "connectInject/Deployment: consul-auto-encrypt-ca-cert volume is added when TLS with auto-encrypt is enabled" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/connect-inject-deployment.yaml \ + --set 'connectInject.enabled=true' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ + . | tee /dev/stderr | + yq '.spec.template.spec.volumes[] | select(.name == "consul-auto-encrypt-ca-cert") | length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "connectInject/Deployment: consul-auto-encrypt-ca-cert volumeMount is added when TLS with auto-encrypt is enabled" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/connect-inject-deployment.yaml \ --set 'connectInject.enabled=true' \ --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ . | tee /dev/stderr | - yq '.spec.template.spec.containers[0].env[]' | tee /dev/stderr) + yq '.spec.template.spec.containers[0].volumeMounts[] | select(.name == "consul-auto-encrypt-ca-cert") | length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} - local actual=$(echo "$env" | - jq -r '. | select( .name == "CONSUL_HTTP_PORT").value' | tee /dev/stderr) - [ "${actual}" = "8501" ] +@test "connectInject/Deployment: get-auto-encrypt-client-ca init container is created when TLS with auto-encrypt is enabled" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/connect-inject-deployment.yaml \ + --set 'connectInject.enabled=true' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ + . | tee /dev/stderr | + yq '.spec.template.spec.initContainers[] | select(.name == "get-auto-encrypt-client-ca") | length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} - local actual=$(echo "$env" | - jq -r '. | select( .name == "CONSUL_USE_TLS").value' | tee /dev/stderr) +@test "connectInject/Deployment: adds both init containers when TLS with auto-encrypt and ACLs + namespaces are enabled" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/connect-inject-deployment.yaml \ + --set 'connectInject.enabled=true' \ + --set 'global.acls.manageSystemACLs=true' \ + --set 'global.enableConsulNamespaces=true' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ + . | tee /dev/stderr | + yq '.spec.template.spec.initContainers | length == 2' | tee /dev/stderr) [ "${actual}" = "true" ] +} - local actual=$(echo "$env" | - jq -r '. | select( .name == "CONSUL_CACERT_FILE").value' | tee /dev/stderr) - [ "${actual}" = "/consul/tls/ca/tls.crt" ] +@test "connectInject/Deployment: consul-ca-cert volume is not added if externalServers.enabled=true and externalServers.useSystemRoots=true" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/connect-inject-deployment.yaml \ + --set 'connectInject.enabled=true' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ + --set 'externalServers.enabled=true' \ + --set 'externalServers.hosts[0]=foo.com' \ + --set 'externalServers.useSystemRoots=true' \ + . 
| tee /dev/stderr | + yq '.spec.template.spec.volumes[] | select(.name == "consul-ca-cert")' | tee /dev/stderr) + [ "${actual}" = "" ] } #-------------------------------------------------------------------- @@ -608,37 +775,6 @@ load _helpers [ "${actual}" = "true" ] } -@test "connectInject/Deployment: consul env var default set with .global.adminPartitions.enabled=true" { - cd `chart_dir` - local env=$(helm template \ - -s templates/connect-inject-deployment.yaml \ - --set 'connectInject.enabled=true' \ - --set 'global.adminPartitions.enabled=true' \ - --set 'global.enableConsulNamespaces=true' \ - . | tee /dev/stderr | - yq '.spec.template.spec.containers[0].env[]' | tee /dev/stderr) - - local actual=$(echo "$env" | - jq -r '. | select( .name == "CONSUL_PARTITION").value' | tee /dev/stderr) - [ "${actual}" = "default" ] -} - -@test "connectInject/Deployment: consul env var set with .global.adminPartitions.enabled=true" { - cd `chart_dir` - local env=$(helm template \ - -s templates/connect-inject-deployment.yaml \ - --set 'connectInject.enabled=true' \ - --set 'global.adminPartitions.enabled=true' \ - --set 'global.adminPartitions.name=foo' \ - --set 'global.enableConsulNamespaces=true' \ - . | tee /dev/stderr | - yq '.spec.template.spec.containers[0].env[]' | tee /dev/stderr) - - local actual=$(echo "$env" | - jq -r '. | select( .name == "CONSUL_PARTITION").value' | tee /dev/stderr) - [ "${actual}" = "foo" ] -} - @test "connectInject/Deployment: fails if namespaces are disabled and .global.adminPartitions.enabled=true" { cd `chart_dir` run helm template \ @@ -653,6 +789,17 @@ load _helpers #-------------------------------------------------------------------- # namespaces +@test "connectInject/Deployment: fails if namespaces are disabled and mirroringK8S is true" { + cd `chart_dir` + run helm template \ + -s templates/connect-inject-deployment.yaml \ + --set 'global.enableConsulNamespaces=false' \ + --set 'connectInject.consulNamespaces.mirroringK8S=true' \ + --set 'connectInject.enabled=true' . + [ "$status" -eq 1 ] + [[ "$output" =~ "global.enableConsulNamespaces must be true if mirroringK8S=true" ]] +} + @test "connectInject/Deployment: namespace options disabled by default" { cd `chart_dir` local object=$(helm template \ @@ -697,20 +844,20 @@ load _helpers local actual=$(echo $object | yq 'any(contains("enable-k8s-namespace-mirroring"))' | tee /dev/stderr) - [ "${actual}" = "true" ] + [ "${actual}" = "false" ] local actual=$(echo $object | yq 'any(contains("k8s-namespace-mirroring-prefix"))' | tee /dev/stderr) [ "${actual}" = "false" ] } -@test "connectInject/Deployment: mirroring options omitted with .connectInject.consulNamespaces.mirroringK8S=false" { +@test "connectInject/Deployment: mirroring options set with .connectInject.consulNamespaces.mirroringK8S=true" { cd `chart_dir` local object=$(helm template \ -s templates/connect-inject-deployment.yaml \ --set 'connectInject.enabled=true' \ --set 'global.enableConsulNamespaces=true' \ - --set 'connectInject.consulNamespaces.mirroringK8S=false' \ + --set 'connectInject.consulNamespaces.mirroringK8S=true' \ . 
| tee /dev/stderr | yq '.spec.template.spec.containers[0].command' | tee /dev/stderr) @@ -724,7 +871,7 @@ load _helpers local actual=$(echo $object | yq 'any(contains("enable-k8s-namespace-mirroring=true"))' | tee /dev/stderr) - [ "${actual}" = "false" ] + [ "${actual}" = "true" ] local actual=$(echo $object | yq 'any(contains("k8s-namespace-mirroring-prefix"))' | tee /dev/stderr) @@ -780,7 +927,7 @@ load _helpers --set 'connectInject.enabled=true' \ --set 'connectInject.aclInjectToken.secretName=foo' \ . | tee /dev/stderr | - yq '[.spec.template.spec.containers[0].env[].name] | any(contains("CONSUL_ACL_TOKEN"))' | tee /dev/stderr) + yq '[.spec.template.spec.containers[0].env[].name] | any(contains("CONSUL_HTTP_TOKEN"))' | tee /dev/stderr) [ "${actual}" = "false" ] } @@ -795,125 +942,302 @@ load _helpers yq '[.spec.template.spec.containers[0].env[].name]' | tee /dev/stderr) local actual=$(echo $object | - yq 'any(contains("CONSUL_ACL_TOKEN"))' | tee /dev/stderr) + yq 'any(contains("CONSUL_HTTP_TOKEN"))' | tee /dev/stderr) [ "${actual}" = "true" ] local actual=$(echo $object | - yq 'map(select(test("CONSUL_ACL_TOKEN"))) | length' | tee /dev/stderr) + yq 'map(select(test("CONSUL_HTTP_TOKEN"))) | length' | tee /dev/stderr) [ "${actual}" = "1" ] } #-------------------------------------------------------------------- # global.acls.manageSystemACLs -@test "connectInject/Deployment: ACL auth method env vars are set when acls are enabled" { +@test "connectInject/Deployment: consul-logout preStop hook is added when ACLs are enabled" { cd `chart_dir` - local env=$(helm template \ + local object=$(helm template \ -s templates/connect-inject-deployment.yaml \ --set 'connectInject.enabled=true' \ --set 'global.acls.manageSystemACLs=true' \ . | tee /dev/stderr | - yq '.spec.template.spec.containers[0].env[]' | tee /dev/stderr) + yq '[.spec.template.spec.containers[0].lifecycle.preStop.exec.command[2]] | any(contains("consul-k8s-control-plane consul-logout -consul-api-timeout=5s"))' | tee /dev/stderr) - local actual=$(echo "$env" | - jq -r '. | select( .name == "CONSUL_LOGIN_AUTH_METHOD").value' | tee /dev/stderr) - [ "${actual}" = "release-name-consul-k8s-component-auth-method" ] - - local actual=$(echo "$env" | - jq -r '. | select( .name == "CONSUL_LOGIN_DATACENTER").value' | tee /dev/stderr) - [ "${actual}" = "dc1" ] - local actual=$(echo "$env" | - jq -r '. | select( .name == "CONSUL_LOGIN_META").value' | tee /dev/stderr) - [ "${actual}" = 'component=connect-injector,pod=$(NAMESPACE)/$(POD_NAME)' ] + [ "${object}" = "true" ] } -@test "connectInject/Deployment: sets global auth method and primary datacenter when federation and acls and namespaces are enabled" { +@test "connectInject/Deployment: CONSUL_HTTP_TOKEN_FILE is not set when acls are disabled" { cd `chart_dir` - local env=$(helm template \ + local actual=$(helm template \ -s templates/connect-inject-deployment.yaml \ --set 'connectInject.enabled=true' \ - --set 'global.acls.manageSystemACLs=true' \ - --set 'global.federation.enabled=true' \ - --set 'global.federation.primaryDatacenter=dc1' \ - --set 'global.datacenter=dc2' \ - --set 'global.enableConsulNamespaces=true' \ - --set 'global.tls.enabled=true' \ - --set 'meshGateway.enabled=true' \ . | tee /dev/stderr | - yq '.spec.template.spec.containers[0].env[]' | tee /dev/stderr) - - local actual=$(echo "$env" | - jq -r '. 
| select( .name == "CONSUL_LOGIN_AUTH_METHOD").value' | tee /dev/stderr) - [ "${actual}" = "release-name-consul-k8s-component-auth-method-dc2" ] - - local actual=$(echo "$env" | - jq -r '. | select( .name == "CONSUL_LOGIN_DATACENTER").value' | tee /dev/stderr) - [ "${actual}" = "dc1" ] + yq '[.spec.template.spec.containers[0].env[0].name] | any(contains("CONSUL_HTTP_TOKEN_FILE"))' | tee /dev/stderr) + [ "${actual}" = "false" ] } -@test "connectInject/Deployment: sets default login partition and acls and partitions are enabled" { +@test "connectInject/Deployment: CONSUL_HTTP_TOKEN_FILE is set when acls are enabled" { cd `chart_dir` - local env=$(helm template \ + local actual=$(helm template \ -s templates/connect-inject-deployment.yaml \ --set 'connectInject.enabled=true' \ --set 'global.acls.manageSystemACLs=true' \ - --set 'global.adminPartitions.enabled=true' \ - --set 'global.enableConsulNamespaces=true' \ . | tee /dev/stderr | - yq '.spec.template.spec.containers[0].env[]' | tee /dev/stderr) - - local actual=$(echo "$env" | - jq -r '. | select( .name == "CONSUL_LOGIN_PARTITION").value' | tee /dev/stderr) - [ "${actual}" = "default" ] + yq '[.spec.template.spec.containers[0].env[1].name] | any(contains("CONSUL_HTTP_TOKEN_FILE"))' | tee /dev/stderr) + [ "${actual}" = "true" ] } -@test "connectInject/Deployment: sets non-default login partition and acls and partitions are enabled" { +@test "connectInject/Deployment: init container is created when global.acls.manageSystemACLs=true and has correct command and environment with tls disabled" { cd `chart_dir` - local env=$(helm template \ + local object=$(helm template \ -s templates/connect-inject-deployment.yaml \ --set 'connectInject.enabled=true' \ --set 'global.acls.manageSystemACLs=true' \ - --set 'global.adminPartitions.enabled=true' \ - --set 'global.adminPartitions.name=foo' \ - --set 'global.enableConsulNamespaces=true' \ . | tee /dev/stderr | - yq '.spec.template.spec.containers[0].env[]' | tee /dev/stderr) + yq '.spec.template.spec.initContainers[0]' | tee /dev/stderr) + + local actual=$(echo $object | + yq -r '.name' | tee /dev/stderr) + [ "${actual}" = "connect-injector-acl-init" ] + + local actual=$(echo $object | + yq -r '.command | any(contains("consul-k8s-control-plane acl-init"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq '[.env[1].name] | any(contains("CONSUL_HTTP_ADDR"))' | tee /dev/stderr) + [ "${actual}" = "true" ] - local actual=$(echo "$env" | - jq -r '. | select( .name == "CONSUL_LOGIN_PARTITION").value' | tee /dev/stderr) - [ "${actual}" = "foo" ] + local actual=$(echo $object | + yq '[.env[1].value] | any(contains("http://$(HOST_IP):8500"))' | tee /dev/stderr) + echo $actual + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq -r '.command | any(contains("-consul-api-timeout=5s"))' | tee /dev/stderr) + [ "${actual}" = "true" ] } -@test "connectInject/Deployment: cross namespace policy is not added when global.acls.manageSystemACLs=false" { +@test "connectInject/Deployment: init container is created when global.acls.manageSystemACLs=true and has correct command and environment with tls enabled" { cd `chart_dir` - local actual=$(helm template \ + local object=$(helm template \ -s templates/connect-inject-deployment.yaml \ --set 'connectInject.enabled=true' \ - --set 'global.enableConsulNamespaces=true' \ + --set 'global.tls.enabled=true' \ + --set 'global.acls.manageSystemACLs=true' \ . 
| tee /dev/stderr | - yq '.spec.template.spec.containers[0].command | any(contains("-consul-cross-namespace-acl-policy"))' | tee /dev/stderr) - [ "${actual}" = "false" ] + yq '.spec.template.spec.initContainers[] | select(.name == "connect-injector-acl-init")' | tee /dev/stderr) + + local actual=$(echo $object | + yq -r '.command | any(contains("consul-k8s-control-plane acl-init"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq '[.env[1].name] | any(contains("CONSUL_CACERT"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq '[.env[2].name] | any(contains("CONSUL_HTTP_ADDR"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq '[.env[2].value] | any(contains("https://$(HOST_IP):8501"))' | tee /dev/stderr) + echo $actual + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq '.volumeMounts[1] | any(contains("consul-ca-cert"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq -r '.command | any(contains("-consul-api-timeout=5s"))' | tee /dev/stderr) + [ "${actual}" = "true" ] } -@test "connectInject/Deployment: cross namespace policy is added when global.acls.manageSystemACLs=true" { +@test "connectInject/Deployment: init container is created when global.acls.manageSystemACLs=true and has correct command with Partitions enabled" { cd `chart_dir` - local actual=$(helm template \ + local object=$(helm template \ -s templates/connect-inject-deployment.yaml \ --set 'connectInject.enabled=true' \ + --set 'global.tls.enabled=true' \ --set 'global.enableConsulNamespaces=true' \ + --set 'global.adminPartitions.enabled=true' \ + --set 'global.adminPartitions.name=default' \ --set 'global.acls.manageSystemACLs=true' \ . 
| tee /dev/stderr | - yq '.spec.template.spec.containers[0].command | any(contains("-consul-cross-namespace-acl-policy"))' | tee /dev/stderr) + yq '.spec.template.spec.initContainers[] | select(.name == "connect-injector-acl-init")' | tee /dev/stderr) + + local actual=$(echo $object | + yq -r '.command | any(contains("consul-k8s-control-plane acl-init"))' | tee /dev/stderr) [ "${actual}" = "true" ] -} -#-------------------------------------------------------------------- -# resources + local actual=$(echo $object | + yq -r '.command | any(contains("-acl-auth-method=release-name-consul-k8s-component-auth-method"))' | tee /dev/stderr) + [ "${actual}" = "true" ] -@test "connectInject/Deployment: default resources" { - cd `chart_dir` - local actual=$(helm template \ - -s templates/connect-inject-deployment.yaml \ + local actual=$(echo $object | + yq -r '.command | any(contains("-partition=default"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq '[.env[1].name] | any(contains("CONSUL_CACERT"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq '[.env[2].name] | any(contains("CONSUL_HTTP_ADDR"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq '[.env[2].value] | any(contains("https://$(HOST_IP):8501"))' | tee /dev/stderr) + echo $actual + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq '.volumeMounts[1] | any(contains("consul-ca-cert"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq -r '.command | any(contains("-consul-api-timeout=5s"))' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "connectInject/Deployment: init container is created when global.acls.manageSystemACLs=true and has correct command and environment with tls enabled and autoencrypt enabled" { + cd `chart_dir` + local object=$(helm template \ + -s templates/connect-inject-deployment.yaml \ + --set 'connectInject.enabled=true' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ + --set 'global.acls.manageSystemACLs=true' \ + . 
| tee /dev/stderr |
+ yq '.spec.template.spec.initContainers[] | select(.name == "connect-injector-acl-init")' | tee /dev/stderr)
+
+ local actual=$(echo $object |
+ yq -r '.command | any(contains("consul-k8s-control-plane acl-init"))' | tee /dev/stderr)
+ [ "${actual}" = "true" ]
+
+ local actual=$(echo $object |
+ yq '[.env[1].name] | any(contains("CONSUL_CACERT"))' | tee /dev/stderr)
+ [ "${actual}" = "true" ]
+
+ local actual=$(echo $object |
+ yq '[.env[2].name] | any(contains("CONSUL_HTTP_ADDR"))' | tee /dev/stderr)
+ [ "${actual}" = "true" ]
+
+ local actual=$(echo $object |
+ yq '[.env[2].value] | any(contains("https://$(HOST_IP):8501"))' | tee /dev/stderr)
+ echo $actual
+ [ "${actual}" = "true" ]
+
+ local actual=$(echo $object |
+ yq '.volumeMounts[1] | any(contains("consul-auto-encrypt-ca-cert"))' | tee /dev/stderr)
+ [ "${actual}" = "true" ]
+
+ local actual=$(echo $object |
+ yq -r '.command | any(contains("-consul-api-timeout=5s"))' | tee /dev/stderr)
+ [ "${actual}" = "true" ]
+}
+
+@test "connectInject/Deployment: auto-encrypt init container is created and is the first init-container when global.acls.manageSystemACLs=true and has correct command and environment with tls enabled and autoencrypt enabled" {
+ cd `chart_dir`
+ local object=$(helm template \
+ -s templates/connect-inject-deployment.yaml \
+ --set 'connectInject.enabled=true' \
+ --set 'global.tls.enabled=true' \
+ --set 'global.tls.enableAutoEncrypt=true' \
+ --set 'global.acls.manageSystemACLs=true' \
+ . | tee /dev/stderr |
+ yq '.spec.template.spec.initContainers[0]' | tee /dev/stderr)
+
+ local actual=$(echo $object |
+ yq -r '.name' | tee /dev/stderr)
+ [ "${actual}" = "get-auto-encrypt-client-ca" ]
+}
+
+@test "connectInject/Deployment: cross namespace policy is not added when global.acls.manageSystemACLs=false" {
+ cd `chart_dir`
+ local actual=$(helm template \
+ -s templates/connect-inject-deployment.yaml \
+ --set 'connectInject.enabled=true' \
+ --set 'global.enableConsulNamespaces=true' \
+ . | tee /dev/stderr |
+ yq '.spec.template.spec.containers[0].command | any(contains("-consul-cross-namespace-acl-policy"))' | tee /dev/stderr)
+ [ "${actual}" = "false" ]
+}
+
+@test "connectInject/Deployment: cross namespace policy is added when global.acls.manageSystemACLs=true" {
+ cd `chart_dir`
+ local actual=$(helm template \
+ -s templates/connect-inject-deployment.yaml \
+ --set 'connectInject.enabled=true' \
+ --set 'global.enableConsulNamespaces=true' \
+ --set 'global.acls.manageSystemACLs=true' \
+ . | tee /dev/stderr |
+ yq '.spec.template.spec.containers[0].command | any(contains("-consul-cross-namespace-acl-policy"))' | tee /dev/stderr)
+ [ "${actual}" = "true" ]
+}
+
+@test "connectInject/Deployment: init container is created when global.acls.manageSystemACLs=true and has correct command when in non-primary datacenter with Consul Namespaces disabled" {
+ cd `chart_dir`
+ local object=$(helm template \
+ -s templates/connect-inject-deployment.yaml \
+ --set 'connectInject.enabled=true' \
+ --set 'global.datacenter=dc2' \
+ --set 'global.federation.enabled=true' \
+ --set 'global.federation.primaryDatacenter=dc1' \
+ --set 'meshGateway.enabled=true' \
+ --set 'global.tls.enabled=true' \
+ --set 'global.tls.enableAutoEncrypt=true' \
+ --set 'global.acls.manageSystemACLs=true' \
+ . 
| tee /dev/stderr |
+ yq '.spec.template.spec.initContainers[] | select(.name == "connect-injector-acl-init")' | tee /dev/stderr)
+
+ local actual=$(echo $object |
+ yq -r '.command | any(contains("consul-k8s-control-plane acl-init"))' | tee /dev/stderr)
+ [ "${actual}" = "true" ]
+
+ local actual=$(echo $object |
+ yq -r '.command | any(contains("-acl-auth-method=release-name-consul-k8s-component-auth-method"))' | tee /dev/stderr)
+ [ "${actual}" = "true" ]
+}
+
+@test "connectInject/Deployment: init container is created when global.acls.manageSystemACLs=true and has correct command when in non-primary datacenter with Consul Namespaces enabled" {
+ cd `chart_dir`
+ local object=$(helm template \
+ -s templates/connect-inject-deployment.yaml \
+ --set 'connectInject.enabled=true' \
+ --set 'global.datacenter=dc2' \
+ --set 'global.enableConsulNamespaces=true' \
+ --set 'global.federation.enabled=true' \
+ --set 'global.federation.primaryDatacenter=dc1' \
+ --set 'meshGateway.enabled=true' \
+ --set 'global.tls.enabled=true' \
+ --set 'global.tls.enableAutoEncrypt=true' \
+ --set 'global.acls.manageSystemACLs=true' \
+ . | tee /dev/stderr |
+ yq '.spec.template.spec.initContainers[] | select(.name == "connect-injector-acl-init")' | tee /dev/stderr)
+
+ local actual=$(echo $object |
+ yq -r '.command | any(contains("consul-k8s-control-plane acl-init"))' | tee /dev/stderr)
+ [ "${actual}" = "true" ]
+
+ local actual=$(echo $object |
+ yq -r '.command | any(contains("-acl-auth-method=release-name-consul-k8s-component-auth-method-dc2"))' | tee /dev/stderr)
+ [ "${actual}" = "true" ]
+
+ local actual=$(echo $object |
+ yq -r '.command | any(contains("-primary-datacenter=dc1"))' | tee /dev/stderr)
+ [ "${actual}" = "true" ]
+}
+
+#--------------------------------------------------------------------
+# resources
+
+@test "connectInject/Deployment: default resources" {
+ cd `chart_dir`
+ local actual=$(helm template \
+ -s templates/connect-inject-deployment.yaml \
 --set 'connectInject.enabled=true' \
 . | tee /dev/stderr |
 yq -rc '.spec.template.spec.containers[0].resources' | tee /dev/stderr)
@@ -1075,6 +1399,157 @@ load _helpers
 [ "${actual}" = "false" ]
 }
 
+#--------------------------------------------------------------------
+# consul sidecar resources
+
+@test "connectInject/Deployment: default consul sidecar container resources" {
+ cd `chart_dir`
+ local cmd=$(helm template \
+ -s templates/connect-inject-deployment.yaml \
+ --set 'connectInject.enabled=true' \
+ . 
| tee /dev/stderr | + yq '.spec.template.spec.containers[0].command' | tee /dev/stderr) + + local actual=$(echo "$cmd" | + yq 'any(contains("-default-consul-sidecar-memory-request=25Mi"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo "$cmd" | + yq 'any(contains("-default-consul-sidecar-cpu-request=20m"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo "$cmd" | + yq 'any(contains("-default-consul-sidecar-memory-limit=50Mi"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo "$cmd" | + yq 'any(contains("-default-consul-sidecar-cpu-limit=20m"))' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "connectInject/Deployment: consul sidecar container resources can be set" { + cd `chart_dir` + local cmd=$(helm template \ + -s templates/connect-inject-deployment.yaml \ + --set 'connectInject.enabled=true' \ + --set 'global.consulSidecarContainer.resources.requests.memory=100Mi' \ + --set 'global.consulSidecarContainer.resources.requests.cpu=100m' \ + --set 'global.consulSidecarContainer.resources.limits.memory=200Mi' \ + --set 'global.consulSidecarContainer.resources.limits.cpu=200m' \ + . | tee /dev/stderr | + yq '.spec.template.spec.containers[0].command' | tee /dev/stderr) + + local actual=$(echo "$cmd" | + yq 'any(contains("-default-consul-sidecar-memory-request=100Mi"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo "$cmd" | + yq 'any(contains("-default-consul-sidecar-cpu-request=100m"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo "$cmd" | + yq 'any(contains("-default-consul-sidecar-memory-limit=200Mi"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo "$cmd" | + yq 'any(contains("-default-consul-sidecar-cpu-limit=200m"))' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "connectInject/Deployment: consul sidecar container resources can be set explicitly to 0" { + cd `chart_dir` + local cmd=$(helm template \ + -s templates/connect-inject-deployment.yaml \ + --set 'connectInject.enabled=true' \ + --set 'global.consulSidecarContainer.resources.requests.memory=0' \ + --set 'global.consulSidecarContainer.resources.requests.cpu=0' \ + --set 'global.consulSidecarContainer.resources.limits.memory=0' \ + --set 'global.consulSidecarContainer.resources.limits.cpu=0' \ + . | tee /dev/stderr | + yq '.spec.template.spec.containers[0].command' | tee /dev/stderr) + + local actual=$(echo "$cmd" | + yq 'any(contains("-default-consul-sidecar-memory-request=0"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo "$cmd" | + yq 'any(contains("-default-consul-sidecar-cpu-request=0"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo "$cmd" | + yq 'any(contains("-default-consul-sidecar-memory-limit=0"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo "$cmd" | + yq 'any(contains("-default-consul-sidecar-cpu-limit=0"))' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "connectInject/Deployment: consul sidecar container resources can be individually set to null" { + cd `chart_dir` + local cmd=$(helm template \ + -s templates/connect-inject-deployment.yaml \ + --set 'connectInject.enabled=true' \ + --set 'global.consulSidecarContainer.resources.requests.memory=null' \ + --set 'global.consulSidecarContainer.resources.requests.cpu=null' \ + --set 'global.consulSidecarContainer.resources.limits.memory=null' \ + --set 'global.consulSidecarContainer.resources.limits.cpu=null' \ + . 
| tee /dev/stderr | + yq '.spec.template.spec.containers[0].command' | tee /dev/stderr) + + local actual=$(echo "$cmd" | + yq 'any(contains("-default-consul-sidecar-memory-request"))' | tee /dev/stderr) + [ "${actual}" = "false" ] + + local actual=$(echo "$cmd" | + yq 'any(contains("-default-consul-sidecar-cpu-request"))' | tee /dev/stderr) + [ "${actual}" = "false" ] + + local actual=$(echo "$cmd" | + yq 'any(contains("-default-consul-sidecar-memory-limit"))' | tee /dev/stderr) + [ "${actual}" = "false" ] + + local actual=$(echo "$cmd" | + yq 'any(contains("-default-consul-sidecar-cpu-limit"))' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "connectInject/Deployment: consul sidecar container resources can be set to null" { + cd `chart_dir` + local cmd=$(helm template \ + -s templates/connect-inject-deployment.yaml \ + --set 'connectInject.enabled=true' \ + --set 'global.consulSidecarContainer.resources=null' \ + . | tee /dev/stderr | + yq '.spec.template.spec.containers[0].command' | tee /dev/stderr) + + local actual=$(echo "$cmd" | + yq 'any(contains("-default-consul-sidecar-memory-request"))' | tee /dev/stderr) + [ "${actual}" = "false" ] + + local actual=$(echo "$cmd" | + yq 'any(contains("-default-consul-sidecar-cpu-request"))' | tee /dev/stderr) + [ "${actual}" = "false" ] + + local actual=$(echo "$cmd" | + yq 'any(contains("-default-consul-sidecar-memory-limit"))' | tee /dev/stderr) + [ "${actual}" = "false" ] + + local actual=$(echo "$cmd" | + yq 'any(contains("-default-consul-sidecar-cpu-limit"))' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "connectInject/Deployment: fails if global.lifecycleSidecarContainer is set" { + cd `chart_dir` + run helm template \ + -s templates/connect-inject-deployment.yaml \ + --set 'connectInject.enabled=true' \ + --set 'global.lifecycleSidecarContainer.resources.requests.memory=100Mi' . + [ "$status" -eq 1 ] + [[ "$output" =~ "global.lifecycleSidecarContainer has been renamed to global.consulSidecarContainer. Please set values using global.consulSidecarContainer." ]] +} + #-------------------------------------------------------------------- # sidecarProxy.resources @@ -1359,55 +1834,207 @@ load _helpers -s templates/connect-inject-deployment.yaml \ --set 'connectInject.enabled=true' \ . | tee /dev/stderr | - yq '.spec.template.spec.containers[0].command | any(contains("-enable-peering=true"))' | tee /dev/stderr) + yq '.spec.template.spec.containers[0].command | any(contains("-enable-peering=true"))' | tee /dev/stderr) + + [ "${actual}" = "false" ] +} + +@test "connectInject/Deployment: -enable-peering=true is set when global.peering.enabled is true" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/connect-inject-deployment.yaml \ + --set 'connectInject.enabled=true' \ + --set 'global.peering.enabled=true' \ + . | tee /dev/stderr | + yq '.spec.template.spec.containers[0].command | any(contains("-enable-peering=true"))' | tee /dev/stderr) + + [ "${actual}" = "true" ] +} + +@test "connectInject/Deployment: fails if peering is enabled but connect inject is not" { + cd `chart_dir` + run helm template \ + -s templates/connect-inject-deployment.yaml \ + --set 'connectInject.enabled=false' \ + --set 'global.peering.enabled=true' . 
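+ # The template is expected to fail fast here: per the chart's validation,
+ # global.peering.enabled=true requires connectInject.enabled=true, as the
+ # asserted error message below confirms.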
+ [ "$status" -eq 1 ] + [[ "$output" =~ "setting global.peering.enabled to true requires connectInject.enabled to be true" ]] +} + +@test "connectInject/Deployment: -read-server-expose-service=true is set when global.peering.enabled is true and global.peering.tokenGeneration.serverAddresses.source is empty" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/connect-inject-deployment.yaml \ + --set 'connectInject.enabled=true' \ + --set 'global.peering.enabled=true' \ + . | tee /dev/stderr | + yq '.spec.template.spec.containers[0].command | any(contains("-read-server-expose-service=true"))' | tee /dev/stderr) + + [ "${actual}" = "true" ] +} + +@test "connectInject/Deployment: -read-server-expose-service=true is set when servers are enabled and peering is enabled" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/connect-inject-deployment.yaml \ + --set 'global.enabled=false' \ + --set 'server.enabled=true' \ + --set 'client.enabled=true' \ + --set 'connectInject.enabled=true' \ + --set 'global.peering.enabled=true' \ + . | tee /dev/stderr | + yq '.spec.template.spec.containers[0].command | any(contains("-read-server-expose-service=true"))' | tee /dev/stderr) + + [ "${actual}" = "true" ] +} + +@test "connectInject/Deployment: -read-server-expose-service is not set when servers are disabled" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/connect-inject-deployment.yaml \ + --set 'server.enabled=false' \ + --set 'connectInject.enabled=true' \ + --set 'global.peering.enabled=true' \ + . | tee /dev/stderr | + yq '.spec.template.spec.containers[0].command | any(contains("-read-server-expose-service=true"))' | tee /dev/stderr) + + [ "${actual}" = "false" ] +} + +@test "connectInject/Deployment: -read-server-expose-service is not set when peering is disabled" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/connect-inject-deployment.yaml \ + --set 'connectInject.enabled=true' \ + --set 'global.peering.enabled=false' \ + . | tee /dev/stderr | + yq '.spec.template.spec.containers[0].command | any(contains("-read-server-expose-service=true"))' | tee /dev/stderr) + + [ "${actual}" = "false" ] +} + +@test "connectInject/Deployment: -read-server-expose-service is not set when global.peering.tokenGeneration.serverAddresses.source is set to consul" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/connect-inject-deployment.yaml \ + --set 'connectInject.enabled=true' \ + --set 'global.peering.enabled=true' \ + --set 'global.peering.tokenGeneration.serverAddresses.source=consul' \ + . | tee /dev/stderr | + yq '.spec.template.spec.containers[0].command | any(contains("-read-server-expose-service=true"))' | tee /dev/stderr) + + [ "${actual}" = "false" ] +} + +@test "connectInject/Deployment: fails server address source is an invalid value" { + cd `chart_dir` + run helm template \ + -s templates/connect-inject-deployment.yaml \ + --set 'connectInject.enabled=true' \ + --set 'global.peering.enabled=true' \ + --set 'global.peering.tokenGeneration.serverAddresses.source=notempty' . 
+ [ "$status" -eq 1 ] + [[ "$output" =~ "global.peering.tokenGeneration.serverAddresses.source must be one of empty string, 'consul' or 'static'" ]] +} + +@test "connectInject/Deployment: -read-server-expose-service and -token-server-address is not set when global.peering.tokenGeneration.serverAddresses.source is consul" { + cd `chart_dir` + local command=$(helm template \ + -s templates/connect-inject-deployment.yaml \ + --set 'connectInject.enabled=true' \ + --set 'global.peering.enabled=true' \ + --set 'global.peering.tokenGeneration.serverAddresses.source=consul' \ + . | tee /dev/stderr | + yq '.spec.template.spec.containers[0].command') + + local actual=$(echo $command | jq -r ' . | any(contains("-read-server-expose-service=true"))' | tee /dev/stderr) + [ "${actual}" = "false" ] + local actual=$(echo $command | jq -r ' . | any(contains("-token-server-address"))' | tee /dev/stderr) [ "${actual}" = "false" ] } -@test "connectInject/Deployment: -enable-peering=true is set when global.peering.enabled is true" { +@test "connectInject/Deployment: when servers are not enabled and externalServers.enabled=true, passes in -token-server-address flags with hosts" { cd `chart_dir` - local actual=$(helm template \ + local command=$(helm template \ -s templates/connect-inject-deployment.yaml \ + --set 'server.enabled=false' \ + --set 'externalServers.enabled=true' \ + --set 'externalServers.hosts[0]=1.2.3.4' \ + --set 'externalServers.hosts[1]=2.2.3.4' \ --set 'connectInject.enabled=true' \ --set 'global.peering.enabled=true' \ - --set 'meshGateway.enabled=true' \ - --set 'global.tls.enabled=true' \ . | tee /dev/stderr | - yq '.spec.template.spec.containers[0].command | any(contains("-enable-peering=true"))' | tee /dev/stderr) + yq '.spec.template.spec.containers[0].command') + + local actual=$(echo $command | jq -r ' . | any(contains("-token-server-address=\"1.2.3.4:8503\""))' | tee /dev/stderr) + [ "${actual}" = "true" ] + local actual=$(echo $command | jq -r ' . | any(contains("-token-server-address=\"2.2.3.4:8503\""))' | tee /dev/stderr) [ "${actual}" = "true" ] } -@test "connectInject/Deployment: fails if peering is enabled but connect inject is not" { +@test "connectInject/Deployment: externalServers.grpcPort can be customized" { cd `chart_dir` - run helm template \ + local command=$(helm template \ -s templates/connect-inject-deployment.yaml \ - --set 'connectInject.enabled=false' \ - --set 'global.peering.enabled=true' . - [ "$status" -eq 1 ] - [[ "$output" =~ "setting global.peering.enabled to true requires connectInject.enabled to be true" ]] + --set 'server.enabled=false' \ + --set 'externalServers.enabled=true' \ + --set 'externalServers.hosts[0]=1.2.3.4' \ + --set 'externalServers.hosts[1]=2.2.3.4' \ + --set 'externalServers.grpcPort=1234' \ + --set 'connectInject.enabled=true' \ + --set 'global.peering.enabled=true' \ + . | tee /dev/stderr | + yq '.spec.template.spec.containers[0].command') + + local actual=$(echo $command | jq -r ' . | any(contains("-token-server-address=\"1.2.3.4:1234\""))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $command | jq -r ' . 
| any(contains("-token-server-address=\"2.2.3.4:1234\""))' | tee /dev/stderr) + [ "${actual}" = "true" ] } -@test "connectInject/Deployment: fails if peering is enabled but tls is not" { +@test "connectInject/Deployment: when peering token generation source is static passes in -token-server-address flags with static addresses" { cd `chart_dir` - run helm template \ + local command=$(helm template \ -s templates/connect-inject-deployment.yaml \ + --set 'global.peering.tokenGeneration.serverAddresses.source=static' \ + --set 'global.peering.tokenGeneration.serverAddresses.static[0]=1.2.3.4:1234' \ + --set 'global.peering.tokenGeneration.serverAddresses.static[1]=2.2.3.4:2234' \ --set 'connectInject.enabled=true' \ - --set 'meshGateway.enabled=true' \ - --set 'global.peering.enabled=true' . - [ "$status" -eq 1 ] - [[ "$output" =~ "setting global.peering.enabled to true requires global.tls.enabled to be true" ]] + --set 'global.peering.enabled=true' \ + . | tee /dev/stderr | + yq '.spec.template.spec.containers[0].command') + + local actual=$(echo $command | jq -r ' . | any(contains("-token-server-address=\"1.2.3.4:1234\""))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $command | jq -r ' . | any(contains("-token-server-address=\"2.2.3.4:2234\""))' | tee /dev/stderr) + [ "${actual}" = "true" ] } -@test "connectInject/Deployment: fails if peering is enabled but mesh gateways are not" { +@test "connectInject/Deployment: when peering token generation source is static and externalHosts are set, passes in -token-server-address flags with static addresses, not externalServers.hosts" { cd `chart_dir` - run helm template \ + local command=$(helm template \ -s templates/connect-inject-deployment.yaml \ + --set 'server.enabled=false' \ + --set 'global.peering.tokenGeneration.serverAddresses.source=static' \ + --set 'global.peering.tokenGeneration.serverAddresses.static[0]=1.2.3.4:1234' \ + --set 'global.peering.tokenGeneration.serverAddresses.static[1]=2.2.3.4:2234' \ + --set 'externalServers.enabled=true' \ + --set 'externalServers.hosts[0]=1.1.1.1' \ + --set 'externalServers.hosts[1]=2.2.2.2' \ --set 'connectInject.enabled=true' \ - --set 'global.tls.enabled=true' \ - --set 'global.peering.enabled=true' . - [ "$status" -eq 1 ] - [[ "$output" =~ "setting global.peering.enabled to true requires meshGateway.enabled to be true" ]] + --set 'global.peering.enabled=true' \ + . | tee /dev/stderr | + yq '.spec.template.spec.containers[0].command') + + local actual=$(echo $command | jq -r ' . | any(contains("-token-server-address=\"1.2.3.4:1234\""))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $command | jq -r ' . 
| any(contains("-token-server-address=\"2.2.3.4:2234\""))' | tee /dev/stderr) + [ "${actual}" = "true" ] } #-------------------------------------------------------------------- @@ -1464,22 +2091,33 @@ load _helpers } #-------------------------------------------------------------------- -# Vault +# get-auto-encrypt-client-ca -@test "connectInject/Deployment: CONSUL_CACERT env variable is set points to vault secrets when TLS and vault are enabled" { +@test "connectInject/Deployment: get-auto-encrypt-client-ca uses server's stateful set address by default and passes ca cert" { cd `chart_dir` - local actual=$(helm template \ + local command=$(helm template \ -s templates/connect-inject-deployment.yaml \ --set 'connectInject.enabled=true' \ --set 'global.tls.enabled=true' \ - --set 'global.tls.caCert.secretName=foo' \ - --set 'global.secretsBackend.vault.enabled=true' \ - --set 'global.secretsBackend.vault.consulClientRole=foo' \ - --set 'global.secretsBackend.vault.consulServerRole=test' \ - --set 'global.secretsBackend.vault.consulCARole=test' \ + --set 'global.tls.enableAutoEncrypt=true' \ . | tee /dev/stderr | - yq -r '.spec.template.spec.containers[0].env[] | select(.name == "CONSUL_CACERT_FILE").value' | tee /dev/stderr) - [ "${actual}" = "/vault/secrets/serverca.crt" ] + yq '.spec.template.spec.initContainers[] | select(.name == "get-auto-encrypt-client-ca").command | join(" ")' | tee /dev/stderr) + + # check server address + actual=$(echo $command | jq ' . | contains("-server-addr=release-name-consul-server")') + [ "${actual}" = "true" ] + + # check server port + actual=$(echo $command | jq ' . | contains("-server-port=8501")') + [ "${actual}" = "true" ] + + # check server's CA cert + actual=$(echo $command | jq ' . | contains("-ca-file=/consul/tls/ca/tls.crt")') + [ "${actual}" = "true" ] + + # check consul-api-timeout + actual=$(echo $command | jq ' . | contains("-consul-api-timeout=5s")') + [ "${actual}" = "true" ] } #-------------------------------------------------------------------- @@ -1491,6 +2129,7 @@ load _helpers -s templates/connect-inject-deployment.yaml \ --set 'connectInject.enabled=true' \ --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ --set 'global.tls.caCert.secretName=foo' \ . 
| tee /dev/stderr |
 yq '.spec.template.spec.containers[0].command | any(contains("-enable-webhook-ca-update"))' | tee /dev/stderr)
@@ -1503,6 +2142,7 @@ load _helpers
 -s templates/connect-inject-deployment.yaml \
 --set 'connectInject.enabled=true' \
 --set 'global.tls.enabled=true' \
+ --set 'global.tls.enableAutoEncrypt=true' \
 --set 'global.tls.caCert.secretName=foo' \
 --set 'global.secretsBackend.vault.enabled=true' \
 --set 'global.secretsBackend.vault.consulClientRole=foo' \
@@ -1529,6 +2169,7 @@ load _helpers
 -s templates/connect-inject-deployment.yaml \
 --set 'connectInject.enabled=true' \
 --set 'global.tls.enabled=true' \
+ --set 'global.tls.enableAutoEncrypt=true' \
 --set 'global.tls.caCert.secretName=foo' \
 --set 'global.secretsBackend.vault.enabled=true' \
 --set 'global.secretsBackend.vault.consulClientRole=foo' \
@@ -1549,6 +2190,7 @@ load _helpers
 -s templates/connect-inject-deployment.yaml \
 --set 'connectInject.enabled=true' \
 --set 'global.tls.enabled=true' \
+ --set 'global.tls.enableAutoEncrypt=true' \
 --set 'global.tls.caCert.secretName=foo' \
 --set 'global.secretsBackend.vault.enabled=true' \
 --set 'global.secretsBackend.vault.consulClientRole=foo' \
@@ -1570,6 +2212,7 @@ load _helpers
 -s templates/connect-inject-deployment.yaml \
 --set 'connectInject.enabled=true' \
 --set 'global.tls.enabled=true' \
+ --set 'global.tls.enableAutoEncrypt=true' \
 --set 'global.tls.caCert.secretName=foo' \
 --set 'global.secretsBackend.vault.enabled=true' \
 --set 'global.secretsBackend.vault.consulClientRole=foo' \
@@ -1591,6 +2234,7 @@ load _helpers
 -s templates/connect-inject-deployment.yaml \
 --set 'connectInject.enabled=true' \
 --set 'global.tls.enabled=true' \
+ --set 'global.tls.enableAutoEncrypt=true' \
 --set 'global.tls.caCert.secretName=foo' \
 --set 'global.secretsBackend.vault.enabled=true' \
 --set 'global.secretsBackend.vault.consulClientRole=foo' \
@@ -1613,6 +2257,7 @@ load _helpers
 -s templates/connect-inject-deployment.yaml \
 --set 'connectInject.enabled=true' \
 --set 'global.tls.enabled=true' \
+ --set 'global.tls.enableAutoEncrypt=true' \
 --set 'global.secretsBackend.vault.enabled=true' \
 --set 'global.secretsBackend.vault.consulClientRole=test' \
 --set 'global.secretsBackend.vault.consulServerRole=foo' \
@@ -1630,6 +2275,7 @@ load _helpers
 -s templates/connect-inject-deployment.yaml \
 --set 'connectInject.enabled=true' \
 --set 'global.tls.enabled=true' \
+ --set 'global.tls.enableAutoEncrypt=true' \
 --set 'global.secretsBackend.vault.enabled=true' \
 --set 'global.secretsBackend.vault.consulClientRole=test' \
 --set 'global.secretsBackend.vault.consulServerRole=foo' \
@@ -1647,6 +2293,7 @@ load _helpers
 -s templates/connect-inject-deployment.yaml \
 --set 'connectInject.enabled=true' \
 --set 'global.tls.enabled=true' \
+ --set 'global.tls.enableAutoEncrypt=true' \
 --set 'global.secretsBackend.vault.enabled=true' \
 --set 'global.secretsBackend.vault.consulClientRole=test' \
 --set 'global.secretsBackend.vault.consulServerRole=foo' \
@@ -1668,6 +2315,7 @@ load _helpers
 --set 'global.secretsBackend.vault.consulServerRole=bar' \
 --set 'global.secretsBackend.vault.consulCARole=test2' \
 --set 'global.tls.enabled=true' \
+ --set 'global.tls.enableAutoEncrypt=true' \
 --set 'server.serverCert.secretName=pki_int/issue/test' \
 --set 'global.tls.caCert.secretName=pki_int/cert/ca' \
 --set 'global.secretsBackend.vault.connectInjectRole=test' \
@@ -1700,7 +2348,7 @@ load _helpers
 
 local actual="$(echo 
$cmd | yq -r '.annotations["vault.hashicorp.com/secret-volume-path-ca.crt"]' | tee /dev/stderr)" [ "${actual}" = "/vault/secrets/connect-injector/certs" ] - + local actual="$(echo $cmd | yq -r '.annotations["vault.hashicorp.com/agent-init-first"]' | tee /dev/stderr)" [ "${actual}" = "true" ] @@ -1777,6 +2425,7 @@ load _helpers --set 'global.secretsBackend.vault.controllerRole=test' \ --set 'global.secretsBackend.vault.controller.caCert.secretName=foo/ca' \ --set 'global.secretsBackend.vault.controller.tlsCert.secretName=foo/tls' \ + --set 'global.tls.enableAutoEncrypt=true' \ --set 'server.serverCert.secretName=pki_int/issue/test' \ --set 'global.tls.caCert.secretName=pki_int/cert/ca' \ . | tee /dev/stderr | @@ -1803,6 +2452,7 @@ load _helpers --set 'connectInject.enabled=true' \ --set 'global.tls.enabled=true' \ --set 'global.tls.caCert.secretName=foo' \ + --set 'global.tls.enableAutoEncrypt=true' \ --set 'global.secretsBackend.vault.connectInjectRole=inject-ca-role' \ --set 'global.secretsBackend.vault.connectInject.tlsCert.secretName=pki/issue/connect-webhook-cert-dc1' \ --set 'global.secretsBackend.vault.connectInject.caCert.secretName=pki/issue/connect-webhook-cert-dc1' \ @@ -1825,6 +2475,7 @@ load _helpers --set 'connectInject.enabled=true' \ --set 'global.tls.enabled=true' \ --set 'global.tls.caCert.secretName=foo' \ + --set 'global.tls.enableAutoEncrypt=true' \ --set 'global.secretsBackend.vault.connectInjectRole=inject-ca-role' \ --set 'global.secretsBackend.vault.connectInject.tlsCert.secretName=pki/issue/connect-webhook-cert-dc1' \ --set 'global.secretsBackend.vault.connectInject.caCert.secretName=pki/issue/connect-webhook-cert-dc1' \ @@ -1846,6 +2497,7 @@ load _helpers --set 'global.secretsBackend.vault.consulServerRole=foo' \ --set 'global.tls.enabled=true' \ --set 'global.tls.caCert.secretName=foo' \ + --set 'global.tls.enableAutoEncrypt=true' \ --set 'global.secretsBackend.vault.consulCARole=carole' \ . | tee /dev/stderr | yq -r '.spec.template.metadata' | tee /dev/stderr) @@ -1879,6 +2531,7 @@ load _helpers -s templates/connect-inject-deployment.yaml \ --set 'connectInject.enabled=true' \ --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ --set 'global.secretsBackend.vault.enabled=true' \ --set 'global.secretsBackend.vault.consulClientRole=test' \ --set 'global.secretsBackend.vault.consulServerRole=foo' \ @@ -1921,335 +2574,3 @@ reservedNameTest() { [ "$status" -eq 1 ] [[ "$output" =~ "The name $name set for key connectInject.consulNamespaces.consulDestinationNamespace is reserved by Consul for future use" ]] } - -#-------------------------------------------------------------------- -# externalServers - -@test "connectInject/Deployment: fails if externalServers.hosts is not provided when externalServers.enabled is true" { - cd `chart_dir` - run helm template \ - -s templates/connect-inject-deployment.yaml \ - --set 'connectInject.enabled=true' \ - --set 'server.enabled=false' \ - --set 'externalServers.enabled=true' \ - . - [ "$status" -eq 1 ] - [[ "$output" =~ "externalServers.hosts must be set if externalServers.enabled is true" ]] -} - -@test "connectInject/Deployment: configures the sidecar-injector env to use external servers" { - cd `chart_dir` - local env=$(helm template \ - -s templates/connect-inject-deployment.yaml \ - --set 'connectInject.enabled=true' \ - --set 'server.enabled=false' \ - --set 'externalServers.enabled=true' \ - --set 'externalServers.hosts[0]=consul' \ - . 
| tee /dev/stderr | - yq '.spec.template.spec.containers[0].env[]' | tee /dev/stderr)\ - - local actual=$(echo "$env" | - jq -r '. | select( .name == "CONSUL_ADDRESSES").value' | tee /dev/stderr) - [ "${actual}" = "consul" ] - - local actual=$(echo "$env" | - jq -r '. | select( .name == "CONSUL_HTTP_PORT").value' | tee /dev/stderr) - [ "${actual}" = "8501" ] - - local actual=$(echo "$env" | - jq -r '. | select( .name == "CONSUL_GRPC_PORT").value' | tee /dev/stderr) - [ "${actual}" = "8502" ] -} - -@test "connectInject/Deployment: can provide a different ports for the sidecar-injector when external servers are enabled" { - cd `chart_dir` - local env=$(helm template \ - -s templates/connect-inject-deployment.yaml \ - --set 'connectInject.enabled=true' \ - --set 'server.enabled=false' \ - --set 'externalServers.enabled=true' \ - --set 'externalServers.hosts[0]=consul' \ - --set 'externalServers.httpsPort=443' \ - --set 'externalServers.grpcPort=444' \ - . | tee /dev/stderr | - yq '.spec.template.spec.containers[0].env[]' | tee /dev/stderr)\ - - local actual=$(echo "$env" | - jq -r '. | select( .name == "CONSUL_ADDRESSES").value' | tee /dev/stderr) - [ "${actual}" = "consul" ] - - local actual=$(echo "$env" | - jq -r '. | select( .name == "CONSUL_HTTP_PORT").value' | tee /dev/stderr) - [ "${actual}" = "443" ] - - local actual=$(echo "$env" | - jq -r '. | select( .name == "CONSUL_GRPC_PORT").value' | tee /dev/stderr) - [ "${actual}" = "444" ] -} - -@test "connectInject/Deployment: can provide a TLS server name for the sidecar-injector when external servers are enabled" { - cd `chart_dir` - local env=$(helm template \ - -s templates/connect-inject-deployment.yaml \ - --set 'connectInject.enabled=true' \ - --set 'server.enabled=false' \ - --set 'global.tls.enabled=true' \ - --set 'externalServers.enabled=true' \ - --set 'externalServers.hosts[0]=consul' \ - --set 'externalServers.tlsServerName=foo' \ - --set 'global.acls.manageSystemACLs=true' \ - . | tee /dev/stderr | - yq '.spec.template.spec.containers[0].env[]' | tee /dev/stderr) - - local actual=$(echo "$env" | - jq -r '. | select( .name == "CONSUL_TLS_SERVER_NAME").value' | tee /dev/stderr) - [ "${actual}" = "foo" ] -} - -@test "connectInject/Deployment: does not configure CA cert for the sidecar-injector when external servers with useSystemRoots are enabled" { - cd `chart_dir` - local spec=$(helm template \ - -s templates/connect-inject-deployment.yaml \ - --set 'connectInject.enabled=true' \ - --set 'server.enabled=false' \ - --set 'externalServers.enabled=true' \ - --set 'externalServers.hosts[0]=consul' \ - --set 'externalServers.useSystemRoots=true' \ - --set 'global.acls.manageSystemACLs=true' \ - --set 'global.tls.enabled=true' \ - . 
| tee /dev/stderr | - yq '.spec.template.spec.containers[0].env[]' | tee /dev/stderr) - - local actual=$(echo "$spec" | yq '.containers[0].env[] | select(.name == "CONSUL_CACERT_FILE")' | tee /dev/stderr) - [ "${actual}" = "" ] - - local actual=$(echo "$spec" | yq '.containers[0].volumeMounts[] | select(.name == "consul-ca-cert")' | tee /dev/stderr) - [ "${actual}" = "" ] - - local actual=$(echo "$spec" | yq '.initContainers[0].volumeMounts[] | select(.name == "consul-ca-cert")' | tee /dev/stderr) - [ "${actual}" = "" ] - - local actual=$(echo "$spec" | yq '.volumes[] | select(.name == "consul-ca-cert")' | tee /dev/stderr) - [ "${actual}" = "" ] -} - -#-------------------------------------------------------------------- -# global.cloud - -@test "connectInject/Deployment: fails when global.cloud.enabled is true and global.cloud.clientId.secretName is not set but global.cloud.clientSecret.secretName and global.cloud.resourceId.secretName is set" { - cd `chart_dir` - run helm template \ - -s templates/client-daemonset.yaml \ - --set 'client.enabled=true' \ - --set 'global.tls.enabled=true' \ - --set 'global.tls.enableAutoEncrypt=true' \ - --set 'global.datacenter=dc-foo' \ - --set 'global.domain=bar' \ - --set 'global.cloud.enabled=true' \ - --set 'global.cloud.clientSecret.secretName=client-id-name' \ - --set 'global.cloud.clientSecret.secretKey=client-id-key' \ - --set 'global.cloud.resourceId.secretName=client-resource-id-name' \ - --set 'global.cloud.resourceId.secretKey=client-resource-id-key' \ - . - [ "$status" -eq 1 ] - [[ "$output" =~ "When global.cloud.enabled is true, global.cloud.resourceId.secretName, global.cloud.clientId.secretName, and global.cloud.clientSecret.secretName must also be set." ]] -} - -@test "connectInject/Deployment: fails when global.cloud.enabled is true and global.cloud.clientSecret.secretName is not set but global.cloud.clientId.secretName and global.cloud.resourceId.secretName is set" { - cd `chart_dir` - run helm template \ - -s templates/client-daemonset.yaml \ - --set 'client.enabled=true' \ - --set 'global.tls.enabled=true' \ - --set 'global.tls.enableAutoEncrypt=true' \ - --set 'global.datacenter=dc-foo' \ - --set 'global.domain=bar' \ - --set 'global.cloud.enabled=true' \ - --set 'global.cloud.clientId.secretName=client-id-name' \ - --set 'global.cloud.clientId.secretKey=client-id-key' \ - --set 'global.cloud.resourceId.secretName=resource-id-name' \ - --set 'global.cloud.resourceId.secretKey=resource-id-key' \ - . - [ "$status" -eq 1 ] - [[ "$output" =~ "When global.cloud.enabled is true, global.cloud.resourceId.secretName, global.cloud.clientId.secretName, and global.cloud.clientSecret.secretName must also be set." ]] -} - -@test "connectInject/Deployment: fails when global.cloud.enabled is true and global.cloud.resourceId.secretName is not set but global.cloud.clientId.secretName and global.cloud.clientSecret.secretName is set" { - cd `chart_dir` - run helm template \ - -s templates/connect-inject-deployment.yaml \ - --set 'connectInject.enabled=true' \ - --set 'global.tls.enabled=true' \ - --set 'global.tls.enableAutoEncrypt=true' \ - --set 'global.cloud.enabled=true' \ - --set 'global.cloud.clientId.secretName=client-id-name' \ - --set 'global.cloud.clientId.secretKey=client-id-key' \ - --set 'global.cloud.clientSecret.secretName=client-secret-id-name' \ - --set 'global.cloud.clientSecret.secretKey=client-secret-id-key' \ - . 
- [ "$status" -eq 1 ] - [[ "$output" =~ "When global.cloud.enabled is true, global.cloud.resourceId.secretName, global.cloud.clientId.secretName, and global.cloud.clientSecret.secretName must also be set." ]] -} - -@test "connectInject/Deployment: fails when global.cloud.resourceId.secretName is set but global.cloud.resourceId.secretKey is not set." { - cd `chart_dir` - run helm template \ - -s templates/connect-inject-deployment.yaml \ - --set 'connectInject.enabled=true' \ - --set 'global.tls.enabled=true' \ - --set 'global.tls.enableAutoEncrypt=true' \ - --set 'global.cloud.enabled=true' \ - --set 'global.cloud.clientId.secretName=client-id-name' \ - --set 'global.cloud.clientId.secretKey=client-id-key' \ - --set 'global.cloud.clientSecret.secretName=client-secret-id-name' \ - --set 'global.cloud.clientSecret.secretKey=client-secret-id-key' \ - --set 'global.cloud.resourceId.secretName=resource-id-name' \ - . - [ "$status" -eq 1 ] - [[ "$output" =~ "When either global.cloud.resourceId.secretName or global.cloud.resourceId.secretKey is defined, both must be set." ]] -} - -@test "connectInject/Deployment: fails when global.cloud.authURL.secretName is set but global.cloud.authURL.secretKey is not set." { - cd `chart_dir` - run helm template \ - -s templates/connect-inject-deployment.yaml \ - --set 'connectInject.enabled=true' \ - --set 'global.tls.enabled=true' \ - --set 'global.tls.enableAutoEncrypt=true' \ - --set 'global.cloud.enabled=true' \ - --set 'global.cloud.clientId.secretName=client-id-name' \ - --set 'global.cloud.clientId.secretKey=client-id-key' \ - --set 'global.cloud.clientSecret.secretName=client-secret-id-name' \ - --set 'global.cloud.clientSecret.secretKey=client-secret-id-key' \ - --set 'global.cloud.resourceId.secretName=resource-id-name' \ - --set 'global.cloud.resourceId.secretKey=resource-id-key' \ - --set 'global.cloud.authUrl.secretName=auth-url-name' \ - . - [ "$status" -eq 1 ] - - [[ "$output" =~ "When either global.cloud.authUrl.secretName or global.cloud.authUrl.secretKey is defined, both must be set." ]] -} - -@test "connectInject/Deployment: fails when global.cloud.authURL.secretKey is set but global.cloud.authURL.secretName is not set." { - cd `chart_dir` - run helm template \ - -s templates/connect-inject-deployment.yaml \ - --set 'connectInject.enabled=true' \ - --set 'global.tls.enabled=true' \ - --set 'global.tls.enableAutoEncrypt=true' \ - --set 'global.cloud.enabled=true' \ - --set 'global.cloud.clientId.secretName=client-id-name' \ - --set 'global.cloud.clientId.secretKey=client-id-key' \ - --set 'global.cloud.clientSecret.secretName=client-secret-id-name' \ - --set 'global.cloud.clientSecret.secretKey=client-secret-id-key' \ - --set 'global.cloud.resourceId.secretName=resource-id-name' \ - --set 'global.cloud.resourceId.secretKey=resource-id-key' \ - --set 'global.cloud.authUrl.secretKey=auth-url-key' \ - . - [ "$status" -eq 1 ] - - [[ "$output" =~ "When either global.cloud.authUrl.secretName or global.cloud.authUrl.secretKey is defined, both must be set." ]] -} - -@test "connectInject/Deployment: fails when global.cloud.apiHost.secretName is set but global.cloud.apiHost.secretKey is not set." 
{ - cd `chart_dir` - run helm template \ - -s templates/connect-inject-deployment.yaml \ - --set 'connectInject.enabled=true' \ - --set 'global.tls.enabled=true' \ - --set 'global.tls.enableAutoEncrypt=true' \ - --set 'global.cloud.enabled=true' \ - --set 'global.cloud.clientId.secretName=client-id-name' \ - --set 'global.cloud.clientId.secretKey=client-id-key' \ - --set 'global.cloud.clientSecret.secretName=client-secret-id-name' \ - --set 'global.cloud.clientSecret.secretKey=client-secret-id-key' \ - --set 'global.cloud.resourceId.secretName=resource-id-name' \ - --set 'global.cloud.resourceId.secretKey=resource-id-key' \ - --set 'global.cloud.apiHost.secretName=auth-url-name' \ - . - [ "$status" -eq 1 ] - - [[ "$output" =~ "When either global.cloud.apiHost.secretName or global.cloud.apiHost.secretKey is defined, both must be set." ]] -} - -@test "connectInject/Deployment: fails when global.cloud.apiHost.secretKey is set but global.cloud.apiHost.secretName is not set." { - cd `chart_dir` - run helm template \ - -s templates/connect-inject-deployment.yaml \ - --set 'connectInject.enabled=true' \ - --set 'global.tls.enabled=true' \ - --set 'global.tls.enableAutoEncrypt=true' \ - --set 'global.cloud.enabled=true' \ - --set 'global.cloud.clientId.secretName=client-id-name' \ - --set 'global.cloud.clientId.secretKey=client-id-key' \ - --set 'global.cloud.clientSecret.secretName=client-secret-id-name' \ - --set 'global.cloud.clientSecret.secretKey=client-secret-id-key' \ - --set 'global.cloud.resourceId.secretName=resource-id-name' \ - --set 'global.cloud.resourceId.secretKey=resource-id-key' \ - --set 'global.cloud.apiHost.secretKey=auth-url-key' \ - . - [ "$status" -eq 1 ] - - [[ "$output" =~ "When either global.cloud.apiHost.secretName or global.cloud.apiHost.secretKey is defined, both must be set." ]] -} - -@test "connectInject/Deployment: fails when global.cloud.scadaAddress.secretName is set but global.cloud.scadaAddress.secretKey is not set." { - cd `chart_dir` - run helm template \ - -s templates/connect-inject-deployment.yaml \ - --set 'connectInject.enabled=true' \ - --set 'global.tls.enabled=true' \ - --set 'global.tls.enableAutoEncrypt=true' \ - --set 'global.cloud.enabled=true' \ - --set 'global.cloud.clientId.secretName=client-id-name' \ - --set 'global.cloud.clientId.secretKey=client-id-key' \ - --set 'global.cloud.clientSecret.secretName=client-secret-id-name' \ - --set 'global.cloud.clientSecret.secretKey=client-secret-id-key' \ - --set 'global.cloud.resourceId.secretName=resource-id-name' \ - --set 'global.cloud.resourceId.secretKey=resource-id-key' \ - --set 'global.cloud.scadaAddress.secretName=scada-address-name' \ - . - [ "$status" -eq 1 ] - - [[ "$output" =~ "When either global.cloud.scadaAddress.secretName or global.cloud.scadaAddress.secretKey is defined, both must be set." ]] -} - -@test "connectInject/Deployment: fails when global.cloud.scadaAddress.secretKey is set but global.cloud.scadaAddress.secretName is not set." 
{ - cd `chart_dir` - run helm template \ - -s templates/connect-inject-deployment.yaml \ - --set 'connectInject.enabled=true' \ - --set 'global.tls.enabled=true' \ - --set 'global.tls.enableAutoEncrypt=true' \ - --set 'global.cloud.enabled=true' \ - --set 'global.cloud.clientId.secretName=client-id-name' \ - --set 'global.cloud.clientId.secretKey=client-id-key' \ - --set 'global.cloud.clientSecret.secretName=client-secret-id-name' \ - --set 'global.cloud.clientSecret.secretKey=client-secret-id-key' \ - --set 'global.cloud.resourceId.secretName=resource-id-name' \ - --set 'global.cloud.resourceId.secretKey=resource-id-key' \ - --set 'global.cloud.scadaAddress.secretKey=scada-address-key' \ - . - [ "$status" -eq 1 ] - - [[ "$output" =~ "When either global.cloud.scadaAddress.secretName or global.cloud.scadaAddress.secretKey is defined, both must be set." ]] -} - -@test "connectInject/Deployment: sets TLS server name if global.cloud.enabled is set" { - cd `chart_dir` - local actual=$(helm template \ - -s templates/connect-inject-deployment.yaml \ - --set 'connectInject.enabled=true' \ - --set 'global.tls.enabled=true' \ - --set 'global.tls.enableAutoEncrypt=true' \ - --set 'global.cloud.enabled=true' \ - --set 'global.cloud.clientId.secretName=client-id-name' \ - --set 'global.cloud.clientId.secretKey=client-id-key' \ - --set 'global.cloud.clientSecret.secretName=client-secret-id-name' \ - --set 'global.cloud.clientSecret.secretKey=client-secret-id-key' \ - --set 'global.cloud.resourceId.secretName=resource-id-name' \ - --set 'global.cloud.resourceId.secretKey=resource-id-key' \ - . | tee /dev/stderr | - yq '.spec.template.spec.containers[0].command | any(contains("-tls-server-name=server.dc1.consul"))' | tee /dev/stderr) - [ "${actual}" = "true" ] -} diff --git a/charts/consul/test/unit/connect-inject-mutatingwebhookconfiguration.bats b/charts/consul/test/unit/connect-inject-mutatingwebhookconfiguration.bats index efe9bb44d9..6745e690c3 100755 --- a/charts/consul/test/unit/connect-inject-mutatingwebhookconfiguration.bats +++ b/charts/consul/test/unit/connect-inject-mutatingwebhookconfiguration.bats @@ -2,13 +2,11 @@ load _helpers -@test "connectInject/MutatingWebhookConfiguration: enabled by default" { +@test "connectInject/MutatingWebhookConfiguration: disabled by default" { cd `chart_dir` - local actual=$(helm template \ + assert_empty helm template \ -s templates/connect-inject-mutatingwebhookconfiguration.yaml \ - . | tee /dev/stderr | - yq 'length > 0' | tee /dev/stderr) - [ "${actual}" = "true" ] + . } @test "connectInject/MutatingWebhookConfiguration: enable with global.enabled false" { @@ -35,7 +33,6 @@ load _helpers cd `chart_dir` assert_empty helm template \ -s templates/connect-inject-mutatingwebhookconfiguration.yaml \ - --set 'connectInject.enabled=-' \ --set 'global.enabled=false' \ . } @@ -56,8 +53,6 @@ load _helpers local actual=$(helm template \ -s templates/connect-inject-mutatingwebhookconfiguration.yaml \ --set 'connectInject.enabled=true' \ - --set 'global.tls.enabled=true' \ - --set 'meshGateway.enabled=true' \ --set 'global.peering.enabled=true' \ . | tee /dev/stderr | yq '.webhooks[1].name | contains("peeringacceptors.consul.hashicorp.com")' | tee /dev/stderr) @@ -65,8 +60,6 @@ load _helpers local actual=$(helm template \ -s templates/connect-inject-mutatingwebhookconfiguration.yaml \ --set 'connectInject.enabled=true' \ - --set 'global.tls.enabled=true' \ - --set 'meshGateway.enabled=true' \ --set 'global.peering.enabled=true' \ . 
      | tee /dev/stderr |
      yq '.webhooks[2].name | contains("peeringdialers.consul.hashicorp.com")' | tee /dev/stderr)
diff --git a/charts/consul/test/unit/connect-inject-service.bats b/charts/consul/test/unit/connect-inject-service.bats
index 2082ea2c0b..3831793156 100755
--- a/charts/consul/test/unit/connect-inject-service.bats
+++ b/charts/consul/test/unit/connect-inject-service.bats
@@ -2,13 +2,11 @@
 
 load _helpers
 
-@test "connectInject/Service: enabled by default" {
+@test "connectInject/Service: disabled by default" {
   cd `chart_dir`
-  local actual=$(helm template \
+  assert_empty helm template \
       -s templates/connect-inject-service.yaml \
-      . | tee /dev/stderr |
-      yq 'length > 0' | tee /dev/stderr)
-  [ "${actual}" = "true" ]
+      .
 }
 
 @test "connectInject/Service: enable with global.enabled false" {
@@ -35,7 +33,6 @@ load _helpers
   cd `chart_dir`
   assert_empty helm template \
       -s templates/connect-inject-service.yaml \
-      --set 'connectInject.enabled=-' \
       --set 'global.enabled=false' \
       .
 }
diff --git a/charts/consul/test/unit/connect-inject-serviceaccount.bats b/charts/consul/test/unit/connect-inject-serviceaccount.bats
index 2832ebc95d..07b38c3d49 100644
--- a/charts/consul/test/unit/connect-inject-serviceaccount.bats
+++ b/charts/consul/test/unit/connect-inject-serviceaccount.bats
@@ -2,13 +2,11 @@
 
 load _helpers
 
-@test "connectInject/ServiceAccount: enabled by default" {
+@test "connectInject/ServiceAccount: disabled by default" {
   cd `chart_dir`
-  local actual=$(helm template \
+  assert_empty helm template \
       -s templates/connect-inject-serviceaccount.yaml \
-      . | tee /dev/stderr |
-      yq 'length > 0' | tee /dev/stderr)
-  [ "${actual}" = "true" ]
+      .
 }
 
 @test "connectInject/ServiceAccount: enabled with global.enabled false" {
diff --git a/charts/consul/test/unit/connect-injector-disruptionbudget.bats b/charts/consul/test/unit/connect-injector-disruptionbudget.bats
index ec8d449821..ec998d0750 100755
--- a/charts/consul/test/unit/connect-injector-disruptionbudget.bats
+++ b/charts/consul/test/unit/connect-injector-disruptionbudget.bats
@@ -2,13 +2,10 @@
 
 load _helpers
 
-@test "connect-injector/DisruptionBudget: enabled by default" {
+@test "connect-injector/DisruptionBudget: disabled by default" {
   cd `chart_dir`
-  local actual=$(helm template \
-      -s templates/connect-injector-disruptionbudget.yaml \
-      . | tee /dev/stderr |
-      yq -s 'length > 0' | tee /dev/stderr)
-  [ "${actual}" = "true" ]
+  assert_empty helm template \
+      -s templates/connect-injector-disruptionbudget.yaml .
 }
 
 @test "connect-injector/DisruptionBudget: enabled with connectInject=enabled , connectInject.disruptionBudget.enabled=true and global.enabled=true " {
@@ -43,7 +40,6 @@ load _helpers
   cd `chart_dir`
   assert_empty helm template \
       -s templates/connect-injector-disruptionbudget.yaml \
-      --set 'connectInject.enabled=-' \
       --set 'global.enabled=false' \
       .
 }
@@ -163,35 +159,3 @@ load _helpers
 # no flag to *remove* an API version so some Helm versions will always have
 # policy/v1 support and will always use that API version.
-
-#--------------------------------------------------------------------
-# minAvailable
-
-@test "connect-injector/DisruptionBudget: correct minAvailable when set" {
-  cd `chart_dir`
-  local tpl=$(helm template \
-      -s templates/connect-injector-disruptionbudget.yaml \
-      --set 'connectInject.replicas=1' \
-      --set 'global.enabled=true' \
-      --set 'connectInject.enabled=true' \
-      --set 'connectInject.disruptionBudget.enabled=true' \
-      --set 'connectInject.disruptionBudget.minAvailable=1' \
-      . | tee /dev/stderr)
-  [ $(echo "$tpl" | yq '.spec.minAvailable') = "1" ]
-  [ $(echo "$tpl" | yq '.spec.maxUnavailable') = "null" ]
-}
-
-@test "connect-injector/DisruptionBudget: correct minAvailable when set with maxUnavailable" {
-  cd `chart_dir`
-  local tpl=$(helm template \
-      -s templates/connect-injector-disruptionbudget.yaml \
-      --set 'connectInject.replicas=1' \
-      --set 'global.enabled=true' \
-      --set 'connectInject.enabled=true' \
-      --set 'connectInject.disruptionBudget.enabled=true' \
-      --set 'connectInject.disruptionBudget.minAvailable=1' \
-      --set 'connectInject.disruptionBudget.maxUnavailable=2' \
-      . | tee /dev/stderr)
-  [ $(echo "$tpl" | yq '.spec.minAvailable') = "1" ]
-  [ $(echo "$tpl" | yq '.spec.maxUnavailable') = "null" ]
-}
diff --git a/charts/consul/test/unit/controller-clusterrole.bats b/charts/consul/test/unit/controller-clusterrole.bats
index 22f8ea654b..708d32d6be 100644
--- a/charts/consul/test/unit/controller-clusterrole.bats
+++ b/charts/consul/test/unit/controller-clusterrole.bats
@@ -2,13 +2,11 @@
 
 load _helpers
 
-@test "controller/ClusterRole: enabled by default" {
+@test "controller/ClusterRole: disabled by default" {
   cd `chart_dir`
-  local actual=$(helm template \
+  assert_empty helm template \
       -s templates/controller-clusterrole.yaml \
-      . | tee /dev/stderr |
-      yq -s 'length > 0' | tee /dev/stderr)
-  [ "${actual}" = "true" ]
+      .
 }
 
 @test "controller/ClusterRole: enabled with controller.enabled=true" {
diff --git a/charts/consul/test/unit/controller-clusterrolebinding.bats b/charts/consul/test/unit/controller-clusterrolebinding.bats
index 3582c0f7bf..b9777c95c6 100644
--- a/charts/consul/test/unit/controller-clusterrolebinding.bats
+++ b/charts/consul/test/unit/controller-clusterrolebinding.bats
@@ -2,13 +2,11 @@
 
 load _helpers
 
-@test "controller/ClusterRoleBinding: enabled by default" {
+@test "controller/ClusterRoleBinding: disabled by default" {
   cd `chart_dir`
-  local actual=$(helm template \
+  assert_empty helm template \
       -s templates/controller-clusterrolebinding.yaml \
-      . | tee /dev/stderr |
-      yq 'length > 0' | tee /dev/stderr)
-  [ "${actual}" = "true" ]
+      .
 }
 
 @test "controller/ClusterRoleBinding: enabled with controller.enabled=true" {
diff --git a/charts/consul/test/unit/controller-deployment.bats b/charts/consul/test/unit/controller-deployment.bats
index 7f32013cb0..87bb98b1f9 100644
--- a/charts/consul/test/unit/controller-deployment.bats
+++ b/charts/consul/test/unit/controller-deployment.bats
@@ -2,13 +2,11 @@
 
 load _helpers
 
-@test "controller/Deployment: enabled by default" {
+@test "controller/Deployment: disabled by default" {
   cd `chart_dir`
-  local actual=$(helm template \
+  assert_empty helm template \
       -s templates/controller-deployment.yaml \
-      . | tee /dev/stderr |
-      yq 'length > 0' | tee /dev/stderr)
-  [ "${actual}" = "true" ]
+      .
 }
 
 @test "controller/Deployment: enabled with controller.enabled=true" {
@@ -21,6 +19,23 @@ load _helpers
   [ "${actual}" = "true" ]
 }
 
+@test "controller/Deployment: command defaults" {
+  cd `chart_dir`
+  local cmd=$(helm template \
+      -s templates/controller-deployment.yaml \
+      --set 'controller.enabled=true' \
+      . | tee /dev/stderr |
+      yq '.spec.template.spec.containers[0].command' | tee /dev/stderr)
+
+  local actual=$(echo "$cmd" |
+      yq 'any(contains("consul-k8s-control-plane controller"))' | tee /dev/stderr)
+  [ "${actual}" = "true" ]
+
+  local actual=$(echo "$cmd" |
+      yq 'any(contains("-consul-api-timeout=5s"))' | tee /dev/stderr)
+  [ "${actual}" = "true" ]
+}
+
 #--------------------------------------------------------------------
 # resourcePrefix
 
@@ -98,6 +113,237 @@ load _helpers
   [ "${actual}" = "2" ]
 }
 
+#--------------------------------------------------------------------
+# global.acls.manageSystemACLs
+
+@test "controller/Deployment: consul-logout preStop hook is added when ACLs are enabled" {
+  cd `chart_dir`
+  local actual=$(helm template \
+      -s templates/controller-deployment.yaml \
+      --set 'controller.enabled=true' \
+      --set 'global.acls.manageSystemACLs=true' \
+      . | tee /dev/stderr |
+      yq '[.spec.template.spec.containers[0].lifecycle.preStop.exec.command[2]] | any(contains("consul-k8s-control-plane consul-logout -consul-api-timeout=5s"))' | tee /dev/stderr)
+  [ "${actual}" = "true" ]
+}
+
+@test "controller/Deployment: CONSUL_HTTP_TOKEN_FILE is not set when acls are disabled" {
+  cd `chart_dir`
+  local actual=$(helm template \
+      -s templates/controller-deployment.yaml \
+      --set 'controller.enabled=true' \
+      . | tee /dev/stderr |
+      yq '[.spec.template.spec.containers[0].env[0].name] | any(contains("CONSUL_HTTP_TOKEN_FILE"))' | tee /dev/stderr)
+  [ "${actual}" = "false" ]
+}
+
+@test "controller/Deployment: CONSUL_HTTP_TOKEN_FILE is set when acls are enabled" {
+  cd `chart_dir`
+  local actual=$(helm template \
+      -s templates/controller-deployment.yaml \
+      --set 'controller.enabled=true' \
+      --set 'global.acls.manageSystemACLs=true' \
+      . | tee /dev/stderr |
+      yq '[.spec.template.spec.containers[0].env[0].name] | any(contains("CONSUL_HTTP_TOKEN_FILE"))' | tee /dev/stderr)
+  [ "${actual}" = "true" ]
+}
+
+@test "controller/Deployment: init container is created when global.acls.manageSystemACLs=true and has correct command and environment with tls disabled" {
+  cd `chart_dir`
+  local object=$(helm template \
+      -s templates/controller-deployment.yaml \
+      --set 'controller.enabled=true' \
+      --set 'global.acls.manageSystemACLs=true' \
+      . | tee /dev/stderr |
+      yq '.spec.template.spec.initContainers[0]' | tee /dev/stderr)
+
+  local actual=$(echo $object |
+      yq -r '.name' | tee /dev/stderr)
+  [ "${actual}" = "controller-acl-init" ]
+
+  local actual=$(echo $object |
+      yq -r '.command | any(contains("consul-k8s-control-plane acl-init"))' | tee /dev/stderr)
+  [ "${actual}" = "true" ]
+
+  local actual=$(echo $object |
+      yq '[.env[1].name] | any(contains("CONSUL_HTTP_ADDR"))' | tee /dev/stderr)
+  [ "${actual}" = "true" ]
+
+  local actual=$(echo $object |
+      yq '[.env[1].value] | any(contains("http://$(HOST_IP):8500"))' | tee /dev/stderr)
+  echo $actual
+  [ "${actual}" = "true" ]
+
+  local actual=$(echo $object |
+      yq -r '.command | any(contains("-consul-api-timeout=5s"))' | tee /dev/stderr)
+  [ "${actual}" = "true" ]
+}
+
+@test "controller/Deployment: init container is created when global.acls.manageSystemACLs=true and has correct command and environment with tls enabled" {
+  cd `chart_dir`
+  local object=$(helm template \
+      -s templates/controller-deployment.yaml \
+      --set 'controller.enabled=true' \
+      --set 'global.tls.enabled=true' \
+      --set 'global.acls.manageSystemACLs=true' \
+      . | tee /dev/stderr |
+      yq '.spec.template.spec.initContainers[] | select(.name == "controller-acl-init")' | tee /dev/stderr)
+
+  local actual=$(echo $object |
+      yq -r '.command | any(contains("consul-k8s-control-plane acl-init"))' | tee /dev/stderr)
+  [ "${actual}" = "true" ]
+
+  local actual=$(echo $object |
+      yq '[.env[1].name] | any(contains("CONSUL_CACERT"))' | tee /dev/stderr)
+  [ "${actual}" = "true" ]
+
+  local actual=$(echo $object |
+      yq '[.env[2].name] | any(contains("CONSUL_HTTP_ADDR"))' | tee /dev/stderr)
+  [ "${actual}" = "true" ]
+
+  local actual=$(echo $object |
+      yq '[.env[2].value] | any(contains("https://$(HOST_IP):8501"))' | tee /dev/stderr)
+  echo $actual
+  [ "${actual}" = "true" ]
+
+  local actual=$(echo $object |
+      yq '.volumeMounts[1] | any(contains("consul-ca-cert"))' | tee /dev/stderr)
+  [ "${actual}" = "true" ]
+
+  local actual=$(echo $object |
+      yq -r '.command | any(contains("-consul-api-timeout=5s"))' | tee /dev/stderr)
+  [ "${actual}" = "true" ]
+}
+
+@test "controller/Deployment: init container is created when global.acls.manageSystemACLs=true and has correct command with Partitions enabled" {
+  cd `chart_dir`
+  local object=$(helm template \
+      -s templates/controller-deployment.yaml \
+      --set 'controller.enabled=true' \
+      --set 'global.tls.enabled=true' \
+      --set 'global.enableConsulNamespaces=true' \
+      --set 'global.adminPartitions.enabled=true' \
+      --set 'global.adminPartitions.name=default' \
+      --set 'global.acls.manageSystemACLs=true' \
+      . | tee /dev/stderr |
+      yq '.spec.template.spec.initContainers[] | select(.name == "controller-acl-init")' | tee /dev/stderr)
+
+  local actual=$(echo $object |
+      yq -r '.command | any(contains("consul-k8s-control-plane acl-init"))' | tee /dev/stderr)
+  [ "${actual}" = "true" ]
+
+  local actual=$(echo $object |
+      yq -r '.command | any(contains("-acl-auth-method=release-name-consul-k8s-component-auth-method"))' | tee /dev/stderr)
+  [ "${actual}" = "true" ]
+
+  local actual=$(echo $object |
+      yq -r '.command | any(contains("-partition=default"))' | tee /dev/stderr)
+  [ "${actual}" = "true" ]
+
+  local actual=$(echo $object |
+      yq '[.env[1].name] | any(contains("CONSUL_CACERT"))' | tee /dev/stderr)
+  [ "${actual}" = "true" ]
+
+  local actual=$(echo $object |
+      yq '[.env[2].name] | any(contains("CONSUL_HTTP_ADDR"))' | tee /dev/stderr)
+  [ "${actual}" = "true" ]
+
+  local actual=$(echo $object |
+      yq '[.env[2].value] | any(contains("https://$(HOST_IP):8501"))' | tee /dev/stderr)
+  echo $actual
+  [ "${actual}" = "true" ]
+
+  local actual=$(echo $object |
+      yq '.volumeMounts[1] | any(contains("consul-ca-cert"))' | tee /dev/stderr)
+  [ "${actual}" = "true" ]
+
+  local actual=$(echo $object |
+      yq -r '.command | any(contains("-consul-api-timeout=5s"))' | tee /dev/stderr)
+  [ "${actual}" = "true" ]
+}
+
+@test "controller/Deployment: init container is created when global.acls.manageSystemACLs=true and has correct command and environment with tls enabled and autoencrypt enabled" {
+  cd `chart_dir`
+  local object=$(helm template \
+      -s templates/controller-deployment.yaml \
+      --set 'controller.enabled=true' \
+      --set 'global.tls.enabled=true' \
+      --set 'global.tls.enableAutoEncrypt=true' \
+      --set 'global.acls.manageSystemACLs=true' \
+      . | tee /dev/stderr |
+      yq '.spec.template.spec.initContainers[] | select(.name == "controller-acl-init")' | tee /dev/stderr)
+
+  local actual=$(echo $object |
+      yq -r '.command | any(contains("consul-k8s-control-plane acl-init"))' | tee /dev/stderr)
+  [ "${actual}" = "true" ]
+
+  local actual=$(echo $object |
+      yq '[.env[1].name] | any(contains("CONSUL_CACERT"))' | tee /dev/stderr)
+  [ "${actual}" = "true" ]
+
+  local actual=$(echo $object |
+      yq '[.env[2].name] | any(contains("CONSUL_HTTP_ADDR"))' | tee /dev/stderr)
+  [ "${actual}" = "true" ]
+
+  local actual=$(echo $object |
+      yq '[.env[2].value] | any(contains("https://$(HOST_IP):8501"))' | tee /dev/stderr)
+  echo $actual
+  [ "${actual}" = "true" ]
+
+  local actual=$(echo $object |
+      yq '.volumeMounts[1] | any(contains("consul-auto-encrypt-ca-cert"))' | tee /dev/stderr)
+  [ "${actual}" = "true" ]
+
+  local actual=$(echo $object |
+      yq -r '.command | any(contains("-consul-api-timeout=5s"))' | tee /dev/stderr)
+  [ "${actual}" = "true" ]
+}
+
+@test "controller/Deployment: auto-encrypt init container is created and is the first init-container when global.acls.manageSystemACLs=true and has correct command and environment with tls enabled and autoencrypt enabled" {
+  cd `chart_dir`
+  local object=$(helm template \
+      -s templates/controller-deployment.yaml \
+      --set 'controller.enabled=true' \
+      --set 'global.tls.enabled=true' \
+      --set 'global.tls.enableAutoEncrypt=true' \
+      --set 'global.acls.manageSystemACLs=true' \
+      . | tee /dev/stderr |
+      yq '.spec.template.spec.initContainers[0]' | tee /dev/stderr)
+
+  local actual=$(echo $object |
+      yq -r '.name' | tee /dev/stderr)
+  [ "${actual}" = "get-auto-encrypt-client-ca" ]
+}
+
+@test "controller/Deployment: init container is created when global.acls.manageSystemACLs=true and has correct command when federation enabled in non-primary datacenter" {
+  cd `chart_dir`
+  local object=$(helm template \
+      -s templates/controller-deployment.yaml \
+      --set 'controller.enabled=true' \
+      --set 'global.datacenter=dc2' \
+      --set 'global.federation.enabled=true' \
+      --set 'global.federation.primaryDatacenter=dc1' \
+      --set 'meshGateway.enabled=true' \
+      --set 'connectInject.enabled=true' \
+      --set 'global.tls.enabled=true' \
+      --set 'global.tls.enableAutoEncrypt=true' \
+      --set 'global.acls.manageSystemACLs=true' \
+      . | tee /dev/stderr |
+      yq '.spec.template.spec.initContainers[] | select(.name == "controller-acl-init")' | tee /dev/stderr)
+
+  local actual=$(echo $object |
+      yq -r '.command | any(contains("consul-k8s-control-plane acl-init"))' | tee /dev/stderr)
+  [ "${actual}" = "true" ]
+
+  local actual=$(echo $object |
+      yq -r '.command | any(contains("-acl-auth-method=release-name-consul-k8s-component-auth-method-dc2"))' | tee /dev/stderr)
+  [ "${actual}" = "true" ]
+
+  local actual=$(echo $object |
+      yq -r '.command | any(contains("-primary-datacenter=dc1"))' | tee /dev/stderr)
+  [ "${actual}" = "true" ]
+}
+
 #--------------------------------------------------------------------
 # global.tls.enabled
 
@@ -157,6 +403,73 @@ load _helpers
   [ "${actual}" != "" ]
 }
 
+#--------------------------------------------------------------------
+# global.tls.enableAutoEncrypt
+
+@test "controller/Deployment: consul-auto-encrypt-ca-cert volume is added when TLS with auto-encrypt is enabled" {
+  cd `chart_dir`
+  local actual=$(helm template \
+      -s templates/controller-deployment.yaml \
+      --set 'controller.enabled=true' \
+      --set 'global.tls.enabled=true' \
+      --set 'global.tls.enableAutoEncrypt=true' \
+      . | tee /dev/stderr |
+      yq '.spec.template.spec.volumes[] | select(.name == "consul-auto-encrypt-ca-cert") | length > 0' | tee /dev/stderr)
+  [ "${actual}" = "true" ]
+}
+
+@test "controller/Deployment: consul-auto-encrypt-ca-cert volumeMount is added when TLS with auto-encrypt is enabled" {
+  cd `chart_dir`
+  local actual=$(helm template \
+      -s templates/controller-deployment.yaml \
+      --set 'controller.enabled=true' \
+      --set 'global.tls.enabled=true' \
+      --set 'global.tls.enableAutoEncrypt=true' \
+      . | tee /dev/stderr |
+      yq '.spec.template.spec.containers[0].volumeMounts[] | select(.name == "consul-auto-encrypt-ca-cert") | length > 0' | tee /dev/stderr)
+  [ "${actual}" = "true" ]
+}
+
+@test "controller/Deployment: get-auto-encrypt-client-ca init container is created when TLS with auto-encrypt is enabled" {
+  cd `chart_dir`
+  local actual=$(helm template \
+      -s templates/controller-deployment.yaml \
+      --set 'controller.enabled=true' \
+      --set 'global.tls.enabled=true' \
+      --set 'global.tls.enableAutoEncrypt=true' \
+      . | tee /dev/stderr |
+      yq '.spec.template.spec.initContainers[] | select(.name == "get-auto-encrypt-client-ca") | length > 0' | tee /dev/stderr)
+  [ "${actual}" = "true" ]
+}
+
+@test "controller/Deployment: adds both init containers when TLS with auto-encrypt and ACLs are enabled" {
+  cd `chart_dir`
+  local actual=$(helm template \
+      -s templates/controller-deployment.yaml \
+      --set 'controller.enabled=true' \
+      --set 'global.acls.manageSystemACLs=true' \
+      --set 'global.tls.enabled=true' \
+      --set 'global.tls.enableAutoEncrypt=true' \
+      . | tee /dev/stderr |
+      yq '.spec.template.spec.initContainers | length == 2' | tee /dev/stderr)
+  [ "${actual}" = "true" ]
+}
+
+@test "controller/Deployment: consul-ca-cert volume is not added if externalServers.enabled=true and externalServers.useSystemRoots=true" {
+  cd `chart_dir`
+  local actual=$(helm template \
+      -s templates/controller-deployment.yaml \
+      --set 'controller.enabled=true' \
+      --set 'global.tls.enabled=true' \
+      --set 'global.tls.enableAutoEncrypt=true' \
+      --set 'externalServers.enabled=true' \
+      --set 'externalServers.hosts[0]=foo.com' \
+      --set 'externalServers.useSystemRoots=true' \
+      . | tee /dev/stderr |
+      yq '.spec.template.spec.volumes[] | select(.name == "consul-ca-cert")' | tee /dev/stderr)
+  [ "${actual}" = "" ]
+}
+
 #--------------------------------------------------------------------
 # partitions
 
@@ -172,6 +485,19 @@ load _helpers
   [ "${actual}" = "false" ]
 }
 
+@test "controller/Deployment: partition name set with .global.adminPartitions.enabled=true" {
+  cd `chart_dir`
+  local actual=$(helm template \
+      -s templates/controller-deployment.yaml \
+      --set 'controller.enabled=true' \
+      --set 'global.adminPartitions.enabled=true' \
+      --set 'global.enableConsulNamespaces=true' \
+      . | tee /dev/stderr |
+      yq '.spec.template.spec.containers[0].command | any(contains("partition=default"))' | tee /dev/stderr)
+
+  [ "${actual}" = "true" ]
+}
+
 @test "controller/Deployment: fails if namespaces are disabled and .global.adminPartitions.enabled=true" {
   cd `chart_dir`
   run helm template \
@@ -229,7 +555,7 @@ load _helpers
 
   local actual=$(echo $object |
       yq 'any(contains("enable-k8s-namespace-mirroring"))' | tee /dev/stderr)
-  [ "${actual}" = "true" ]
+  [ "${actual}" = "false" ]
 
   local actual=$(echo $object |
       yq 'any(contains("k8s-namespace-mirroring-prefix"))' | tee /dev/stderr)
@@ -437,6 +763,112 @@ load _helpers
   [ "${actual}" = '{"limits":{"cpu":"200m","memory":"200Mi"},"requests":{"cpu":"100m","memory":"100Mi"}}' ]
 }
 
+#--------------------------------------------------------------------
+# aclToken
+
+@test "controller/Deployment: aclToken enabled when secretName and secretKey is provided" {
+  cd `chart_dir`
+  local actual=$(helm template \
+      -s templates/controller-deployment.yaml \
+      --set 'controller.enabled=true' \
+      --set 'controller.aclToken.secretName=foo' \
+      --set 'controller.aclToken.secretKey=bar' \
+      . | tee /dev/stderr |
+      yq '[.spec.template.spec.containers[0].env[].name] | any(contains("CONSUL_HTTP_TOKEN"))' | tee /dev/stderr)
+  [ "${actual}" = "true" ]
+}
+
+@test "controller/Deployment: aclToken env is set when ACLs are enabled" {
+  cd `chart_dir`
+  local actual=$(helm template \
+      -s templates/controller-deployment.yaml \
+      --set 'controller.enabled=true' \
+      --set 'global.acls.manageSystemACLs=true' \
+      . | tee /dev/stderr |
+      yq '[.spec.template.spec.containers[0].env[].name] | any(contains("CONSUL_HTTP_TOKEN"))' | tee /dev/stderr)
+  [ "${actual}" = "true" ]
+}
+
+@test "controller/Deployment: aclToken env is not set when ACLs are disabled" {
+  cd `chart_dir`
+  local actual=$(helm template \
+      -s templates/controller-deployment.yaml \
+      --set 'controller.enabled=true' \
+      . | tee /dev/stderr |
+      yq '[.spec.template.spec.containers[0].env[].name] | any(contains("CONSUL_HTTP_TOKEN"))' | tee /dev/stderr)
+  [ "${actual}" = "false" ]
+}
+
+#--------------------------------------------------------------------
+# logLevel
+
+@test "controller/Deployment: logLevel info by default from global" {
+  cd `chart_dir`
+  local cmd=$(helm template \
+      -s templates/controller-deployment.yaml \
+      --set 'controller.enabled=true' \
+      --set 'global.acls.manageSystemACLs=true' \
+      . | tee /dev/stderr |
+      yq '.spec.template.spec' | tee /dev/stderr)
+
+  local actual=$(echo "$cmd" |
+      yq '.containers[0].command | any(contains("-log-level=info"))' | tee /dev/stderr)
+  [ "${actual}" = "true" ]
+
+  local actual=$(echo "$cmd" |
+      yq '.initContainers[0].command | any(contains("-log-level=info"))' | tee /dev/stderr)
+  [ "${actual}" = "true" ]
+}
+
+@test "controller/Deployment: logLevel can be overridden" {
+  cd `chart_dir`
+  local cmd=$(helm template \
+      -s templates/controller-deployment.yaml \
+      --set 'controller.enabled=true' \
+      --set 'controller.logLevel=error' \
+      --set 'global.acls.manageSystemACLs=true' \
+      . | tee /dev/stderr |
+      yq '.spec.template.spec' | tee /dev/stderr)
+
+  local actual=$(echo "$cmd" |
+      yq '.containers[0].command | any(contains("-log-level=error"))' | tee /dev/stderr)
+  [ "${actual}" = "true" ]
+
+  local actual=$(echo "$cmd" |
+      yq '.initContainers[0].command | any(contains("-log-level=error"))' | tee /dev/stderr)
+  [ "${actual}" = "true" ]
+}
+
+#--------------------------------------------------------------------
+# get-auto-encrypt-client-ca
+
+@test "controller/Deployment: get-auto-encrypt-client-ca uses server's stateful set address by default and passes ca cert" {
+  cd `chart_dir`
+  local command=$(helm template \
+      -s templates/controller-deployment.yaml \
+      --set 'controller.enabled=true' \
+      --set 'global.tls.enabled=true' \
+      --set 'global.tls.enableAutoEncrypt=true' \
+      . | tee /dev/stderr |
+      yq '.spec.template.spec.initContainers[] | select(.name == "get-auto-encrypt-client-ca").command | join(" ")' | tee /dev/stderr)
+
+  # check server address
+  actual=$(echo $command | jq ' . | contains("-server-addr=release-name-consul-server")')
+  [ "${actual}" = "true" ]
+
+  # check server port
+  actual=$(echo $command | jq ' . | contains("-server-port=8501")')
+  [ "${actual}" = "true" ]
+
+  # check server's CA cert
+  actual=$(echo $command | jq ' . | contains("-ca-file=/consul/tls/ca/tls.crt")')
+  [ "${actual}" = "true" ]
+
+  # check consul-api-timeout
+  actual=$(echo $command | jq ' . | contains("-consul-api-timeout=5s")')
+  [ "${actual}" = "true" ]
+}
+
 #--------------------------------------------------------------------
 # Vault
 
@@ -801,260 +1233,4 @@ load _helpers
   [ "${actual}" = "bar" ]
 }
 
-#--------------------------------------------------------------------
-# externalServers
-@test "controller/Deployment: fails if externalServers.hosts is not provided when externalServers.enabled is true" {
-  cd `chart_dir`
-  run helm template \
-      -s templates/controller-deployment.yaml \
-      --set 'controller.enabled=true' \
-      --set 'server.enabled=false' \
-      --set 'externalServers.enabled=true' \
-      .
-  [ "$status" -eq 1 ]
-  [[ "$output" =~ "externalServers.hosts must be set if externalServers.enabled is true" ]]
-}
-
-@test "controller/Deployment: does not configure CA cert for the controller and acl-init containers when external servers with useSystemRoots are enabled" {
-  cd `chart_dir`
-  local spec=$(helm template \
-      -s templates/controller-deployment.yaml \
-      --set 'controller.enabled=true' \
-      --set 'server.enabled=false' \
-      --set 'externalServers.enabled=true' \
-      --set 'externalServers.hosts[0]=consul' \
-      --set 'externalServers.useSystemRoots=true' \
-      --set 'global.acls.manageSystemACLs=true' \
-      --set 'global.tls.enabled=true' \
-      . | tee /dev/stderr |
-      yq -r '.spec.template.spec' | tee /dev/stderr)
-
-  local actual=$(echo "$spec" | yq '.containers[0].env[] | select(.name == "CONSUL_CACERT")' | tee /dev/stderr)
-  [ "${actual}" = "" ]
-
-  local actual=$(echo "$spec" | yq '.containers[0].volumeMounts[] | select(.name == "consul-ca-cert")' | tee /dev/stderr)
-  [ "${actual}" = "" ]
-
-  local actual=$(echo "$spec" | yq '.initContainers[0].volumeMounts[] | select(.name == "consul-ca-cert")' | tee /dev/stderr)
-  [ "${actual}" = "" ]
-
-  local actual=$(echo "$spec" | yq '.volumes[] | select(.name == "consul-ca-cert")' | tee /dev/stderr)
-  [ "${actual}" = "" ]
-}
-
-#--------------------------------------------------------------------
-# global.cloud
-
-@test "controller/Deployment: fails when global.cloud.enabled is true and global.cloud.clientId.secretName is not set but global.cloud.clientSecret.secretName and global.cloud.resourceId.secretName is set" {
-  cd `chart_dir`
-  run helm template \
-      -s templates/controller-deployment.yaml \
-      --set 'controller.enabled=true' \
-      --set 'global.tls.enabled=true' \
-      --set 'global.tls.enableAutoEncrypt=true' \
-      --set 'global.cloud.enabled=true' \
-      --set 'global.cloud.clientSecret.secretName=client-id-name' \
-      --set 'global.cloud.clientSecret.secretKey=client-id-key' \
-      --set 'global.cloud.resourceId.secretName=client-resource-id-name' \
-      --set 'global.cloud.resourceId.secretKey=client-resource-id-key' \
-      .
-  [ "$status" -eq 1 ]
-  [[ "$output" =~ "When global.cloud.enabled is true, global.cloud.resourceId.secretName, global.cloud.clientId.secretName, and global.cloud.clientSecret.secretName must also be set." ]]
-}
-
-@test "controller/Deployment: fails when global.cloud.enabled is true and global.cloud.clientSecret.secretName is not set but global.cloud.clientId.secretName and global.cloud.resourceId.secretName is set" {
-  cd `chart_dir`
-  run helm template \
-      -s templates/controller-deployment.yaml \
-      --set 'controller.enabled=true' \
-      --set 'global.tls.enabled=true' \
-      --set 'global.tls.enableAutoEncrypt=true' \
-      --set 'global.cloud.enabled=true' \
-      --set 'global.cloud.clientId.secretName=client-id-name' \
-      --set 'global.cloud.clientId.secretKey=client-id-key' \
-      --set 'global.cloud.resourceId.secretName=resource-id-name' \
-      --set 'global.cloud.resourceId.secretKey=resource-id-key' \
-      .
-  [ "$status" -eq 1 ]
-  [[ "$output" =~ "When global.cloud.enabled is true, global.cloud.resourceId.secretName, global.cloud.clientId.secretName, and global.cloud.clientSecret.secretName must also be set." ]]
-}
-
-@test "controller/Deployment: fails when global.cloud.enabled is true and global.cloud.resourceId.secretName is not set but global.cloud.clientId.secretName and global.cloud.clientSecret.secretName is set" {
-  cd `chart_dir`
-  run helm template \
-      -s templates/controller-deployment.yaml \
-      --set 'controller.enabled=true' \
-      --set 'global.tls.enabled=true' \
-      --set 'global.tls.enableAutoEncrypt=true' \
-      --set 'global.cloud.enabled=true' \
-      --set 'global.cloud.clientId.secretName=client-id-name' \
-      --set 'global.cloud.clientId.secretKey=client-id-key' \
-      --set 'global.cloud.clientSecret.secretName=client-secret-id-name' \
-      --set 'global.cloud.clientSecret.secretKey=client-secret-id-key' \
-      .
-  [ "$status" -eq 1 ]
-  [[ "$output" =~ "When global.cloud.enabled is true, global.cloud.resourceId.secretName, global.cloud.clientId.secretName, and global.cloud.clientSecret.secretName must also be set." ]]
-}
-
-@test "controller/Deployment: fails when global.cloud.resourceId.secretName is set but global.cloud.resourceId.secretKey is not set." {
-  cd `chart_dir`
-  run helm template \
-      -s templates/controller-deployment.yaml \
-      --set 'controller.enabled=true' \
-      --set 'global.tls.enabled=true' \
-      --set 'global.tls.enableAutoEncrypt=true' \
-      --set 'global.cloud.enabled=true' \
-      --set 'global.cloud.clientId.secretName=client-id-name' \
-      --set 'global.cloud.clientId.secretKey=client-id-key' \
-      --set 'global.cloud.clientSecret.secretName=client-secret-id-name' \
-      --set 'global.cloud.clientSecret.secretKey=client-secret-id-key' \
-      --set 'global.cloud.resourceId.secretName=resource-id-name' \
-      .
-  [ "$status" -eq 1 ]
-  [[ "$output" =~ "When either global.cloud.resourceId.secretName or global.cloud.resourceId.secretKey is defined, both must be set." ]]
-}
-
-@test "controller/Deployment: fails when global.cloud.authURL.secretName is set but global.cloud.authURL.secretKey is not set." {
-  cd `chart_dir`
-  run helm template \
-      -s templates/controller-deployment.yaml \
-      --set 'controller.enabled=true' \
-      --set 'global.tls.enabled=true' \
-      --set 'global.tls.enableAutoEncrypt=true' \
-      --set 'global.cloud.enabled=true' \
-      --set 'global.cloud.clientId.secretName=client-id-name' \
-      --set 'global.cloud.clientId.secretKey=client-id-key' \
-      --set 'global.cloud.clientSecret.secretName=client-secret-id-name' \
-      --set 'global.cloud.clientSecret.secretKey=client-secret-id-key' \
-      --set 'global.cloud.resourceId.secretName=resource-id-name' \
-      --set 'global.cloud.resourceId.secretKey=resource-id-key' \
-      --set 'global.cloud.authUrl.secretName=auth-url-name' \
-      .
-  [ "$status" -eq 1 ]
-
-  [[ "$output" =~ "When either global.cloud.authUrl.secretName or global.cloud.authUrl.secretKey is defined, both must be set." ]]
-}
-
-@test "controller/Deployment: fails when global.cloud.authURL.secretKey is set but global.cloud.authURL.secretName is not set." {
-  cd `chart_dir`
-  run helm template \
-      -s templates/controller-deployment.yaml \
-      --set 'controller.enabled=true' \
-      --set 'global.tls.enabled=true' \
-      --set 'global.tls.enableAutoEncrypt=true' \
-      --set 'global.cloud.enabled=true' \
-      --set 'global.cloud.clientId.secretName=client-id-name' \
-      --set 'global.cloud.clientId.secretKey=client-id-key' \
-      --set 'global.cloud.clientSecret.secretName=client-secret-id-name' \
-      --set 'global.cloud.clientSecret.secretKey=client-secret-id-key' \
-      --set 'global.cloud.resourceId.secretName=resource-id-name' \
-      --set 'global.cloud.resourceId.secretKey=resource-id-key' \
-      --set 'global.cloud.authUrl.secretKey=auth-url-key' \
-      .
-  [ "$status" -eq 1 ]
-
-  [[ "$output" =~ "When either global.cloud.authUrl.secretName or global.cloud.authUrl.secretKey is defined, both must be set." ]]
-}
-
-@test "controller/Deployment: fails when global.cloud.apiHost.secretName is set but global.cloud.apiHost.secretKey is not set." {
-  cd `chart_dir`
-  run helm template \
-      --set 'controller.enabled=true' \
-      --set 'global.tls.enabled=true' \
-      --set 'global.tls.enableAutoEncrypt=true' \
-      --set 'global.cloud.enabled=true' \
-      --set 'global.cloud.clientId.secretName=client-id-name' \
-      --set 'global.cloud.clientId.secretKey=client-id-key' \
-      --set 'global.cloud.clientSecret.secretName=client-secret-id-name' \
-      --set 'global.cloud.clientSecret.secretKey=client-secret-id-key' \
-      --set 'global.cloud.resourceId.secretName=resource-id-name' \
-      --set 'global.cloud.resourceId.secretKey=resource-id-key' \
-      --set 'global.cloud.apiHost.secretName=auth-url-name' \
-      .
-  [ "$status" -eq 1 ]
-
-  [[ "$output" =~ "When either global.cloud.apiHost.secretName or global.cloud.apiHost.secretKey is defined, both must be set." ]]
-}
-
-@test "controller/Deployment: fails when global.cloud.apiHost.secretKey is set but global.cloud.apiHost.secretName is not set." {
-  cd `chart_dir`
-  run helm template \
-      --set 'controller.enabled=true' \
-      --set 'global.tls.enabled=true' \
-      --set 'global.tls.enableAutoEncrypt=true' \
-      --set 'global.cloud.enabled=true' \
-      --set 'global.cloud.clientId.secretName=client-id-name' \
-      --set 'global.cloud.clientId.secretKey=client-id-key' \
-      --set 'global.cloud.clientSecret.secretName=client-secret-id-name' \
-      --set 'global.cloud.clientSecret.secretKey=client-secret-id-key' \
-      --set 'global.cloud.resourceId.secretName=resource-id-name' \
-      --set 'global.cloud.resourceId.secretKey=resource-id-key' \
-      --set 'global.cloud.apiHost.secretKey=auth-url-key' \
-      .
-  [ "$status" -eq 1 ]
-
-  [[ "$output" =~ "When either global.cloud.apiHost.secretName or global.cloud.apiHost.secretKey is defined, both must be set." ]]
-}
-
-@test "controller/Deployment: fails when global.cloud.scadaAddress.secretName is set but global.cloud.scadaAddress.secretKey is not set." {
-  cd `chart_dir`
-  run helm template \
-      -s templates/controller-deployment.yaml \
-      --set 'controller.enabled=true' \
-      --set 'global.tls.enabled=true' \
-      --set 'global.tls.enableAutoEncrypt=true' \
-      --set 'global.cloud.enabled=true' \
-      --set 'global.cloud.clientId.secretName=client-id-name' \
-      --set 'global.cloud.clientId.secretKey=client-id-key' \
-      --set 'global.cloud.clientSecret.secretName=client-secret-id-name' \
-      --set 'global.cloud.clientSecret.secretKey=client-secret-id-key' \
-      --set 'global.cloud.resourceId.secretName=resource-id-name' \
-      --set 'global.cloud.resourceId.secretKey=resource-id-key' \
-      --set 'global.cloud.scadaAddress.secretName=scada-address-name' \
-      .
-  [ "$status" -eq 1 ]
-
-  [[ "$output" =~ "When either global.cloud.scadaAddress.secretName or global.cloud.scadaAddress.secretKey is defined, both must be set." ]]
-}
-
-@test "controller/Deployment: fails when global.cloud.scadaAddress.secretKey is set but global.cloud.scadaAddress.secretName is not set." {
-  cd `chart_dir`
-  run helm template \
-      -s templates/controller-deployment.yaml \
-      --set 'controller.enabled=true' \
-      --set 'global.tls.enabled=true' \
-      --set 'global.tls.enableAutoEncrypt=true' \
-      --set 'global.cloud.enabled=true' \
-      --set 'global.cloud.clientId.secretName=client-id-name' \
-      --set 'global.cloud.clientId.secretKey=client-id-key' \
-      --set 'global.cloud.clientSecret.secretName=client-secret-id-name' \
-      --set 'global.cloud.clientSecret.secretKey=client-secret-id-key' \
-      --set 'global.cloud.resourceId.secretName=resource-id-name' \
-      --set 'global.cloud.resourceId.secretKey=resource-id-key' \
-      --set 'global.cloud.scadaAddress.secretKey=scada-address-key' \
-      .
-  [ "$status" -eq 1 ]
-
-  [[ "$output" =~ "When either global.cloud.scadaAddress.secretName or global.cloud.scadaAddress.secretKey is defined, both must be set." ]]
-}
-
-
-@test "controller/Deployment: sets TLS server name if global.cloud.enabled is set" {
-  cd `chart_dir`
-  local actual=$(helm template \
-      -s templates/controller-deployment.yaml \
-      --set 'controller.enabled=true' \
-      --set 'global.tls.enabled=true' \
-      --set 'global.tls.enableAutoEncrypt=true' \
-      --set 'global.cloud.enabled=true' \
-      --set 'global.cloud.clientId.secretName=client-id-name' \
-      --set 'global.cloud.clientId.secretKey=client-id-key' \
-      --set 'global.cloud.clientSecret.secretName=client-secret-id-name' \
-      --set 'global.cloud.clientSecret.secretKey=client-secret-id-key' \
-      --set 'global.cloud.resourceId.secretName=resource-id-name' \
-      --set 'global.cloud.resourceId.secretKey=resource-id-key' \
-      . | tee /dev/stderr |
-      yq '.spec.template.spec.containers[0].command | any(contains("-tls-server-name=server.dc1.consul"))' | tee /dev/stderr)
-  [ "${actual}" = "true" ]
-}
diff --git a/charts/consul/test/unit/controller-leader-election-role.bats b/charts/consul/test/unit/controller-leader-election-role.bats
index b3d8715911..3abf9c81dc 100644
--- a/charts/consul/test/unit/controller-leader-election-role.bats
+++ b/charts/consul/test/unit/controller-leader-election-role.bats
@@ -2,13 +2,11 @@
 
 load _helpers
 
-@test "controllerLeaderElection/Role: enabled by default" {
+@test "controllerLeaderElection/Role: disabled by default" {
   cd `chart_dir`
-  local actual=$(helm template \
+  assert_empty helm template \
       -s templates/controller-leader-election-role.yaml \
-      . | tee /dev/stderr |
-      yq 'length > 0' | tee /dev/stderr)
-  [ "${actual}" = "true" ]
+      .
 }
 
 @test "controllerLeaderElection/Role: enabled with controller.enabled=true" {
diff --git a/charts/consul/test/unit/controller-leader-election-rolebinding.bats b/charts/consul/test/unit/controller-leader-election-rolebinding.bats
index e83fca4928..94f31e01b8 100644
--- a/charts/consul/test/unit/controller-leader-election-rolebinding.bats
+++ b/charts/consul/test/unit/controller-leader-election-rolebinding.bats
@@ -2,13 +2,11 @@
 
 load _helpers
 
-@test "controllerLeaderElection/RoleBinding: enabled by default" {
+@test "controllerLeaderElection/RoleBinding: disabled by default" {
   cd `chart_dir`
-  local actual=$(helm template \
+  assert_empty helm template \
       -s templates/controller-leader-election-rolebinding.yaml \
-      . | tee /dev/stderr |
-      yq 'length > 0' | tee /dev/stderr)
-  [ "${actual}" = "true" ]
+      .
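+
+  # assert_empty comes from test/unit/_helpers.bash; it is expected to run the
+  # given `helm template` command and fail the test if any manifest is
+  # rendered. A rough hand-rolled equivalent, assuming bats' built-in `run`
+  # (which captures the exit code in $status and the output in $output),
+  # would be:
+  #
+  #   run helm template -s templates/controller-leader-election-rolebinding.yaml .
+  #   [ "$status" -ne 0 ] || [ -z "$output" ]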
 }
 
 @test "controllerLeaderElection/RoleBinding: enabled with controller.enabled=true" {
diff --git a/charts/consul/test/unit/controller-mutatingwebhookconfiguration.bats b/charts/consul/test/unit/controller-mutatingwebhookconfiguration.bats
index 511f94ed77..b65c94666a 100644
--- a/charts/consul/test/unit/controller-mutatingwebhookconfiguration.bats
+++ b/charts/consul/test/unit/controller-mutatingwebhookconfiguration.bats
@@ -2,13 +2,11 @@
 
 load _helpers
 
-@test "controller/MutatingWebhookConfiguration: enabled by default" {
+@test "controller/MutatingWebhookConfiguration: disabled by default" {
   cd `chart_dir`
-  local actual=$(helm template \
+  assert_empty helm template \
       -s templates/controller-mutatingwebhookconfiguration.yaml \
-      . | tee /dev/stderr |
-      yq 'length > 0' | tee /dev/stderr)
-  [ "${actual}" = "true" ]
+      .
 }
 
 @test "controller/MutatingWebhookConfiguration: enabled with controller.enabled=true" {
diff --git a/charts/consul/test/unit/controller-serviceaccount.bats b/charts/consul/test/unit/controller-serviceaccount.bats
index f2e23385a8..3dd95cfea4 100644
--- a/charts/consul/test/unit/controller-serviceaccount.bats
+++ b/charts/consul/test/unit/controller-serviceaccount.bats
@@ -2,13 +2,11 @@
 
 load _helpers
 
-@test "controller/ServiceAccount: enabled by default" {
+@test "controller/ServiceAccount: disabled by default" {
   cd `chart_dir`
-  local actual=$(helm template \
+  assert_empty helm template \
       -s templates/controller-serviceaccount.yaml \
-      . | tee /dev/stderr |
-      yq 'length > 0' | tee /dev/stderr)
-  [ "${actual}" = "true" ]
+      .
 }
 
 @test "controller/ServiceAccount: enabled with controller.enabled=true" {
diff --git a/charts/consul/test/unit/controller-webhook-service.bats b/charts/consul/test/unit/controller-webhook-service.bats
index 938b9ffbe4..fc78e8e2e4 100644
--- a/charts/consul/test/unit/controller-webhook-service.bats
+++ b/charts/consul/test/unit/controller-webhook-service.bats
@@ -2,13 +2,11 @@
 
 load _helpers
 
-@test "controllerWebhook/Service: enabled by default" {
+@test "controllerWebhook/Service: disabled by default" {
   cd `chart_dir`
-  local actual=$(helm template \
+  assert_empty helm template \
       -s templates/controller-webhook-service.yaml \
-      . | tee /dev/stderr |
-      yq 'length > 0' | tee /dev/stderr)
-  [ "${actual}" = "true" ]
+      .
 }
 
 @test "controllerWebhook/Service: enabled with controller.enabled=true" {
diff --git a/charts/consul/test/unit/crd-exportedservices.bats b/charts/consul/test/unit/crd-exportedservices.bats
index 11ad3d53c0..cf1a35a587 100644
--- a/charts/consul/test/unit/crd-exportedservices.bats
+++ b/charts/consul/test/unit/crd-exportedservices.bats
@@ -2,16 +2,14 @@
 
 load _helpers
 
-@test "exportedServices/CustomResourceDefinition: enabled by default" {
+@test "exportedServices/CustomerResourceDefinition: disabled by default" {
   cd `chart_dir`
-  local actual=$(helm template \
+  assert_empty helm template \
       -s templates/crd-exportedservices.yaml \
-      . | tee /dev/stderr |
-      yq 'length > 0' | tee /dev/stderr)
-  [ "${actual}" = "true" ]
+      .
 }
 
-@test "exportedServices/CustomResourceDefinition: enabled with controller.enabled=true" {
+@test "exportedServices/CustomerResourceDefinition: enabled with controller.enabled=true" {
   cd `chart_dir`
   local actual=$(helm template \
       -s templates/crd-exportedservices.yaml \
diff --git a/charts/consul/test/unit/crd-ingressgateways.bats b/charts/consul/test/unit/crd-ingressgateways.bats
index 367972e627..315a22c8e7 100644
--- a/charts/consul/test/unit/crd-ingressgateways.bats
+++ b/charts/consul/test/unit/crd-ingressgateways.bats
@@ -2,20 +2,14 @@
 
 load _helpers
 
-@test "ingressGateway/CustomResourceDefinition: enabled by default" {
+@test "ingressGateway/CustomerResourceDefinition: disabled by default" {
   cd `chart_dir`
-  local actual=$(helm template \
+  assert_empty helm template \
       -s templates/crd-ingressgateways.yaml \
-      . | tee /dev/stderr |
-      # The generated CRDs have "---" at the top which results in two objects
-      # being detected by yq, the first of which is null. We must therefore use
-      # yq -s so that length operates on both objects at once rather than
-      # individually, which would output false\ntrue and fail the test.
-      yq -s 'length > 0' | tee /dev/stderr)
-  [ "${actual}" = "true" ]
+      .
 }
 
-@test "ingressGateway/CustomResourceDefinition: enabled with controller.enabled=true" {
+@test "ingressGateway/CustomerResourceDefinition: enabled with controller.enabled=true" {
   cd `chart_dir`
   local actual=$(helm template \
       -s templates/crd-ingressgateways.yaml \
@@ -28,11 +22,3 @@ load _helpers
       yq -s 'length > 0' | tee /dev/stderr)
   [ "${actual}" = "true" ]
 }
-
-@test "ingressGateway/CustomResourceDefinition: disabled with controller.enabled=false" {
-  cd `chart_dir`
-  assert_empty helm template \
-      -s templates/crd-meshes.yaml \
-      --set 'controller.enabled=false' \
-      .
-}
diff --git a/charts/consul/test/unit/crd-meshes.bats b/charts/consul/test/unit/crd-meshes.bats
index 50c95b4086..4ad7acf321 100644
--- a/charts/consul/test/unit/crd-meshes.bats
+++ b/charts/consul/test/unit/crd-meshes.bats
@@ -2,20 +2,14 @@
 
 load _helpers
 
-@test "mesh/CustomResourceDefinition: enabled by default" {
+@test "mesh/CustomerResourceDefinition: disabled by default" {
   cd `chart_dir`
-  local actual=$(helm template \
+  assert_empty helm template \
       -s templates/crd-meshes.yaml \
-      . | tee /dev/stderr |
-      # The generated CRDs have "---" at the top which results in two objects
-      # being detected by yq, the first of which is null. We must therefore use
-      # yq -s so that length operates on both objects at once rather than
-      # individually, which would output false\ntrue and fail the test.
-      yq -s 'length > 0' | tee /dev/stderr)
-  [ "${actual}" = "true" ]
+      .
 }
 
-@test "mesh/CustomResourceDefinition: enabled with controller.enabled=true" {
+@test "mesh/CustomerResourceDefinition: enabled with controller.enabled=true" {
   cd `chart_dir`
   local actual=$(helm template \
       -s templates/crd-meshes.yaml \
@@ -28,11 +22,3 @@ load _helpers
       yq -s 'length > 0' | tee /dev/stderr)
   [ "${actual}" = "true" ]
 }
-
-@test "ingressGateway/CustomResourceDefinition: disabled with controller.enabled=false" {
-  cd `chart_dir`
-  assert_empty helm template \
-      -s templates/crd-meshes.yaml \
-      --set 'controller.enabled=false' \
-      .
-}
diff --git a/charts/consul/test/unit/crd-proxydefaults.bats b/charts/consul/test/unit/crd-proxydefaults.bats
index 02e9227b69..8f6c080c17 100644
--- a/charts/consul/test/unit/crd-proxydefaults.bats
+++ b/charts/consul/test/unit/crd-proxydefaults.bats
@@ -2,16 +2,14 @@
 
 load _helpers
 
-@test "proxyDefaults/CustomResourceDefinition: enabled by default" {
+@test "proxyDefaults/CustomerResourceDefinition: disabled by default" {
   cd `chart_dir`
-  local actual=$(helm template \
+  assert_empty helm template \
      -s templates/crd-proxydefaults.yaml \
-      . | tee /dev/stderr |
-      yq -s 'length > 0' | tee /dev/stderr)
-  [ "${actual}" = "true" ]
+      .
 }
 
-@test "proxyDefaults/CustomResourceDefinition: enabled with controller.enabled=true" {
+@test "proxyDefaults/CustomerResourceDefinition: enabled with controller.enabled=true" {
   cd `chart_dir`
   local actual=$(helm template \
       -s templates/crd-proxydefaults.yaml \
diff --git a/charts/consul/test/unit/crd-servicedefaults.bats b/charts/consul/test/unit/crd-servicedefaults.bats
index 37cd0568e4..bf61eb934f 100644
--- a/charts/consul/test/unit/crd-servicedefaults.bats
+++ b/charts/consul/test/unit/crd-servicedefaults.bats
@@ -2,16 +2,14 @@
 
 load _helpers
 
-@test "serviceDefaults/CustomResourceDefinition: enabled by default" {
+@test "serviceDefaults/CustomerResourceDefinition: disabled by default" {
   cd `chart_dir`
-  local actual=$(helm template \
+  assert_empty helm template \
      -s templates/crd-servicedefaults.yaml \
-      . | tee /dev/stderr |
-      yq -s 'length > 0' | tee /dev/stderr)
-  [ "${actual}" = "true" ]
+      .
 }
 
-@test "serviceDefaults/CustomResourceDefinition: enabled with controller.enabled=true" {
+@test "serviceDefaults/CustomerResourceDefinition: enabled with controller.enabled=true" {
   cd `chart_dir`
   local actual=$(helm template \
       -s templates/crd-servicedefaults.yaml \
diff --git a/charts/consul/test/unit/crd-serviceintentions.bats b/charts/consul/test/unit/crd-serviceintentions.bats
index df1b7820cd..8f699d165e 100644
--- a/charts/consul/test/unit/crd-serviceintentions.bats
+++ b/charts/consul/test/unit/crd-serviceintentions.bats
@@ -2,13 +2,11 @@
 
 load _helpers
 
-@test "serviceintentions/CustomResourceDefinitions: enabled by default" {
+@test "serviceintentions/CustomResourceDefinitions: disabled by default" {
   cd `chart_dir`
-  local actual=$(helm template \
+  assert_empty helm template \
      -s templates/crd-serviceintentions.yaml \
-      . | tee /dev/stderr |
-      yq -s 'length > 0' | tee /dev/stderr)
-  [ "${actual}" = "true" ]
+      .
 }
 
 @test "serviceintentions/CustomResourceDefinitions: enabled with controller.enabled=true" {
diff --git a/charts/consul/test/unit/crd-serviceresolvers.bats b/charts/consul/test/unit/crd-serviceresolvers.bats
index a6b6940a4f..660211ae9f 100644
--- a/charts/consul/test/unit/crd-serviceresolvers.bats
+++ b/charts/consul/test/unit/crd-serviceresolvers.bats
@@ -2,16 +2,14 @@
 
 load _helpers
 
-@test "serviceResolvers/CustomResourceDefinition: enabled by default" {
+@test "serviceResolvers/CustomerResourceDefinition: disabled by default" {
   cd `chart_dir`
-  local actual=$(helm template \
+  assert_empty helm template \
      -s templates/crd-serviceresolvers.yaml \
-      . | tee /dev/stderr |
-      yq -s 'length > 0' | tee /dev/stderr)
-  [ "${actual}" = "true" ]
+      .
 }
 
-@test "serviceResolvers/CustomResourceDefinition: enabled with controller.enabled=true" {
+@test "serviceResolvers/CustomerResourceDefinition: enabled with controller.enabled=true" {
   cd `chart_dir`
   local actual=$(helm template \
       -s templates/crd-serviceresolvers.yaml \
diff --git a/charts/consul/test/unit/crd-servicerouters.bats b/charts/consul/test/unit/crd-servicerouters.bats
index 9b95b9daba..cfe9bf7b06 100644
--- a/charts/consul/test/unit/crd-servicerouters.bats
+++ b/charts/consul/test/unit/crd-servicerouters.bats
@@ -2,16 +2,14 @@
 
 load _helpers
 
-@test "serviceRouters/CustomResourceDefinition: enabled by default" {
+@test "serviceRouters/CustomerResourceDefinition: disabled by default" {
   cd `chart_dir`
-  local actual=$(helm template \
+  assert_empty helm template \
      -s templates/crd-servicerouters.yaml \
-      . | tee /dev/stderr |
-      yq -s 'length > 0' | tee /dev/stderr)
-  [ "${actual}" = "true" ]
+      .
 }
 
-@test "serviceRouters/CustomResourceDefinition: enabled with controller.enabled=true" {
+@test "serviceRouters/CustomerResourceDefinition: enabled with controller.enabled=true" {
   cd `chart_dir`
   local actual=$(helm template \
       -s templates/crd-servicerouters.yaml \
diff --git a/charts/consul/test/unit/crd-servicesplitters.bats b/charts/consul/test/unit/crd-servicesplitters.bats
index 34d1261357..4e7bbdf61a 100644
--- a/charts/consul/test/unit/crd-servicesplitters.bats
+++ b/charts/consul/test/unit/crd-servicesplitters.bats
@@ -2,16 +2,14 @@
 
 load _helpers
 
-@test "serviceSplitters/CustomResourceDefinition: enabled by default" {
+@test "serviceSplitters/CustomerResourceDefinition: disabled by default" {
   cd `chart_dir`
-  local actual=$(helm template \
+  assert_empty helm template \
      -s templates/crd-servicesplitters.yaml \
-      . | tee /dev/stderr |
-      yq -s 'length > 0' | tee /dev/stderr)
-  [ "${actual}" = "true" ]
+      .
 }
 
-@test "serviceSplitters/CustomResourceDefinition: enabled with controller.enabled=true" {
+@test "serviceSplitters/CustomerResourceDefinition: enabled with controller.enabled=true" {
   cd `chart_dir`
   local actual=$(helm template \
       -s templates/crd-servicesplitters.yaml \
diff --git a/charts/consul/test/unit/crd-terminatinggateway.bats b/charts/consul/test/unit/crd-terminatinggateway.bats
index 658a271cf7..84ed725c90 100644
--- a/charts/consul/test/unit/crd-terminatinggateway.bats
+++ b/charts/consul/test/unit/crd-terminatinggateway.bats
@@ -2,16 +2,14 @@
 
 load _helpers
 
-@test "terminatingGateway/CustomResourceDefinition: enabled by default" {
+@test "terminatingGateway/CustomerResourceDefinition: disabled by default" {
   cd `chart_dir`
-  local actual=$(helm template \
+  assert_empty helm template \
      -s templates/crd-terminatinggateways.yaml \
-      . | tee /dev/stderr |
-      yq -s 'length > 0' | tee /dev/stderr)
-  [ "${actual}" = "true" ]
+      .
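+
+  # As the removed comments above note for the ingress-gateway and mesh CRDs,
+  # generated CRD manifests start with a leading "---", so yq sees two
+  # documents (the first of them null). Tests that do inspect a rendered CRD
+  # therefore pass -s so that length operates on the whole stream at once,
+  # e.g. (sketch):
+  #
+  #   helm template -s templates/crd-terminatinggateways.yaml \
+  #       --set 'controller.enabled=true' . | yq -s 'length > 0'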
| contains("-consul-api-timeout=5s")') + [ "${actual}" = "true" ] +} diff --git a/charts/consul/test/unit/expose-servers-service.bats b/charts/consul/test/unit/expose-servers-service.bats index 09dd9b5b8a..a2a3e21c17 100644 --- a/charts/consul/test/unit/expose-servers-service.bats +++ b/charts/consul/test/unit/expose-servers-service.bats @@ -5,8 +5,45 @@ load _helpers @test "expose-servers/Service: disabled by default" { cd `chart_dir` assert_empty helm template \ - -s templates/expose-servers-service.yaml \ - . + -s templates/expose-servers-service.yaml \ + . +} + +@test "expose-servers/Service: enabled when servers and peering are enabled" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/expose-servers-service.yaml \ + --set 'global.enabled=false' \ + --set 'server.enabled=true' \ + --set 'client.enabled=true' \ + --set 'connectInject.enabled=true' \ + --set 'global.peering.enabled=true' \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "expose-servers/Service: enable with global.enabled true and global.peering.enabled true" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/expose-servers-service.yaml \ + --set 'global.enabled=true' \ + --set 'connectInject.enabled=true' \ + --set 'global.peering.enabled=true' \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "expose-servers/Service: enable with global.peering.enabled true" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/expose-servers-service.yaml \ + --set 'connectInject.enabled=true' \ + --set 'global.peering.enabled=true' \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] } @test "expose-servers/Service: enable with global.adminPartitions.enabled true" { @@ -14,12 +51,19 @@ load _helpers local actual=$(helm template \ -s templates/expose-servers-service.yaml \ --set 'global.adminPartitions.enabled=true' \ - --set 'global.enableConsulNamespaces=true' \ . | tee /dev/stderr | yq 'length > 0' | tee /dev/stderr) [ "${actual}" = "true" ] } +@test "expose-servers/Service: disable when peering.enabled is false" { + cd `chart_dir` + assert_empty helm template \ + -s templates/expose-servers-service.yaml \ + --set 'server.enabled=true' \ + --set 'global.peering.enabled=false' \ + . +} @test "expose-servers/Service: disable with server.enabled" { cd `chart_dir` @@ -27,8 +71,7 @@ load _helpers -s templates/expose-servers-service.yaml \ --set 'server.enabled=false' \ --set 'connectInject.enabled=true' \ - --set 'global.adminPartitions.enabled=true' \ - --set 'global.enableConsulNamespaces=true' \ + --set 'global.peering.enabled=true' \ . } @@ -39,8 +82,7 @@ load _helpers --set 'global.enabled=false' \ --set 'client.enabled=true' \ --set 'connectInject.enabled=true' \ - --set 'global.adminPartitions.enabled=true' \ - --set 'global.enableConsulNamespaces=true' \ + --set 'global.peering.enabled=true' \ . } @@ -49,8 +91,7 @@ load _helpers local cmd=$(helm template \ -s templates/expose-servers-service.yaml \ --set 'connectInject.enabled=true' \ - --set 'global.adminPartitions.enabled=true' \ - --set 'global.enableConsulNamespaces=true' \ + --set 'global.peering.enabled=true' \ --set 'global.tls.enabled=true' \ . 
| tee /dev/stderr | yq '.spec.ports[0]' | tee /dev/stderr) @@ -65,8 +106,7 @@ load _helpers local cmd=$(helm template \ -s templates/expose-servers-service.yaml \ --set 'connectInject.enabled=true' \ - --set 'global.adminPartitions.enabled=true' \ - --set 'global.enableConsulNamespaces=true' \ + --set 'global.peering.enabled=true' \ --set 'global.tls.enabled=true' \ . | tee /dev/stderr | yq '.spec.ports[0]' | tee /dev/stderr) @@ -81,8 +121,7 @@ load _helpers local cmd=$(helm template \ -s templates/expose-servers-service.yaml \ --set 'connectInject.enabled=true' \ - --set 'global.adminPartitions.enabled=true' \ - --set 'global.enableConsulNamespaces=true' \ + --set 'global.peering.enabled=true' \ --set 'global.tls.enabled=true' \ --set 'global.tls.httpsOnly=false' \ . | tee /dev/stderr | @@ -95,8 +134,7 @@ load _helpers local cmd=$(helm template \ -s templates/expose-servers-service.yaml \ --set 'connectInject.enabled=true' \ - --set 'global.adminPartitions.enabled=true' \ - --set 'global.enableConsulNamespaces=true' \ + --set 'global.peering.enabled=true' \ --set 'global.tls.enabled=true' \ --set 'global.tls.httpsOnly=false' \ . | tee /dev/stderr | @@ -115,8 +153,7 @@ load _helpers local actual=$(helm template \ -s templates/expose-servers-service.yaml \ --set 'connectInject.enabled=true' \ - --set 'global.adminPartitions.enabled=true' \ - --set 'global.enableConsulNamespaces=true' \ + --set 'global.peering.enabled=true' \ . | tee /dev/stderr | yq -r '.metadata.annotations | length' | tee /dev/stderr) [ "${actual}" = "0" ] @@ -127,8 +164,7 @@ load _helpers local actual=$(helm template \ -s templates/expose-servers-service.yaml \ --set 'connectInject.enabled=true' \ - --set 'global.adminPartitions.enabled=true' \ - --set 'global.enableConsulNamespaces=true' \ + --set 'global.peering.enabled=true' \ --set 'server.exposeService.annotations=key: value' \ . | tee /dev/stderr | yq -r '.metadata.annotations.key' | tee /dev/stderr) @@ -143,8 +179,7 @@ load _helpers local actual=$(helm template \ -s templates/expose-servers-service.yaml \ --set 'connectInject.enabled=true' \ - --set 'global.adminPartitions.enabled=true' \ - --set 'global.enableConsulNamespaces=true' \ + --set 'global.peering.enabled=true' \ --set 'server.exposeService.type=NodePort' \ --set 'server.exposeService.nodePort.http=4443' \ . | tee /dev/stderr | @@ -157,8 +192,7 @@ load _helpers local actual=$(helm template \ -s templates/expose-servers-service.yaml \ --set 'connectInject.enabled=true' \ - --set 'global.adminPartitions.enabled=true' \ - --set 'global.enableConsulNamespaces=true' \ + --set 'global.peering.enabled=true' \ --set 'global.tls.enabled=true' \ --set 'server.exposeService.type=NodePort' \ --set 'server.exposeService.nodePort.https=4443' \ @@ -172,8 +206,7 @@ load _helpers local actual=$(helm template \ -s templates/expose-servers-service.yaml \ --set 'connectInject.enabled=true' \ - --set 'global.adminPartitions.enabled=true' \ - --set 'global.enableConsulNamespaces=true' \ + --set 'global.peering.enabled=true' \ --set 'server.exposeService.type=NodePort' \ --set 'server.exposeService.nodePort.rpc=4443' \ . | tee /dev/stderr | @@ -186,8 +219,7 @@ load _helpers local actual=$(helm template \ -s templates/expose-servers-service.yaml \ --set 'connectInject.enabled=true' \ - --set 'global.adminPartitions.enabled=true' \ - --set 'global.enableConsulNamespaces=true' \ + --set 'global.peering.enabled=true' \ --set 'server.exposeService.type=NodePort' \ --set 'server.exposeService.nodePort.serf=4444' \ . 
| tee /dev/stderr | @@ -200,8 +232,7 @@ load _helpers local actual=$(helm template \ -s templates/expose-servers-service.yaml \ --set 'connectInject.enabled=true' \ - --set 'global.adminPartitions.enabled=true' \ - --set 'global.enableConsulNamespaces=true' \ + --set 'global.peering.enabled=true' \ --set 'server.exposeService.type=NodePort' \ --set 'server.exposeService.nodePort.grpc=4444' \ . | tee /dev/stderr | @@ -214,8 +245,7 @@ load _helpers local ports=$(helm template \ -s templates/expose-servers-service.yaml \ --set 'connectInject.enabled=true' \ - --set 'global.adminPartitions.enabled=true' \ - --set 'global.enableConsulNamespaces=true' \ + --set 'global.peering.enabled=true' \ --set 'server.exposeService.type=NodePort' \ --set 'server.exposeService.nodePort.rpc=4443' \ --set 'server.exposeService.nodePort.grpc=4444' \ diff --git a/charts/consul/test/unit/ingress-gateways-deployment.bats b/charts/consul/test/unit/ingress-gateways-deployment.bats index 16327084bc..340aa4429b 100644 --- a/charts/consul/test/unit/ingress-gateways-deployment.bats +++ b/charts/consul/test/unit/ingress-gateways-deployment.bats @@ -39,6 +39,27 @@ load _helpers [ "${actual}" = "release-name-consul-ingress-gateway" ] } +@test "ingressGateways/Deployment: Adds consul service volumeMount to gateway container" { + cd `chart_dir` + local object=$(helm template \ + -s templates/ingress-gateways-deployment.yaml \ + --set 'ingressGateways.enabled=true' \ + --set 'connectInject.enabled=true' \ + . | yq '.spec.template.spec.containers[0].volumeMounts[1]' | tee /dev/stderr) + + local actual=$(echo $object | + yq -r '.name' | tee /dev/stderr) + [ "${actual}" = "consul-service" ] + + local actual=$(echo $object | + yq -r '.mountPath' | tee /dev/stderr) + [ "${actual}" = "/consul/service" ] + + local actual=$(echo $object | + yq -r '.readOnly' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + #-------------------------------------------------------------------- # prerequisites @@ -52,6 +73,40 @@ load _helpers [[ "$output" =~ "connectInject.enabled must be true" ]] } +@test "ingressGateways/Deployment: fails if client.grpc=false" { + cd `chart_dir` + run helm template \ + -s templates/ingress-gateways-deployment.yaml \ + --set 'ingressGateways.enabled=true' \ + --set 'client.grpc=false' \ + --set 'connectInject.enabled=true' . + [ "$status" -eq 1 ] + [[ "$output" =~ "client.grpc must be true" ]] +} + +@test "ingressGateways/Deployment: fails if global.enabled is false and clients are not explicitly enabled" { + cd `chart_dir` + run helm template \ + -s templates/ingress-gateways-deployment.yaml \ + --set 'ingressGateways.enabled=true' \ + --set 'connectInject.enabled=true' \ + --set 'global.enabled=false' . + [ "$status" -eq 1 ] + [[ "$output" =~ "clients must be enabled" ]] +} + +@test "ingressGateways/Deployment: fails if global.enabled is true but clients are explicitly disabled" { + cd `chart_dir` + run helm template \ + -s templates/ingress-gateways-deployment.yaml \ + --set 'ingressGateways.enabled=true' \ + --set 'connectInject.enabled=true' \ + --set 'global.enabled=true' \ + --set 'client.enabled=false' . 
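+  # bats' `run` captures the command's exit code in $status and its combined
+  # stdout/stderr in $output, so the next two assertions can check both that
+  # rendering fails and that it fails with the expected message.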
+  [ "$status" -eq 1 ]
+  [[ "$output" =~ "clients must be enabled" ]]
+}
+
 @test "ingressGateways/Deployment: fails if there are duplicate gateway names" {
   cd `chart_dir`
   run helm template \
@@ -83,15 +138,27 @@ load _helpers
   [[ "$output" =~ "terminating gateways cannot have duplicate names of any ingress gateways" ]]
 }
 
 #--------------------------------------------------------------------
-# dataplaneImage
+# envoyImage
+
+@test "ingressGateways/Deployment: envoy image has default global value" {
+  cd `chart_dir`
+  local actual=$(helm template \
+      -s templates/ingress-gateways-deployment.yaml \
+      --set 'ingressGateways.enabled=true' \
+      --set 'connectInject.enabled=true' \
+      . | tee /dev/stderr |
+      yq -s -r '.[0].spec.template.spec.containers[0].image' | tee /dev/stderr)
+  [[ "${actual}" =~ "envoyproxy/envoy:v" ]]
+}
 
-@test "ingressGateways/Deployment: dataplane image can be set using the global value" {
+@test "ingressGateways/Deployment: envoy image can be set using the global value" {
   cd `chart_dir`
   local actual=$(helm template \
       -s templates/ingress-gateways-deployment.yaml \
       --set 'ingressGateways.enabled=true' \
       --set 'connectInject.enabled=true' \
-      --set 'global.imageConsulDataplane=new/image' \
+      --set 'global.imageEnvoy=new/image' \
       . | tee /dev/stderr |
       yq -s -r '.[0].spec.template.spec.containers[0].image' | tee /dev/stderr)
   [ "${actual}" = "new/image" ]
@@ -100,32 +167,41 @@ load _helpers
 #--------------------------------------------------------------------
 # global.tls.enabled
 
-@test "ingressGateways/Deployment: sets flags when global.tls.enabled is false" {
+@test "ingressGateways/Deployment: sets TLS env variables when global.tls.enabled" {
   cd `chart_dir`
-  local object=$(helm template \
+  local env=$(helm template \
       -s templates/ingress-gateways-deployment.yaml \
       --set 'ingressGateways.enabled=true' \
       --set 'connectInject.enabled=true' \
-      --set 'global.tls.enabled=false' \
+      --set 'global.tls.enabled=true' \
       . | tee /dev/stderr |
-      yq -s -r '.[0].spec.template.spec.containers[0].command[2]' | tee /dev/stderr)
+      yq -s -r '.[0].spec.template.spec.containers[0].env[]' | tee /dev/stderr)
 
-  local actual=$(echo $object | yq -r '. | contains("-tls-disabled")' | tee /dev/stderr)
-  [ "${actual}" = "true" ]
+  local actual=$(echo $env | jq -r '. | select(.name == "CONSUL_HTTP_ADDR") | .value' | tee /dev/stderr)
+  [ "${actual}" = 'https://$(HOST_IP):8501' ]
+
+  local actual=$(echo $env | jq -r '. | select(.name == "CONSUL_GRPC_ADDR") | .value' | tee /dev/stderr)
+  [ "${actual}" = 'https://$(HOST_IP):8502' ]
+
+  local actual=$(echo $env | jq -r '. | select(.name == "CONSUL_CACERT") | .value' | tee /dev/stderr)
+  [ "${actual}" = "/consul/tls/ca/tls.crt" ]
 }
 
-@test "ingressGateways/Deployment: sets TLS flags when global.tls.enabled" {
+@test "ingressGateways/Deployment: sets TLS env variables in consul sidecar when global.tls.enabled" {
   cd `chart_dir`
-  local object=$(helm template \
+  local env=$(helm template \
       -s templates/ingress-gateways-deployment.yaml \
       --set 'ingressGateways.enabled=true' \
       --set 'connectInject.enabled=true' \
       --set 'global.tls.enabled=true' \
       . | tee /dev/stderr |
-      yq -s -r '.[0].spec.template.spec.containers[0].command[2]' | tee /dev/stderr)
+      yq -s -r '.[0].spec.template.spec.containers[1].env[]' | tee /dev/stderr)
 
-  local actual=$(echo $object | yq -r '. | contains("-ca-certs=/consul/tls/ca/tls.crt")' | tee /dev/stderr)
-  [ "${actual}" = "true" ]
+  local actual=$(echo $env | jq -r '. | select(.name == "CONSUL_HTTP_ADDR") | .value' | tee /dev/stderr)
+  [ "${actual}" = 'https://$(HOST_IP):8501' ]
+
+  local actual=$(echo $env | jq -r '. | select(.name == "CONSUL_CACERT") | .value' | tee /dev/stderr)
+  [ "${actual}" = "/consul/tls/ca/tls.crt" ]
 }
 
 @test "ingressGateways/Deployment: can overwrite CA secret with the provided one" {
@@ -163,72 +239,183 @@ load _helpers
   [ "${actual}" != "" ]
 }
 
+#--------------------------------------------------------------------
+# global.tls.enableAutoEncrypt
+
+@test "ingressGateways/Deployment: consul-auto-encrypt-ca-cert volume is added when TLS with auto-encrypt is enabled" {
+  cd `chart_dir`
+  local actual=$(helm template \
+      -s templates/ingress-gateways-deployment.yaml \
+      --set 'ingressGateways.enabled=true' \
+      --set 'connectInject.enabled=true' \
+      --set 'global.tls.enabled=true' \
+      --set 'global.tls.enableAutoEncrypt=true' \
+      . | tee /dev/stderr |
+      yq -s '.[0].spec.template.spec.volumes[] | select(.name == "consul-auto-encrypt-ca-cert") | length > 0' | tee /dev/stderr)
+  [ "${actual}" = "true" ]
+}
+
+@test "ingressGateways/Deployment: consul-auto-encrypt-ca-cert volumeMount is added when TLS with auto-encrypt is enabled" {
+  cd `chart_dir`
+  local actual=$(helm template \
+      -s templates/ingress-gateways-deployment.yaml \
+      --set 'ingressGateways.enabled=true' \
+      --set 'connectInject.enabled=true' \
+      --set 'global.tls.enabled=true' \
+      --set 'global.tls.enableAutoEncrypt=true' \
+      . | tee /dev/stderr |
+      yq -s '.[0].spec.template.spec.containers[0].volumeMounts[] | select(.name == "consul-auto-encrypt-ca-cert") | length > 0' | tee /dev/stderr)
+  [ "${actual}" = "true" ]
+}
+
+@test "ingressGateways/Deployment: get-auto-encrypt-client-ca init container is created when TLS with auto-encrypt is enabled" {
+  cd `chart_dir`
+  local actual=$(helm template \
+      -s templates/ingress-gateways-deployment.yaml \
+      --set 'ingressGateways.enabled=true' \
+      --set 'connectInject.enabled=true' \
+      --set 'global.tls.enabled=true' \
+      --set 'global.tls.enableAutoEncrypt=true' \
+      . | tee /dev/stderr |
+      yq -s '.[0].spec.template.spec.initContainers[] | select(.name == "get-auto-encrypt-client-ca") | length > 0' | tee /dev/stderr)
+  [ "${actual}" = "true" ]
+}
+
+@test "ingressGateways/Deployment: consul-ca-cert volume is not added if externalServers.enabled=true and externalServers.useSystemRoots=true" {
+  cd `chart_dir`
+  local actual=$(helm template \
+      -s templates/ingress-gateways-deployment.yaml \
+      --set 'ingressGateways.enabled=true' \
+      --set 'connectInject.enabled=true' \
+      --set 'global.tls.enabled=true' \
+      --set 'global.tls.enableAutoEncrypt=true' \
+      --set 'externalServers.enabled=true' \
+      --set 'externalServers.hosts[0]=foo.com' \
+      --set 'externalServers.useSystemRoots=true' \
+      . | tee /dev/stderr |
+      yq -s '.[0].spec.template.spec.volumes[] | select(.name == "consul-ca-cert")' | tee /dev/stderr)
+  [ "${actual}" = "" ]
+}
+
 #--------------------------------------------------------------------
 # global.acls.manageSystemACLs
 
-@test "ingressGateways/Deployment: Adds consul envvars on ingress-gateway-init init container when ACLs are enabled and tls is enabled" {
+@test "ingressGateways/Deployment: consul-sidecar uses -token-file flag when global.acls.manageSystemACLs=true" {
   cd `chart_dir`
-  local env=$(helm template \
+  local actual=$(helm template \
      -s templates/ingress-gateways-deployment.yaml \
      --set 'ingressGateways.enabled=true' \
      --set 'connectInject.enabled=true' \
      --set 'global.acls.manageSystemACLs=true' \
      . | tee /dev/stderr |
-      yq -r '.spec.template.spec.initContainers[0].env[]' | tee /dev/stderr)
+      yq -s '[.[0].spec.template.spec.containers[1].command[7]] | any(contains("-token-file=/consul/service/acl-token"))' | tee /dev/stderr)
+  [ "${actual}" = "true" ]
+}
 
-  local actual=$(echo $env | jq -r '. | select(.name == "CONSUL_LOGIN_AUTH_METHOD") | .value' | tee /dev/stderr)
-  [ "${actual}" = "release-name-consul-k8s-component-auth-method" ]
+@test "ingressGateways/Deployment: consul-sidecar uses -consul-api-timeout flag" {
+  cd `chart_dir`
+  local actual=$(helm template \
+      -s templates/ingress-gateways-deployment.yaml \
+      --set 'ingressGateways.enabled=true' \
+      --set 'connectInject.enabled=true' \
+      . | tee /dev/stderr |
+      yq -s '[.[0].spec.template.spec.containers[1].command[6]] | any(contains("-consul-api-timeout=5s"))' | tee /dev/stderr)
+  [ "${actual}" = "true" ]
+}
 
-  local actual=$(echo $env | jq -r '. | select(.name == "CONSUL_LOGIN_DATACENTER") | .value' | tee /dev/stderr)
-  [ "${actual}" = "dc1" ]
+@test "ingressGateways/Deployment: Adds consul envvars CONSUL_HTTP_ADDR on ingress-gateway-init init container when ACLs are enabled and tls is enabled" {
+  cd `chart_dir`
+  local env=$(helm template \
+      -s templates/ingress-gateways-deployment.yaml \
+      --set 'ingressGateways.enabled=true' \
+      --set 'connectInject.enabled=true' \
+      --set 'global.acls.manageSystemACLs=true' \
+      --set 'global.tls.enabled=true' \
+      . | tee /dev/stderr |
+      yq -r '.spec.template.spec.initContainers[1].env[]' | tee /dev/stderr)
 
-  local actual=$(echo $env | jq -r '. | select(.name == "CONSUL_LOGIN_META") | .value' | tee /dev/stderr)
-  [ "${actual}" = 'component=ingress-gateway,pod=$(NAMESPACE)/$(POD_NAME)' ]
+  local actual
+  actual=$(echo $env | jq -r '. | select(.name == "CONSUL_HTTP_ADDR") | .value' | tee /dev/stderr)
+  [ "${actual}" = "https://\$(HOST_IP):8501" ]
 }
 
-@test "ingressGateways/Deployment: ACL flags are not set when acls are disabled" {
+@test "ingressGateways/Deployment: Adds consul envvars CONSUL_HTTP_ADDR on ingress-gateway-init init container when ACLs are enabled and tls is not enabled" {
   cd `chart_dir`
-  local object=$(helm template \
-      -s templates/ingress-gateways-deployment.yaml \
+  local env=$(helm template \
+      -s templates/ingress-gateways-deployment.yaml \
       --set 'connectInject.enabled=true' \
       --set 'ingressGateways.enabled=true' \
-      --set 'global.acls.manageSystemACLs=false' \
+      --set 'global.acls.manageSystemACLs=true' \
       . | tee /dev/stderr |
-      yq -s -r '.[0].spec.template.spec.containers[0].command[2]' | tee /dev/stderr)
-
-  local actual=$(echo $object | yq -r '. | contains("-login-bearer-path")' | tee /dev/stderr)
-  [ "${actual}" = "false" ]
+      yq -r '.spec.template.spec.initContainers[1].env[]' | tee /dev/stderr)
 
-  local actual=$(echo $object | yq -r '. | contains("-login-meta")' | tee /dev/stderr)
-  [ "${actual}" = "false" ]
+  local actual
+  actual=$(echo $env | jq -r '. | select(.name == "CONSUL_HTTP_ADDR") | .value' | tee /dev/stderr)
+  [ "${actual}" = "http://\$(HOST_IP):8500" ]
+}
 
-  local actual=$(echo $object | yq -r '. | contains("-login-method")' | tee /dev/stderr)
-  [ "${actual}" = "false" ]
+@test "ingressGateways/Deployment: Does not add consul envvars CONSUL_CACERT on ingress-gateway-init init container when ACLs are enabled and tls is not enabled" {
+  cd `chart_dir`
+  local actual=$(helm template \
+      -s templates/ingress-gateways-deployment.yaml \
+      --set 'connectInject.enabled=true' \
+      --set 'ingressGateways.enabled=true' \
+      --set 'global.acls.manageSystemACLs=true' \
+      . | tee /dev/stderr |
+      yq '.spec.template.spec.initContainers[1].env[] | select(.name == "CONSUL_CACERT")' | tee /dev/stderr)
 
-  local actual=$(echo $object | yq -r '. | contains("-credential-type=login")' | tee /dev/stderr)
-  [ "${actual}" = "false" ]
+  [ "${actual}" = "" ]
 }
 
-@test "ingressGateways/Deployment: command flags are set when acls are enabled" {
+@test "ingressGateways/Deployment: Adds consul envvars CONSUL_CACERT on ingress-gateway-init init container when ACLs are enabled and tls is enabled" {
   cd `chart_dir`
-  local object=$(helm template \
-      -s templates/ingress-gateways-deployment.yaml \
+  local env=$(helm template \
+      -s templates/ingress-gateways-deployment.yaml \
       --set 'connectInject.enabled=true' \
       --set 'ingressGateways.enabled=true' \
       --set 'global.acls.manageSystemACLs=true' \
+      --set 'global.tls.enabled=true' \
       . | tee /dev/stderr |
-      yq -s -r '.[0].spec.template.spec.containers[0].command[2]' | tee /dev/stderr)
+      yq -r '.spec.template.spec.initContainers[1].env[]' | tee /dev/stderr)
 
-  local actual=$(echo $object | yq -r '. | contains("-login-bearer-token-path=/var/run/secrets/kubernetes.io/serviceaccount/token")' | tee /dev/stderr)
-  [ "${actual}" = "true" ]
+  local actual=$(echo $env | jq -r '. | select(.name == "CONSUL_CACERT") | .value' | tee /dev/stderr)
+  [ "${actual}" = "/consul/tls/ca/tls.crt" ]
+}
 
-  local actual=$(echo $object | yq -r '. | contains("-login-meta=component=ingress-gateway")' | tee /dev/stderr)
-  [ "${actual}" = "true" ]
+@test "ingressGateways/Deployment: CONSUL_HTTP_TOKEN_FILE is not set when acls are disabled" {
+  cd `chart_dir`
+  local actual=$(helm template \
+      -s templates/ingress-gateways-deployment.yaml \
+      --set 'connectInject.enabled=true' \
+      --set 'ingressGateways.enabled=true' \
+      --set 'global.acls.manageSystemACLs=false' \
+      . | tee /dev/stderr |
+      yq '[.spec.template.spec.containers[0].env[0].name] | any(contains("CONSUL_HTTP_TOKEN_FILE"))' | tee /dev/stderr)
+  [ "${actual}" = "false" ]
+}
 
-  local actual=$(echo $object | yq -r '. | contains("-login-auth-method=release-name-consul-k8s-component-auth-method")' | tee /dev/stderr)
+@test "ingressGateways/Deployment: CONSUL_HTTP_TOKEN_FILE is set when acls are enabled" {
+  cd `chart_dir`
+  local actual=$(helm template \
+      -s templates/ingress-gateways-deployment.yaml \
+      --set 'ingressGateways.enabled=true' \
+      --set 'connectInject.enabled=true' \
+      --set 'global.acls.manageSystemACLs=true' \
+      . | tee /dev/stderr |
+      yq -s '[.[0].spec.template.spec.containers[0].env[].name] | any(contains("CONSUL_HTTP_TOKEN_FILE"))' | tee /dev/stderr)
   [ "${actual}" = "true" ]
+}
 
-  local actual=$(echo $object | yq -r '. | contains("-credential-type=login")' | tee /dev/stderr)
+@test "ingressGateways/Deployment: consul-logout preStop hook is added when ACLs are enabled" {
+  cd `chart_dir`
+  local actual=$(helm template \
+      -s templates/ingress-gateways-deployment.yaml \
+      --set 'ingressGateways.enabled=true' \
+      --set 'connectInject.enabled=true' \
+      --set 'global.acls.manageSystemACLs=true' \
+      .
| tee /dev/stderr | + yq '[.spec.template.spec.containers[0].lifecycle.preStop.exec.command[3]] | any(contains("/consul-bin/consul logout"))' | tee /dev/stderr) [ "${actual}" = "true" ] } @@ -271,6 +458,19 @@ load _helpers [ "${actual}" = "/metrics" ] } +@test "ingressGateways/Deployment: when global.metrics.enabled=true, sets proxy setting" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/ingress-gateways-deployment.yaml \ + --set 'ingressGateways.enabled=true' \ + --set 'connectInject.enabled=true' \ + --set 'global.metrics.enabled=true' \ + . | tee /dev/stderr | + yq '.spec.template.spec.initContainers[1].command | join(" ") | contains("envoy_prometheus_bind_addr = \"${POD_IP}:20200\"")' | tee /dev/stderr) + + [ "${actual}" = "true" ] +} + @test "ingressGateways/Deployment: when global.metrics.enableGatewayMetrics=false, does not set proxy setting" { cd `chart_dir` local object=$(helm template \ @@ -282,6 +482,9 @@ load _helpers . | tee /dev/stderr | yq '.spec.template' | tee /dev/stderr) + local actual=$(echo $object | yq -r '.spec.initContainers[1].command | join(" ") | contains("envoy_prometheus_bind_addr = \"${POD_IP}:20200\"")' | tee /dev/stderr) + [ "${actual}" = "false" ] + local actual=$(echo $object | yq -s -r '.[0].metadata.annotations."prometheus.io/path"' | tee /dev/stderr) [ "${actual}" = "null" ] @@ -302,6 +505,9 @@ load _helpers . | tee /dev/stderr | yq '.spec.template' | tee /dev/stderr) + local actual=$(echo $object | yq -r '.spec.initContainers[1].command | join(" ") | contains("envoy_prometheus_bind_addr = \"${POD_IP}:20200\"")' | tee /dev/stderr) + [ "${actual}" = "false" ] + local actual=$(echo $object | yq -s -r '.[0].metadata.annotations."prometheus.io/path"' | tee /dev/stderr) [ "${actual}" = "null" ] @@ -525,78 +731,197 @@ load _helpers yq -s -r '.[0].spec.template.spec.initContainers[0].resources' | tee /dev/stderr) local actual=$(echo $object | yq -r '.requests.memory' | tee /dev/stderr) - [ "${actual}" = "50Mi" ] + [ "${actual}" = "25Mi" ] local actual=$(echo $object | yq -r '.requests.cpu' | tee /dev/stderr) [ "${actual}" = "50m" ] local actual=$(echo $object | yq -r '.limits.memory' | tee /dev/stderr) - [ "${actual}" = "50Mi" ] + [ "${actual}" = "150Mi" ] local actual=$(echo $object | yq -r '.limits.cpu' | tee /dev/stderr) [ "${actual}" = "50m" ] } -#-------------------------------------------------------------------- -# affinity - -@test "ingressGateways/Deployment: affinity defaults to one per node" { +@test "ingressGateways/Deployment: init container resources can be set through defaults" { cd `chart_dir` - local actual=$(helm template \ - -s templates/ingress-gateways-deployment.yaml \ + local object=$(helm template \ + -s templates/ingress-gateways-deployment.yaml \ --set 'ingressGateways.enabled=true' \ --set 'connectInject.enabled=true' \ + --set 'ingressGateways.defaults.initCopyConsulContainer.resources.requests.memory=memory' \ + --set 'ingressGateways.defaults.initCopyConsulContainer.resources.requests.cpu=cpu' \ + --set 'ingressGateways.defaults.initCopyConsulContainer.resources.limits.memory=memory2' \ + --set 'ingressGateways.defaults.initCopyConsulContainer.resources.limits.cpu=cpu2' \ . 
| tee /dev/stderr | - yq -s -r '.[0].spec.template.spec.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution[0].topologyKey' | tee /dev/stderr) - [ "${actual}" = "kubernetes.io/hostname" ] -} + yq -s -r '.[0].spec.template.spec.initContainers[0].resources' | tee /dev/stderr) -@test "ingressGateways/Deployment: affinity can be set through defaults" { - cd `chart_dir` - local actual=$(helm template \ - -s templates/ingress-gateways-deployment.yaml \ - --set 'ingressGateways.enabled=true' \ - --set 'connectInject.enabled=true' \ - --set 'ingressGateways.defaults.affinity=key: value' \ - . | tee /dev/stderr | - yq -s -r '.[0].spec.template.spec.affinity.key' | tee /dev/stderr) - [ "${actual}" = "value" ] + local actual=$(echo $object | yq -r '.requests.memory' | tee /dev/stderr) + [ "${actual}" = "memory" ] + + local actual=$(echo $object | yq -r '.requests.cpu' | tee /dev/stderr) + [ "${actual}" = "cpu" ] + + local actual=$(echo $object | yq -r '.limits.memory' | tee /dev/stderr) + [ "${actual}" = "memory2" ] + + local actual=$(echo $object | yq -r '.limits.cpu' | tee /dev/stderr) + [ "${actual}" = "cpu2" ] } -@test "ingressGateways/Deployment: affinity can be set through specific gateway, overriding defaults" { +@test "ingressGateways/Deployment: init container resources can be set through specific gateway, overriding defaults" { cd `chart_dir` - local actual=$(helm template \ - -s templates/ingress-gateways-deployment.yaml \ + local object=$(helm template \ + -s templates/ingress-gateways-deployment.yaml \ --set 'ingressGateways.enabled=true' \ --set 'connectInject.enabled=true' \ - --set 'ingressGateways.defaults.affinity=key: value' \ + --set 'ingressGateways.defaults.initCopyConsulContainer.resources.requests.memory=memory' \ + --set 'ingressGateways.defaults.initCopyConsulContainer.resources.requests.cpu=cpu' \ + --set 'ingressGateways.defaults.initCopyConsulContainer.resources.limits.memory=memory2' \ + --set 'ingressGateways.defaults.initCopyConsulContainer.resources.limits.cpu=cpu2' \ --set 'ingressGateways.gateways[0].name=gateway1' \ - --set 'ingressGateways.gateways[0].affinity=key2: value2' \ + --set 'ingressGateways.gateways[0].initCopyConsulContainer.resources.requests.memory=gwmemory' \ + --set 'ingressGateways.gateways[0].initCopyConsulContainer.resources.requests.cpu=gwcpu' \ + --set 'ingressGateways.gateways[0].initCopyConsulContainer.resources.limits.memory=gwmemory2' \ + --set 'ingressGateways.gateways[0].initCopyConsulContainer.resources.limits.cpu=gwcpu2' \ . 
| tee /dev/stderr | - yq -s -r '.[0].spec.template.spec.affinity.key2' | tee /dev/stderr) - [ "${actual}" = "value2" ] + yq -s '.[0].spec.template.spec.initContainers[0].resources' | tee /dev/stderr) + + local actual=$(echo $object | yq -r '.requests.memory' | tee /dev/stderr) + [ "${actual}" = "gwmemory" ] + + local actual=$(echo $object | yq -r '.requests.cpu' | tee /dev/stderr) + [ "${actual}" = "gwcpu" ] + + local actual=$(echo $object | yq -r '.limits.memory' | tee /dev/stderr) + [ "${actual}" = "gwmemory2" ] + + local actual=$(echo $object | yq -r '.limits.cpu' | tee /dev/stderr) + [ "${actual}" = "gwcpu2" ] } #-------------------------------------------------------------------- -# tolerations +# consul sidecar resources -@test "ingressGateways/Deployment: no tolerations by default" { +@test "ingressGateways/Deployment: consul sidecar has default resources" { cd `chart_dir` - local actual=$(helm template \ + local object=$(helm template \ -s templates/ingress-gateways-deployment.yaml \ --set 'ingressGateways.enabled=true' \ --set 'connectInject.enabled=true' \ . | tee /dev/stderr | - yq -s -r '.[0].spec.template.spec.tolerations' | tee /dev/stderr) - [ "${actual}" = "null" ] -} + yq -s -r '.[0].spec.template.spec.containers[1].resources' | tee /dev/stderr) -@test "ingressGateways/Deployment: tolerations can be set through defaults" { - cd `chart_dir` - local actual=$(helm template \ - -s templates/ingress-gateways-deployment.yaml \ - --set 'ingressGateways.enabled=true' \ - --set 'connectInject.enabled=true' \ + local actual=$(echo $object | yq -r '.requests.memory' | tee /dev/stderr) + [ "${actual}" = "25Mi" ] + + local actual=$(echo $object | yq -r '.requests.cpu' | tee /dev/stderr) + [ "${actual}" = "20m" ] + + local actual=$(echo $object | yq -r '.limits.memory' | tee /dev/stderr) + [ "${actual}" = "50Mi" ] + + local actual=$(echo $object | yq -r '.limits.cpu' | tee /dev/stderr) + [ "${actual}" = "20m" ] +} + +@test "ingressGateways/Deployment: consul sidecar resources can be set" { + cd `chart_dir` + local object=$(helm template \ + -s templates/ingress-gateways-deployment.yaml \ + --set 'ingressGateways.enabled=true' \ + --set 'connectInject.enabled=true' \ + --set 'global.consulSidecarContainer.resources.requests.memory=memory' \ + --set 'global.consulSidecarContainer.resources.requests.cpu=cpu' \ + --set 'global.consulSidecarContainer.resources.limits.memory=memory2' \ + --set 'global.consulSidecarContainer.resources.limits.cpu=cpu2' \ + . | tee /dev/stderr | + yq -s -r '.[0].spec.template.spec.containers[1].resources' | tee /dev/stderr) + + local actual=$(echo $object | yq -r '.requests.memory' | tee /dev/stderr) + [ "${actual}" = "memory" ] + + local actual=$(echo $object | yq -r '.requests.cpu' | tee /dev/stderr) + [ "${actual}" = "cpu" ] + + local actual=$(echo $object | yq -r '.limits.memory' | tee /dev/stderr) + [ "${actual}" = "memory2" ] + + local actual=$(echo $object | yq -r '.limits.cpu' | tee /dev/stderr) + [ "${actual}" = "cpu2" ] +} + +@test "ingressGateways/Deployment: fails if global.lifecycleSidecarContainer is set" { + cd `chart_dir` + run helm template \ + -s templates/ingress-gateways-deployment.yaml \ + --set 'ingressGateways.enabled=true' \ + --set 'connectInject.enabled=true' \ + --set 'global.lifecycleSidecarContainer.resources.requests.memory=100Mi' . + [ "$status" -eq 1 ] + [[ "$output" =~ "global.lifecycleSidecarContainer has been renamed to global.consulSidecarContainer. Please set values using global.consulSidecarContainer." 
]] +} + +#-------------------------------------------------------------------- +# affinity + +@test "ingressGateways/Deployment: affinity defaults to one per node" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/ingress-gateways-deployment.yaml \ + --set 'ingressGateways.enabled=true' \ + --set 'connectInject.enabled=true' \ + . | tee /dev/stderr | + yq -s -r '.[0].spec.template.spec.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution[0].topologyKey' | tee /dev/stderr) + [ "${actual}" = "kubernetes.io/hostname" ] +} + +@test "ingressGateways/Deployment: affinity can be set through defaults" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/ingress-gateways-deployment.yaml \ + --set 'ingressGateways.enabled=true' \ + --set 'connectInject.enabled=true' \ + --set 'ingressGateways.defaults.affinity=key: value' \ + . | tee /dev/stderr | + yq -s -r '.[0].spec.template.spec.affinity.key' | tee /dev/stderr) + [ "${actual}" = "value" ] +} + +@test "ingressGateways/Deployment: affinity can be set through specific gateway, overriding defaults" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/ingress-gateways-deployment.yaml \ + --set 'ingressGateways.enabled=true' \ + --set 'connectInject.enabled=true' \ + --set 'ingressGateways.defaults.affinity=key: value' \ + --set 'ingressGateways.gateways[0].name=gateway1' \ + --set 'ingressGateways.gateways[0].affinity=key2: value2' \ + . | tee /dev/stderr | + yq -s -r '.[0].spec.template.spec.affinity.key2' | tee /dev/stderr) + [ "${actual}" = "value2" ] +} + +#-------------------------------------------------------------------- +# tolerations + +@test "ingressGateways/Deployment: no tolerations by default" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/ingress-gateways-deployment.yaml \ + --set 'ingressGateways.enabled=true' \ + --set 'connectInject.enabled=true' \ + . | tee /dev/stderr | + yq -s -r '.[0].spec.template.spec.tolerations' | tee /dev/stderr) + [ "${actual}" = "null" ] +} + +@test "ingressGateways/Deployment: tolerations can be set through defaults" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/ingress-gateways-deployment.yaml \ + --set 'ingressGateways.enabled=true' \ + --set 'connectInject.enabled=true' \ --set 'ingressGateways.defaults.tolerations=- key: value' \ . | tee /dev/stderr | yq -s -r '.[0].spec.template.spec.tolerations[0].key' | tee /dev/stderr) @@ -749,7 +1074,7 @@ load _helpers --set 'connectInject.enabled=true' \ . | tee /dev/stderr | yq -s -r '.[0].spec.template.metadata.annotations | length' | tee /dev/stderr) - [ "${actual}" = "5" ] + [ "${actual}" = "1" ] } @test "ingressGateways/Deployment: extra annotations can be set through defaults" { @@ -764,7 +1089,7 @@ key2: value2' \ yq -s -r '.[0].spec.template.metadata.annotations' | tee /dev/stderr) local actual=$(echo $object | yq '. | length' | tee /dev/stderr) - [ "${actual}" = "7" ] + [ "${actual}" = "3" ] local actual=$(echo $object | yq -r '.key1' | tee /dev/stderr) [ "${actual}" = "value1" ] @@ -786,7 +1111,7 @@ key2: value2' \ yq -s -r '.[0].spec.template.metadata.annotations' | tee /dev/stderr) local actual=$(echo $object | yq '. | length' | tee /dev/stderr) - [ "${actual}" = "7" ] + [ "${actual}" = "3" ] local actual=$(echo $object | yq -r '.key1' | tee /dev/stderr) [ "${actual}" = "value1" ] @@ -809,7 +1134,7 @@ key2: value2' \ yq -s -r '.[0].spec.template.metadata.annotations' | tee /dev/stderr) local actual=$(echo $object | yq '. 
| length' | tee /dev/stderr)
-  [ "${actual}" = "8" ]
+  [ "${actual}" = "4" ]
 
   local actual=$(echo $object | yq -r '.defaultkey' | tee /dev/stderr)
   [ "${actual}" = "defaultvalue" ]
@@ -821,6 +1146,413 @@ key2: value2' \
   [ "${actual}" = "value2" ]
 }
 
+#--------------------------------------------------------------------
+# WAN_ADDR
+
+@test "ingressGateways/Deployment: WAN_ADDR set correctly for ClusterIP service set in defaults (the default)" {
+  cd `chart_dir`
+  local actual=$(helm template \
+      -s templates/ingress-gateways-deployment.yaml \
+      --set 'ingressGateways.enabled=true' \
+      --set 'connectInject.enabled=true' \
+      . | tee /dev/stderr |
+      yq -s -r '.[0].spec.template.spec.initContainers | map(select(.name == "ingress-gateway-init"))[0] | .command[2] | contains("WAN_ADDR=\"$(cat /tmp/address.txt)\"")' | tee /dev/stderr)
+  [ "${actual}" = "true" ]
+}
+
+@test "ingressGateways/Deployment: WAN_ADDR set correctly for ClusterIP service set in specific gateway overriding defaults" {
+  cd `chart_dir`
+  local actual=$(helm template \
+      -s templates/ingress-gateways-deployment.yaml \
+      --set 'ingressGateways.enabled=true' \
+      --set 'connectInject.enabled=true' \
+      --set 'ingressGateways.defaults.service.type=Static' \
+      --set 'ingressGateways.gateways[0].name=ingress-gateway' \
+      --set 'ingressGateways.gateways[0].service.type=ClusterIP' \
+      . | tee /dev/stderr |
+      yq -s -r '.[0].spec.template.spec.initContainers | map(select(.name == "ingress-gateway-init"))[0] | .command[2] | contains("WAN_ADDR=\"$(cat /tmp/address.txt)\"")' | tee /dev/stderr)
+  [ "${actual}" = "true" ]
+}
+
+@test "ingressGateways/Deployment: WAN_ADDR set correctly for LoadBalancer service set in defaults" {
+  cd `chart_dir`
+  local actual=$(helm template \
+      -s templates/ingress-gateways-deployment.yaml \
+      --set 'ingressGateways.enabled=true' \
+      --set 'connectInject.enabled=true' \
+      --set 'ingressGateways.defaults.service.type=LoadBalancer' \
+      . | tee /dev/stderr |
+      yq -s -r '.[0].spec.template.spec.initContainers | map(select(.name == "ingress-gateway-init"))[0] | .command[2] | contains("WAN_ADDR=\"$(cat /tmp/address.txt)\"")' | tee /dev/stderr)
+  [ "${actual}" = "true" ]
+}
+
+@test "ingressGateways/Deployment: WAN_ADDR set correctly for LoadBalancer service set in specific gateway overriding defaults" {
+  cd `chart_dir`
+  local actual=$(helm template \
+      -s templates/ingress-gateways-deployment.yaml \
+      --set 'ingressGateways.enabled=true' \
+      --set 'connectInject.enabled=true' \
+      --set 'ingressGateways.gateways[0].name=ingress-gateway' \
+      --set 'ingressGateways.gateways[0].service.type=LoadBalancer' \
+      . | tee /dev/stderr |
+      yq -s -r '.[0].spec.template.spec.initContainers | map(select(.name == "ingress-gateway-init"))[0] | .command[2] | contains("WAN_ADDR=\"$(cat /tmp/address.txt)\"")' | tee /dev/stderr)
+  [ "${actual}" = "true" ]
+}
+
+@test "ingressGateways/Deployment: WAN_ADDR set correctly for NodePort service set in defaults" {
+  cd `chart_dir`
+  local actual=$(helm template \
+      -s templates/ingress-gateways-deployment.yaml \
+      --set 'ingressGateways.enabled=true' \
+      --set 'connectInject.enabled=true' \
+      --set 'ingressGateways.defaults.service.type=NodePort' \
+      --set 'ingressGateways.defaults.service.ports[0].nodePort=1234' \
+      .
| tee /dev/stderr | + yq -s -r '.[0].spec.template.spec.initContainers | map(select(.name == "ingress-gateway-init"))[0] | .command[2] | contains("WAN_ADDR=\"${HOST_IP}\"")' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "ingressGateways/Deployment: WAN_ADDR set correctly for NodePort service set in specific gateway overriding defaults" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/ingress-gateways-deployment.yaml \ + --set 'ingressGateways.enabled=true' \ + --set 'connectInject.enabled=true' \ + --set 'ingressGateways.gateways[0].name=ingress-gateway' \ + --set 'ingressGateways.gateways[0].service.type=NodePort' \ + --set 'ingressGateways.gateways[0].service.ports[0].nodePort=1234' \ + . | tee /dev/stderr | + yq -s -r '.[0].spec.template.spec.initContainers | map(select(.name == "ingress-gateway-init"))[0] | .command[2] | contains("WAN_ADDR=\"${HOST_IP}\"")' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "ingressGateways/Deployment: WAN_ADDR definition fails if using unknown service type in defaults" { + cd `chart_dir` + run helm template \ + -s templates/ingress-gateways-deployment.yaml \ + --set 'ingressGateways.enabled=true' \ + --set 'connectInject.enabled=true' \ + --set 'ingressGateways.defaults.service.type=Static' \ + . + + [ "$status" -eq 1 ] + [[ "$output" =~ "currently set ingressGateway value service.type is not supported" ]] +} + +@test "ingressGateways/Deployment: WAN_ADDR definition fails if using unknown service type in specific gateway overriding defaults" { + cd `chart_dir` + run helm template \ + -s templates/ingress-gateways-deployment.yaml \ + --set 'ingressGateways.enabled=true' \ + --set 'connectInject.enabled=true' \ + --set 'ingressGateways.gateways[0].name=ingress-gateway' \ + --set 'ingressGateways.gateways[0].service.type=Static' \ + . + + [ "$status" -eq 1 ] + [[ "$output" =~ "currently set ingressGateway value service.type is not supported" ]] +} + +#-------------------------------------------------------------------- +# WAN_PORT + +@test "ingressGateways/Deployment: WAN_PORT set correctly for non-NodePort service in defaults (the default)" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/ingress-gateways-deployment.yaml \ + --set 'ingressGateways.enabled=true' \ + --set 'connectInject.enabled=true' \ + . | tee /dev/stderr | + yq -s -r '.[0].spec.template.spec.initContainers | map(select(.name == "ingress-gateway-init"))[0] | .command[2] | contains("WAN_PORT=80")' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "ingressGateways/Deployment: WAN_PORT can be set for non-NodePort service in defaults" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/ingress-gateways-deployment.yaml \ + --set 'ingressGateways.enabled=true' \ + --set 'connectInject.enabled=true' \ + --set 'ingressGateways.defaults.service.ports[0].port=1234' \ + . | tee /dev/stderr | + yq -s -r '.[0].spec.template.spec.initContainers | map(select(.name == "ingress-gateway-init"))[0] | .command[2] | contains("WAN_PORT=1234")' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "ingressGateways/Deployment: WAN_PORT set correctly for non-NodePort service in specific gateway overriding defaults" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/ingress-gateways-deployment.yaml \ + --set 'ingressGateways.enabled=true' \ + --set 'connectInject.enabled=true' \ + --set 'ingressGateways.gateways[0].name=ingress-gateway' \ + --set 'ingressGateways.gateways[0].service.ports[0].port=1234' \ + . 
| tee /dev/stderr | + yq -s -r '.[0].spec.template.spec.initContainers | map(select(.name == "ingress-gateway-init"))[0] | .command[2] | contains("WAN_PORT=1234")' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "ingressGateways/Deployment: WAN_PORT set correctly for NodePort service with nodePort set in defaults" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/ingress-gateways-deployment.yaml \ + --set 'ingressGateways.enabled=true' \ + --set 'connectInject.enabled=true' \ + --set 'ingressGateways.defaults.service.type=NodePort' \ + --set 'ingressGateways.defaults.service.ports[0].nodePort=1234' \ + . | tee /dev/stderr | + yq -s -r '.[0].spec.template.spec.initContainers | map(select(.name == "ingress-gateway-init"))[0] | .command[2] | contains("WAN_PORT=1234")' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "ingressGateways/Deployment: WAN_PORT set correctly for NodePort service with nodePort set in specific gateway overriding defaults" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/ingress-gateways-deployment.yaml \ + --set 'ingressGateways.enabled=true' \ + --set 'connectInject.enabled=true' \ + --set 'ingressGateways.defaults.service.ports[0].nodePort=8888' \ + --set 'ingressGateways.gateways[0].name=ingress-gateway' \ + --set 'ingressGateways.gateways[0].service.type=NodePort' \ + --set 'ingressGateways.gateways[0].service.ports[0].nodePort=1234' \ + . | tee /dev/stderr | + yq -s -r '.[0].spec.template.spec.initContainers | map(select(.name == "ingress-gateway-init"))[0] | .command[2] | contains("WAN_PORT=1234")' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "ingressGateways/Deployment: WAN_PORT definition fails if .service.type=NodePort and ports[0].nodePort is empty in defaults" { + cd `chart_dir` + run helm template \ + -s templates/ingress-gateways-deployment.yaml \ + --set 'ingressGateways.enabled=true' \ + --set 'connectInject.enabled=true' \ + --set 'ingressGateways.defaults.service.type=NodePort' \ + . + + [ "$status" -eq 1 ] + [[ "$output" =~ "if ingressGateways .service.type=NodePort and using ingressGateways.defaults.service.ports, the first port entry must include a nodePort" ]] +} + +@test "ingressGateways/Deployment: WAN_PORT definition fails if .service.type=NodePort and ports[0].nodePort is empty in specific gateway and not provided in defaults" { + cd `chart_dir` + run helm template \ + -s templates/ingress-gateways-deployment.yaml \ + --set 'ingressGateways.enabled=true' \ + --set 'connectInject.enabled=true' \ + --set 'ingressGateways.defaults.service.type=NodePort' \ + --set 'ingressGateways.gateways[0].name=ingress-gateway' \ + --set 'ingressGateways.gateways[0].service.ports[0].port=1234' \ + . + + [ "$status" -eq 1 ] + [[ "$output" =~ "if ingressGateways .service.type=NodePort and defining ingressGateways.gateways.service.ports, the first port entry must include a nodePort" ]] +} + +@test "ingressGateways/Deployment: WAN_PORT definition fails if .service.type=NodePort and ports[0].nodePort is empty in defaults and specific gateway" { + cd `chart_dir` + run helm template \ + -s templates/ingress-gateways-deployment.yaml \ + --set 'ingressGateways.enabled=true' \ + --set 'connectInject.enabled=true' \ + --set 'ingressGateways.defaults.service.type=NodePort' \ + --set 'ingressGateways.defaults.service.ports=null' \ + . 
+
+  [ "$status" -eq 1 ]
+  [[ "$output" =~ "if ingressGateways .service.type=NodePort, the first port entry in either the defaults or specific gateway must include a nodePort" ]]
+}
+
+#--------------------------------------------------------------------
+# ingress-gateway-init init container
+
+@test "ingressGateways/Deployment: ingress-gateway-init init container defaults" {
+  cd `chart_dir`
+  local actual=$(helm template \
+      -s templates/ingress-gateways-deployment.yaml \
+      --set 'ingressGateways.enabled=true' \
+      --set 'connectInject.enabled=true' \
+      . | tee /dev/stderr |
+      yq -s -r '.[0].spec.template.spec.initContainers | map(select(.name == "ingress-gateway-init"))[0] | .command[2]' | tee /dev/stderr)
+
+  exp='consul-k8s-control-plane service-address \
+  -log-level=info \
+  -log-json=false \
+  -k8s-namespace=default \
+  -name=release-name-consul-ingress-gateway \
+  -output-file=/tmp/address.txt
+WAN_ADDR="$(cat /tmp/address.txt)"
+WAN_PORT=8080
+
+cat > /consul/service/service.hcl << EOF
+service {
+  kind = "ingress-gateway"
+  name = "ingress-gateway"
+  id = "${POD_NAME}"
+  port = ${WAN_PORT}
+  address = "${WAN_ADDR}"
+  tagged_addresses {
+    lan {
+      address = "${POD_IP}"
+      port = 21000
+    }
+    wan {
+      address = "${WAN_ADDR}"
+      port = ${WAN_PORT}
+    }
+  }
+  proxy {
+    config {
+      envoy_gateway_no_default_bind = true
+      envoy_gateway_bind_addresses {
+        all-interfaces {
+          address = "0.0.0.0"
+        }
+      }
+    }
+  }
+  checks = [
+    {
+      name = "Ingress Gateway Listening"
+      interval = "10s"
+      tcp = "${POD_IP}:21000"
+      deregister_critical_service_after = "6h"
+    }
+  ]
+}
+EOF
+
+/consul-bin/consul services register \
+  /consul/service/service.hcl'
+
+  [ "${actual}" = "${exp}" ]
+}
+
+@test "ingressGateways/Deployment: ingress-gateway-init init container with acls.manageSystemACLs=true" {
+  cd `chart_dir`
+  local actual=$(helm template \
+      -s templates/ingress-gateways-deployment.yaml \
+      --set 'ingressGateways.enabled=true' \
+      --set 'connectInject.enabled=true' \
+      --set 'global.acls.manageSystemACLs=true' \
+      . | tee /dev/stderr |
+      yq -s -r '.[0].spec.template.spec.initContainers | map(select(.name == "ingress-gateway-init"))[0] | .command[2]' | tee /dev/stderr)
+
+  exp='consul-k8s-control-plane acl-init \
+  -component-name=ingress-gateway/release-name-consul-ingress-gateway \
+  -acl-auth-method=release-name-consul-k8s-component-auth-method \
+  -token-sink-file=/consul/service/acl-token \
+  -consul-api-timeout=5s \
+  -log-level=info \
+  -log-json=false
+
+consul-k8s-control-plane service-address \
+  -log-level=info \
+  -log-json=false \
+  -k8s-namespace=default \
+  -name=release-name-consul-ingress-gateway \
+  -output-file=/tmp/address.txt
+WAN_ADDR="$(cat /tmp/address.txt)"
+WAN_PORT=8080
+
+cat > /consul/service/service.hcl << EOF
+service {
+  kind = "ingress-gateway"
+  name = "ingress-gateway"
+  id = "${POD_NAME}"
+  port = ${WAN_PORT}
+  address = "${WAN_ADDR}"
+  tagged_addresses {
+    lan {
+      address = "${POD_IP}"
+      port = 21000
+    }
+    wan {
+      address = "${WAN_ADDR}"
+      port = ${WAN_PORT}
+    }
+  }
+  proxy {
+    config {
+      envoy_gateway_no_default_bind = true
+      envoy_gateway_bind_addresses {
+        all-interfaces {
+          address = "0.0.0.0"
+        }
+      }
+    }
+  }
+  checks = [
+    {
+      name = "Ingress Gateway Listening"
+      interval = "10s"
+      tcp = "${POD_IP}:21000"
+      deregister_critical_service_after = "6h"
+    }
+  ]
+}
+EOF
+
+/consul-bin/consul services register \
+  -token-file=/consul/service/acl-token \
+  /consul/service/service.hcl'
+
+  [ "${actual}" = "${exp}" ]
+}
+
+@test "ingressGateways/Deployment: ingress-gateway-init init container includes service-address command for LoadBalancer set through defaults" {
+  cd `chart_dir`
+  local actual=$(helm template \
+      -s templates/ingress-gateways-deployment.yaml \
+      --set 'ingressGateways.enabled=true' \
+      --set 'connectInject.enabled=true' \
+      --set 'ingressGateways.defaults.service.type=LoadBalancer' \
+      . | tee /dev/stderr |
+      yq -s -r '.[0].spec.template.spec.initContainers | map(select(.name == "ingress-gateway-init"))[0] | .command[2] | contains("consul-k8s-control-plane service-address")' | tee /dev/stderr)
+  [ "${actual}" = "true" ]
+}
+
+@test "ingressGateways/Deployment: ingress-gateway-init init container includes service-address command for LoadBalancer set through specific gateway overriding defaults" {
+  cd `chart_dir`
+  local actual=$(helm template \
+      -s templates/ingress-gateways-deployment.yaml \
+      --set 'ingressGateways.enabled=true' \
+      --set 'connectInject.enabled=true' \
+      --set 'ingressGateways.gateways[0].name=ingress-gateway' \
+      --set 'ingressGateways.gateways[0].service.type=LoadBalancer' \
+      . | tee /dev/stderr |
+      yq -s -r '.[0].spec.template.spec.initContainers | map(select(.name == "ingress-gateway-init"))[0] | .command[2] | contains("consul-k8s-control-plane service-address")' | tee /dev/stderr)
+  [ "${actual}" = "true" ]
+}
+
+@test "ingressGateways/Deployment: ingress-gateway-init init container does not include service-address command for NodePort set through defaults" {
+  cd `chart_dir`
+  local actual=$(helm template \
+      -s templates/ingress-gateways-deployment.yaml \
+      --set 'ingressGateways.enabled=true' \
+      --set 'connectInject.enabled=true' \
+      --set 'ingressGateways.defaults.service.type=NodePort' \
+      --set 'ingressGateways.defaults.service.ports[0].port=80' \
+      --set 'ingressGateways.defaults.service.ports[0].nodePort=1234' \
+      . | tee /dev/stderr |
+      yq -s -r '.[0].spec.template.spec.initContainers | map(select(.name == "ingress-gateway-init"))[0] | .command[2] | contains("consul-k8s-control-plane service-address")' | tee /dev/stderr)
+  [ "${actual}" = "false" ]
+}
+
+@test "ingressGateways/Deployment: ingress-gateway-init init container does not include service-address command for NodePort set through specific gateway overriding defaults" {
+  cd `chart_dir`
+  local actual=$(helm template \
+      -s templates/ingress-gateways-deployment.yaml \
+      --set 'ingressGateways.enabled=true' \
+      --set 'connectInject.enabled=true' \
+      --set 'ingressGateways.gateways[0].name=ingress-gateway' \
+      --set 'ingressGateways.gateways[0].service.type=NodePort' \
+      --set 'ingressGateways.gateways[0].service.ports[0].port=80' \
+      --set 'ingressGateways.gateways[0].service.ports[0].nodePort=1234' \
+      . | tee /dev/stderr |
+      yq -s -r '.[0].spec.template.spec.initContainers | map(select(.name == "ingress-gateway-init"))[0] | .command[2] | contains("consul-k8s-control-plane service-address")' | tee /dev/stderr)
+  [ "${actual}" = "false" ]
+}
+
 #--------------------------------------------------------------------
 # namespaces
 
@@ -831,10 +1563,13 @@ key2: value2' \
       --set 'ingressGateways.enabled=true' \
       --set 'connectInject.enabled=true' \
       . | tee /dev/stderr |
-      yq -s -r '.[0].spec.template.metadata.annotations' | tee /dev/stderr)
+      yq -s -r '.[0].spec.template.spec.containers[0]' | tee /dev/stderr)
 
-  local actual=$(echo $object | yq -r '."consul.hashicorp.com/gateway-namespace"' | tee /dev/stderr)
-  [ "${actual}" = "null" ]
+  local actual=$(echo $object | yq -r '.command | any(contains("-namespace"))' | tee /dev/stderr)
+  [ "${actual}" = "false" ]
+
+  local actual=$(echo $object | yq -r '.lifecycle.preStop.exec.command | any(contains("-namespace"))' | tee /dev/stderr)
+  [ "${actual}" = "false" ]
 }
 
 @test "ingressGateways/Deployment: namespace command flag is specified through defaults" {
@@ -846,13 +1581,16 @@ key2: value2' \
       --set 'global.enableConsulNamespaces=true' \
       --set 'ingressGateways.defaults.consulNamespace=namespace' \
       . | tee /dev/stderr |
-      yq -s -r '.[0].spec.template.metadata.annotations' | tee /dev/stderr)
+      yq -s -r '.[0].spec.template.spec.containers[0]' | tee /dev/stderr)
+
+  local actual=$(echo $object | yq -r '.command | any(contains("-namespace=namespace"))' | tee /dev/stderr)
+  [ "${actual}" = "true" ]
 
-  local actual=$(echo $object | yq -r '."consul.hashicorp.com/gateway-namespace"' | tee /dev/stderr)
-  [ "${actual}" = "namespace" ]
+  local actual=$(echo $object | yq -r '.lifecycle.preStop.exec.command | any(contains("-namespace=namespace"))' | tee /dev/stderr)
+  [ "${actual}" = "true" ]
 }
 
-@test "ingressGateways/Deployment: namespace annotation is specified through specific gateway overriding defaults" {
+@test "ingressGateways/Deployment: namespace command flag is specified through specific gateway overriding defaults" {
   cd `chart_dir`
   local object=$(helm template \
      -s templates/ingress-gateways-deployment.yaml \
      --set 'ingressGateways.enabled=true' \
      --set 'connectInject.enabled=true' \
      --set 'global.enableConsulNamespaces=true' \
      --set 'ingressGateways.defaults.consulNamespace=namespace' \
      --set 'ingressGateways.gateways[0].name=ingress-gateway' \
      --set 'ingressGateways.gateways[0].consulNamespace=new-namespace' \
      .
| tee /dev/stderr | - yq -s -r '.[0].spec.template.metadata.annotations' | tee /dev/stderr) + yq -s -r '.[0].spec.template.spec.containers[0]' | tee /dev/stderr) + + local actual=$(echo $object | yq -r '.command | any(contains("-namespace=new-namespace"))' | tee /dev/stderr) + [ "${actual}" = "true" ] - local actual=$(echo $object | yq -r '."consul.hashicorp.com/gateway-namespace"' | tee /dev/stderr) - [ "${actual}" = "new-namespace" ] + local actual=$(echo $object | yq -r '.lifecycle.preStop.exec.command | any(contains("-namespace=new-namespace"))' | tee /dev/stderr) + [ "${actual}" = "true" ] } #-------------------------------------------------------------------- @@ -883,6 +1624,9 @@ key2: value2' \ local actual=$(echo $object | yq -r '.command | any(contains("-partition"))' | tee /dev/stderr) [ "${actual}" = "false" ] + + local actual=$(echo $object | yq -r '.lifecycle.preStop.exec.command | any(contains("-partition"))' | tee /dev/stderr) + [ "${actual}" = "false" ] } @test "ingressGateways/Deployment: partition command flag is specified through partition name" { @@ -897,7 +1641,10 @@ key2: value2' \ . | tee /dev/stderr | yq -s -r '.[0].spec.template.spec.containers[0]' | tee /dev/stderr) - local actual=$(echo $object | yq -r '.command | any(contains("-service-partition=default"))' | tee /dev/stderr) + local actual=$(echo $object | yq -r '.command | any(contains("-partition=default"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | yq -r '.lifecycle.preStop.exec.command | any(contains("-partition=default"))' | tee /dev/stderr) [ "${actual}" = "true" ] } @@ -944,6 +1691,38 @@ key2: value2' \ [ "${actual}" = "false" ] } +#-------------------------------------------------------------------- +# get-auto-encrypt-client-ca + +@test "ingressGateways/Deployment: get-auto-encrypt-client-ca uses server's stateful set address by default and passes ca cert" { + cd `chart_dir` + local command=$(helm template \ + -s templates/ingress-gateways-deployment.yaml \ + --set 'ingressGateways.enabled=true' \ + --set 'connectInject.enabled=true' \ + --set 'ingressGateways.gateways[0].name=gateway1' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ + . | tee /dev/stderr | + yq '.spec.template.spec.initContainers[] | select(.name == "get-auto-encrypt-client-ca").command | join(" ")' | tee /dev/stderr) + + # check server address + actual=$(echo $command | jq ' . | contains("-server-addr=release-name-consul-server")') + [ "${actual}" = "true" ] + + # check server port + actual=$(echo $command | jq ' . | contains("-server-port=8501")') + [ "${actual}" = "true" ] + + # check server's CA cert + actual=$(echo $command | jq ' . | contains("-ca-file=/consul/tls/ca/tls.crt")') + [ "${actual}" = "true" ] + + # check consul-api-timeout + actual=$(echo $command | jq ' . 
| contains("-consul-api-timeout=5s")') + [ "${actual}" = "true" ] +} + #-------------------------------------------------------------------- # Vault @@ -954,6 +1733,7 @@ key2: value2' \ --set 'ingressGateways.enabled=true' \ --set 'connectInject.enabled=true' \ --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ --set 'global.tls.caCert.secretName=foo' \ --set 'global.secretsBackend.vault.enabled=true' \ --set 'global.secretsBackend.vault.consulClientRole=test' \ @@ -986,6 +1766,7 @@ key2: value2' \ --set 'ingressGateways.enabled=true' \ --set 'connectInject.enabled=true' \ --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ --set 'global.tls.caCert.secretName=foo' \ --set 'global.secretsBackend.vault.enabled=true' \ --set 'global.secretsBackend.vault.consulClientRole=foo' \ @@ -993,10 +1774,8 @@ key2: value2' \ --set 'global.secretsBackend.vault.consulCARole=test' \ . | tee /dev/stderr | yq -r '.spec.template' | tee /dev/stderr) - local actual=$(echo $object | yq -r '.metadata.annotations | has("vault.hashicorp.com/agent-extra-secret")') [ "${actual}" = "false" ] - local actual=$(echo $object | yq -r '.metadata.annotations | has("vault.hashicorp.com/ca-cert")') [ "${actual}" = "false" ] } @@ -1009,6 +1788,7 @@ key2: value2' \ --set 'ingressGateways.enabled=true' \ --set 'connectInject.enabled=true' \ --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ --set 'global.tls.caCert.secretName=foo' \ --set 'global.secretsBackend.vault.enabled=true' \ --set 'global.secretsBackend.vault.consulClientRole=foo' \ @@ -1030,6 +1810,7 @@ key2: value2' \ --set 'ingressGateways.enabled=true' \ --set 'connectInject.enabled=true' \ --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ --set 'global.tls.caCert.secretName=foo' \ --set 'global.secretsBackend.vault.enabled=true' \ --set 'global.secretsBackend.vault.consulClientRole=foo' \ @@ -1051,6 +1832,7 @@ key2: value2' \ --set 'ingressGateways.enabled=true' \ --set 'connectInject.enabled=true' \ --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ --set 'global.tls.caCert.secretName=foo' \ --set 'global.secretsBackend.vault.enabled=true' \ --set 'global.secretsBackend.vault.consulClientRole=foo' \ @@ -1081,7 +1863,7 @@ key2: value2' \ --set 'global.tls.caCert.secretName=foo' \ --set 'global.secretsBackend.vault.consulCARole=carole' \ . 
| tee /dev/stderr | - yq -r '.spec.template.metadata.annotations | del(."consul.hashicorp.com/connect-inject") | del(."vault.hashicorp.com/agent-inject") | del(."vault.hashicorp.com/role") | del(."consul.hashicorp.com/gateway-wan-address-source") | del(."consul.hashicorp.com/gateway-wan-port") | del(."vconsul.hashicorp.com/gateway-wan-address-source") | del(."consul.hashicorp.com/gateway-consul-service-name") | del(."consul.hashicorp.com/gateway-kind")' | tee /dev/stderr) + yq -r '.spec.template.metadata.annotations | del(."consul.hashicorp.com/connect-inject") | del(."vault.hashicorp.com/agent-inject") | del(."vault.hashicorp.com/role")' | tee /dev/stderr) [ "${actual}" = "{}" ] } @@ -1092,6 +1874,7 @@ key2: value2' \ --set 'ingressGateways.enabled=true' \ --set 'connectInject.enabled=true' \ --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ --set 'global.secretsBackend.vault.enabled=true' \ --set 'global.secretsBackend.vault.consulClientRole=test' \ --set 'global.secretsBackend.vault.consulServerRole=foo' \ @@ -1142,253 +1925,3 @@ key2: value2' \ yq -s -r '.[0].spec.template.spec.terminationGracePeriodSeconds' | tee /dev/stderr) [ "${actual}" = "30" ] } - -#-------------------------------------------------------------------- -# global.cloud - -@test "ingressGateways/Deployment: fails when global.cloud.enabled is true and global.cloud.clientId.secretName is not set but global.cloud.clientSecret.secretName and global.cloud.resourceId.secretName is set" { - cd `chart_dir` - run helm template \ - -s templates/ingress-gateways-deployment.yaml \ - --set 'ingressGateways.enabled=true' \ - --set 'connectInject.enabled=true' \ - --set 'ingressGateways.defaults.terminationGracePeriodSeconds=5' \ - --set 'ingressGateways.gateways[0].name=gateway1' \ - --set 'global.tls.enabled=true' \ - --set 'global.tls.enableAutoEncrypt=true' \ - --set 'global.cloud.enabled=true' \ - --set 'global.cloud.clientSecret.secretName=client-id-name' \ - --set 'global.cloud.clientSecret.secretKey=client-id-key' \ - --set 'global.cloud.resourceId.secretName=client-resource-id-name' \ - --set 'global.cloud.resourceId.secretKey=client-resource-id-key' \ - . - [ "$status" -eq 1 ] - [[ "$output" =~ "When global.cloud.enabled is true, global.cloud.resourceId.secretName, global.cloud.clientId.secretName, and global.cloud.clientSecret.secretName must also be set." ]] -} - -@test "ingressGateways/Deployment: fails when global.cloud.enabled is true and global.cloud.clientSecret.secretName is not set but global.cloud.clientId.secretName and global.cloud.resourceId.secretName is set" { - cd `chart_dir` - run helm template \ - -s templates/ingress-gateways-deployment.yaml \ - --set 'ingressGateways.enabled=true' \ - --set 'connectInject.enabled=true' \ - --set 'ingressGateways.defaults.terminationGracePeriodSeconds=5' \ - --set 'ingressGateways.gateways[0].name=gateway1' \ - --set 'global.tls.enabled=true' \ - --set 'global.tls.enableAutoEncrypt=true' \ - --set 'global.cloud.enabled=true' \ - --set 'global.cloud.clientId.secretName=client-id-name' \ - --set 'global.cloud.clientId.secretKey=client-id-key' \ - --set 'global.cloud.resourceId.secretName=resource-id-name' \ - --set 'global.cloud.resourceId.secretKey=resource-id-key' \ - . - [ "$status" -eq 1 ] - [[ "$output" =~ "When global.cloud.enabled is true, global.cloud.resourceId.secretName, global.cloud.clientId.secretName, and global.cloud.clientSecret.secretName must also be set." 
]] -} - -@test "ingressGateways/Deployment: fails when global.cloud.enabled is true and global.cloud.resourceId.secretName is not set but global.cloud.clientId.secretName and global.cloud.clientSecret.secretName is set" { - cd `chart_dir` - run helm template \ - -s templates/ingress-gateways-deployment.yaml \ - --set 'ingressGateways.enabled=true' \ - --set 'connectInject.enabled=true' \ - --set 'ingressGateways.defaults.terminationGracePeriodSeconds=5' \ - --set 'ingressGateways.gateways[0].name=gateway1' \ - --set 'global.tls.enabled=true' \ - --set 'global.tls.enableAutoEncrypt=true' \ - --set 'global.cloud.enabled=true' \ - --set 'global.cloud.clientId.secretName=client-id-name' \ - --set 'global.cloud.clientId.secretKey=client-id-key' \ - --set 'global.cloud.clientSecret.secretName=client-secret-id-name' \ - --set 'global.cloud.clientSecret.secretKey=client-secret-id-key' \ - . - [ "$status" -eq 1 ] - [[ "$output" =~ "When global.cloud.enabled is true, global.cloud.resourceId.secretName, global.cloud.clientId.secretName, and global.cloud.clientSecret.secretName must also be set." ]] -} - -@test "ingressGateways/Deployment: fails when global.cloud.resourceId.secretName is set but global.cloud.resourceId.secretKey is not set." { - cd `chart_dir` - run helm template \ - -s templates/ingress-gateways-deployment.yaml \ - --set 'ingressGateways.enabled=true' \ - --set 'connectInject.enabled=true' \ - --set 'ingressGateways.defaults.terminationGracePeriodSeconds=5' \ - --set 'ingressGateways.gateways[0].name=gateway1' \ - --set 'global.tls.enabled=true' \ - --set 'global.tls.enableAutoEncrypt=true' \ - --set 'global.cloud.enabled=true' \ - --set 'global.cloud.clientId.secretName=client-id-name' \ - --set 'global.cloud.clientId.secretKey=client-id-key' \ - --set 'global.cloud.clientSecret.secretName=client-secret-id-name' \ - --set 'global.cloud.clientSecret.secretKey=client-secret-id-key' \ - --set 'global.cloud.resourceId.secretName=resource-id-name' \ - . - [ "$status" -eq 1 ] - [[ "$output" =~ "When either global.cloud.resourceId.secretName or global.cloud.resourceId.secretKey is defined, both must be set." ]] -} - -@test "ingressGateways/Deployment: fails when global.cloud.authURL.secretName is set but global.cloud.authURL.secretKey is not set." { - cd `chart_dir` - run helm template \ - -s templates/ingress-gateways-deployment.yaml \ - --set 'ingressGateways.enabled=true' \ - --set 'connectInject.enabled=true' \ - --set 'ingressGateways.defaults.terminationGracePeriodSeconds=5' \ - --set 'ingressGateways.gateways[0].name=gateway1' \ - --set 'global.tls.enabled=true' \ - --set 'global.tls.enableAutoEncrypt=true' \ - --set 'global.cloud.enabled=true' \ - --set 'global.cloud.clientId.secretName=client-id-name' \ - --set 'global.cloud.clientId.secretKey=client-id-key' \ - --set 'global.cloud.clientSecret.secretName=client-secret-id-name' \ - --set 'global.cloud.clientSecret.secretKey=client-secret-id-key' \ - --set 'global.cloud.resourceId.secretName=resource-id-name' \ - --set 'global.cloud.resourceId.secretKey=resource-id-key' \ - --set 'global.cloud.authUrl.secretName=auth-url-name' \ - . - [ "$status" -eq 1 ] - - [[ "$output" =~ "When either global.cloud.authUrl.secretName or global.cloud.authUrl.secretKey is defined, both must be set." ]] -} - -@test "ingressGateways/Deployment: fails when global.cloud.authURL.secretKey is set but global.cloud.authURL.secretName is not set." 
{ - cd `chart_dir` - run helm template \ - -s templates/ingress-gateways-deployment.yaml \ - --set 'ingressGateways.enabled=true' \ - --set 'connectInject.enabled=true' \ - --set 'ingressGateways.defaults.terminationGracePeriodSeconds=5' \ - --set 'ingressGateways.gateways[0].name=gateway1' \ - --set 'global.tls.enabled=true' \ - --set 'global.tls.enableAutoEncrypt=true' \ - --set 'global.cloud.enabled=true' \ - --set 'global.cloud.clientId.secretName=client-id-name' \ - --set 'global.cloud.clientId.secretKey=client-id-key' \ - --set 'global.cloud.clientSecret.secretName=client-secret-id-name' \ - --set 'global.cloud.clientSecret.secretKey=client-secret-id-key' \ - --set 'global.cloud.resourceId.secretName=resource-id-name' \ - --set 'global.cloud.resourceId.secretKey=resource-id-key' \ - --set 'global.cloud.authUrl.secretKey=auth-url-key' \ - . - [ "$status" -eq 1 ] - - [[ "$output" =~ "When either global.cloud.authUrl.secretName or global.cloud.authUrl.secretKey is defined, both must be set." ]] -} - -@test "ingressGateways/Deployment: fails when global.cloud.apiHost.secretName is set but global.cloud.apiHost.secretKey is not set." { - cd `chart_dir` - run helm template \ - -s templates/ingress-gateways-deployment.yaml \ - --set 'ingressGateways.enabled=true' \ - --set 'connectInject.enabled=true' \ - --set 'ingressGateways.defaults.terminationGracePeriodSeconds=5' \ - --set 'ingressGateways.gateways[0].name=gateway1' \ - --set 'global.tls.enabled=true' \ - --set 'global.tls.enableAutoEncrypt=true' \ - --set 'global.cloud.enabled=true' \ - --set 'global.cloud.clientId.secretName=client-id-name' \ - --set 'global.cloud.clientId.secretKey=client-id-key' \ - --set 'global.cloud.clientSecret.secretName=client-secret-id-name' \ - --set 'global.cloud.clientSecret.secretKey=client-secret-id-key' \ - --set 'global.cloud.resourceId.secretName=resource-id-name' \ - --set 'global.cloud.resourceId.secretKey=resource-id-key' \ - --set 'global.cloud.apiHost.secretName=auth-url-name' \ - . - [ "$status" -eq 1 ] - - [[ "$output" =~ "When either global.cloud.apiHost.secretName or global.cloud.apiHost.secretKey is defined, both must be set." ]] -} - -@test "ingressGateways/Deployment: fails when global.cloud.apiHost.secretKey is set but global.cloud.apiHost.secretName is not set." { - cd `chart_dir` - run helm template \ - -s templates/ingress-gateways-deployment.yaml \ - --set 'ingressGateways.enabled=true' \ - --set 'connectInject.enabled=true' \ - --set 'ingressGateways.defaults.terminationGracePeriodSeconds=5' \ - --set 'ingressGateways.gateways[0].name=gateway1' \ - --set 'global.tls.enabled=true' \ - --set 'global.tls.enableAutoEncrypt=true' \ - --set 'global.cloud.enabled=true' \ - --set 'global.cloud.clientId.secretName=client-id-name' \ - --set 'global.cloud.clientId.secretKey=client-id-key' \ - --set 'global.cloud.clientSecret.secretName=client-secret-id-name' \ - --set 'global.cloud.clientSecret.secretKey=client-secret-id-key' \ - --set 'global.cloud.resourceId.secretName=resource-id-name' \ - --set 'global.cloud.resourceId.secretKey=resource-id-key' \ - --set 'global.cloud.apiHost.secretKey=auth-url-key' \ - . - [ "$status" -eq 1 ] - - [[ "$output" =~ "When either global.cloud.apiHost.secretName or global.cloud.apiHost.secretKey is defined, both must be set." ]] -} - -@test "ingressGateways/Deployment: fails when global.cloud.scadaAddress.secretName is set but global.cloud.scadaAddress.secretKey is not set." 
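# The same name/key pairing rule, sketched as one loop: for each optional
# HCP secret, setting secretName without secretKey (or vice versa) must
# render the "both must be set" error. Illustrative only, not in _helpers.
for secret in authUrl apiHost scadaAddress; do
  run helm template -s templates/ingress-gateways-deployment.yaml \
    --set 'ingressGateways.enabled=true' \
    --set 'connectInject.enabled=true' \
    --set 'global.cloud.enabled=true' \
    --set 'global.cloud.clientId.secretName=client-id-name' \
    --set 'global.cloud.clientId.secretKey=client-id-key' \
    --set 'global.cloud.clientSecret.secretName=client-secret-name' \
    --set 'global.cloud.clientSecret.secretKey=client-secret-key' \
    --set 'global.cloud.resourceId.secretName=resource-id-name' \
    --set 'global.cloud.resourceId.secretKey=resource-id-key' \
    --set "global.cloud.${secret}.secretName=${secret}-name" \
    .
  [ "$status" -eq 1 ]
  [[ "$output" =~ "both must be set" ]]
done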
{ - cd `chart_dir` - run helm template \ - -s templates/ingress-gateways-deployment.yaml \ - --set 'ingressGateways.enabled=true' \ - --set 'connectInject.enabled=true' \ - --set 'ingressGateways.defaults.terminationGracePeriodSeconds=5' \ - --set 'ingressGateways.gateways[0].name=gateway1' \ - --set 'global.tls.enabled=true' \ - --set 'global.tls.enableAutoEncrypt=true' \ - --set 'global.cloud.enabled=true' \ - --set 'global.cloud.clientId.secretName=client-id-name' \ - --set 'global.cloud.clientId.secretKey=client-id-key' \ - --set 'global.cloud.clientSecret.secretName=client-secret-id-name' \ - --set 'global.cloud.clientSecret.secretKey=client-secret-id-key' \ - --set 'global.cloud.resourceId.secretName=resource-id-name' \ - --set 'global.cloud.resourceId.secretKey=resource-id-key' \ - --set 'global.cloud.scadaAddress.secretName=scada-address-name' \ - . - [ "$status" -eq 1 ] - - [[ "$output" =~ "When either global.cloud.scadaAddress.secretName or global.cloud.scadaAddress.secretKey is defined, both must be set." ]] -} - -@test "ingressGateways/Deployment: fails when global.cloud.scadaAddress.secretKey is set but global.cloud.scadaAddress.secretName is not set." { - cd `chart_dir` - run helm template \ - -s templates/ingress-gateways-deployment.yaml \ - --set 'ingressGateways.enabled=true' \ - --set 'connectInject.enabled=true' \ - --set 'ingressGateways.defaults.terminationGracePeriodSeconds=5' \ - --set 'ingressGateways.gateways[0].name=gateway1' \ - --set 'global.tls.enabled=true' \ - --set 'global.tls.enableAutoEncrypt=true' \ - --set 'global.cloud.enabled=true' \ - --set 'global.cloud.clientId.secretName=client-id-name' \ - --set 'global.cloud.clientId.secretKey=client-id-key' \ - --set 'global.cloud.clientSecret.secretName=client-secret-id-name' \ - --set 'global.cloud.clientSecret.secretKey=client-secret-id-key' \ - --set 'global.cloud.resourceId.secretName=resource-id-name' \ - --set 'global.cloud.resourceId.secretKey=resource-id-key' \ - --set 'global.cloud.scadaAddress.secretKey=scada-address-key' \ - . - [ "$status" -eq 1 ] - - [[ "$output" =~ "When either global.cloud.scadaAddress.secretName or global.cloud.scadaAddress.secretKey is defined, both must be set." ]] -} - -@test "ingressGateways/Deployment: sets TLS server name if global.cloud.enabled is set" { - cd `chart_dir` - local actual=$(helm template \ - -s templates/ingress-gateways-deployment.yaml \ - --set 'ingressGateways.enabled=true' \ - --set 'connectInject.enabled=true' \ - --set 'ingressGateways.defaults.terminationGracePeriodSeconds=5' \ - --set 'ingressGateways.gateways[0].name=gateway1' \ - --set 'global.tls.enabled=true' \ - --set 'global.tls.enableAutoEncrypt=true' \ - --set 'global.cloud.enabled=true' \ - --set 'global.cloud.clientId.secretName=client-id-name' \ - --set 'global.cloud.clientId.secretKey=client-id-key' \ - --set 'global.cloud.clientSecret.secretName=client-secret-id-name' \ - --set 'global.cloud.clientSecret.secretKey=client-secret-id-key' \ - --set 'global.cloud.resourceId.secretName=resource-id-name' \ - --set 'global.cloud.resourceId.secretKey=resource-id-key' \ - . 
| tee /dev/stderr | - yq '.spec.template.spec.containers[0].command | any(contains("-tls-server-name=server.dc1.consul"))' | tee /dev/stderr) - [ "${actual}" = "true" ] -} diff --git a/charts/consul/test/unit/mesh-gateway-clusterrole.bats b/charts/consul/test/unit/mesh-gateway-clusterrole.bats index 3cb5826969..da4d0bdb2c 100644 --- a/charts/consul/test/unit/mesh-gateway-clusterrole.bats +++ b/charts/consul/test/unit/mesh-gateway-clusterrole.bats @@ -38,6 +38,7 @@ load _helpers -s templates/mesh-gateway-clusterrole.yaml \ --set 'meshGateway.enabled=true' \ --set 'connectInject.enabled=true' \ + --set 'meshGateway.service.enabled=true' \ --set 'meshGateway.service.type=LoadBalancer' \ --set 'meshGateway.wanAddress.source=Service' \ . | tee /dev/stderr | @@ -65,6 +66,7 @@ load _helpers --set 'connectInject.enabled=true' \ --set 'global.acls.manageSystemACLs=true' \ --set 'global.enablePodSecurityPolicies=true' \ + --set 'meshGateway.service.enabled=true' \ --set 'meshGateway.service.type=LoadBalancer' \ --set 'meshGateway.wanAddress.source=Service' \ . | tee /dev/stderr | diff --git a/charts/consul/test/unit/mesh-gateway-deployment.bats b/charts/consul/test/unit/mesh-gateway-deployment.bats index 30b612ef35..2cf47ee748 100755 --- a/charts/consul/test/unit/mesh-gateway-deployment.bats +++ b/charts/consul/test/unit/mesh-gateway-deployment.bats @@ -20,6 +20,16 @@ load _helpers [ "${actual}" = "true" ] } +@test "meshGateway/Deployment: consul-sidecar uses -consul-api-timeout" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/mesh-gateway-deployment.yaml \ + --set 'meshGateway.enabled=true' \ + --set 'connectInject.enabled=true' \ + . | tee /dev/stderr | + yq -s '.[0].spec.template.spec.containers[1].command | any(contains("-consul-api-timeout=5s"))' | tee /dev/stderr) + [ "${actual}" = "true" ] +} #-------------------------------------------------------------------- # prerequisites @@ -33,6 +43,40 @@ load _helpers [[ "$output" =~ "connectInject.enabled must be true" ]] } +@test "meshGateway/Deployment: fails if client.grpc=false" { + cd `chart_dir` + run helm template \ + -s templates/mesh-gateway-deployment.yaml \ + --set 'meshGateway.enabled=true' \ + --set 'client.grpc=false' \ + --set 'connectInject.enabled=true' . + [ "$status" -eq 1 ] + [[ "$output" =~ "client.grpc must be true" ]] +} + +@test "meshGateway/Deployment: fails if global.enabled is false and clients are not explicitly enabled" { + cd `chart_dir` + run helm template \ + -s templates/mesh-gateway-deployment.yaml \ + --set 'meshGateway.enabled=true' \ + --set 'connectInject.enabled=true' \ + --set 'global.enabled=false' . + [ "$status" -eq 1 ] + [[ "$output" =~ "clients must be enabled" ]] +} + +@test "meshGateway/Deployment: fails if global.enabled is true but clients are explicitly disabled" { + cd `chart_dir` + run helm template \ + -s templates/mesh-gateway-deployment.yaml \ + --set 'meshGateway.enabled=true' \ + --set 'connectInject.enabled=true' \ + --set 'global.enabled=true' \ + --set 'client.enabled=false' . + [ "$status" -eq 1 ] + [[ "$output" =~ "clients must be enabled" ]] +} + #-------------------------------------------------------------------- # annotations @@ -44,7 +88,7 @@ load _helpers --set 'connectInject.enabled=true' \ . | tee /dev/stderr | yq -r '.spec.template.metadata.annotations | length' | tee /dev/stderr) - [ "${actual}" = "7" ] + [ "${actual}" = "1" ] } @test "meshGateway/Deployment: extra annotations can be set" { @@ -57,7 +101,7 @@ load _helpers key2: value2' \ . 
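# Quick manual spot-check of the proxy metrics setting asserted above
# (assumes helm and the python yq wrapper on PATH; run from chart_dir):
helm template -s templates/mesh-gateway-deployment.yaml \
  --set 'meshGateway.enabled=true' \
  --set 'connectInject.enabled=true' \
  --set 'global.metrics.enabled=true' . \
  | yq '.spec.template.spec.initContainers[1].command | join(" ")' \
  | grep -c 'envoy_prometheus_bind_addr'   # sketch: expect 1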
| tee /dev/stderr | yq -r '.spec.template.metadata.annotations | length' | tee /dev/stderr) - [ "${actual}" = "9" ] + [ "${actual}" = "3" ] } #-------------------------------------------------------------------- @@ -99,7 +143,20 @@ key2: value2' \ [ "${actual}" = "/metrics" ] } -@test "meshGateway/Deployment: when global.metrics.enableGatewayMetrics=false, does not set annotations" { +@test "meshGateway/Deployment: when global.metrics.enabled=true, sets proxy setting" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/mesh-gateway-deployment.yaml \ + --set 'meshGateway.enabled=true' \ + --set 'connectInject.enabled=true' \ + --set 'global.metrics.enabled=true' \ + . | tee /dev/stderr | + yq '.spec.template.spec.initContainers[1].command | join(" ") | contains("envoy_prometheus_bind_addr = \"${POD_IP}:20200\"")' | tee /dev/stderr) + + [ "${actual}" = "true" ] +} + +@test "meshGateway/Deployment: when global.metrics.enableGatewayMetrics=false, does not set proxy setting" { cd `chart_dir` local object=$(helm template \ -s templates/mesh-gateway-deployment.yaml \ @@ -110,6 +167,9 @@ key2: value2' \ . | tee /dev/stderr | yq '.spec.template' | tee /dev/stderr) + local actual=$(echo $object | yq -r '.spec.initContainers[1].command | join(" ") | contains("envoy_prometheus_bind_addr = \"${POD_IP}:20200\"")' | tee /dev/stderr) + [ "${actual}" = "false" ] + local actual=$(echo $object | yq -s -r '.[0].metadata.annotations."prometheus.io/path"' | tee /dev/stderr) [ "${actual}" = "null" ] @@ -120,7 +180,7 @@ key2: value2' \ [ "${actual}" = "null" ] } -@test "meshGateway/Deployment: when global.metrics.enabled=false, does not set annotations" { +@test "meshGateway/Deployment: when global.metrics.enabled=false, does not set proxy setting" { cd `chart_dir` local object=$(helm template \ -s templates/mesh-gateway-deployment.yaml \ @@ -130,6 +190,9 @@ key2: value2' \ . | tee /dev/stderr | yq '.spec.template' | tee /dev/stderr) + local actual=$(echo $object | yq -r '.spec.initContainers[1].command | join(" ") | contains("envoy_prometheus_bind_addr = \"${POD_IP}:20200\"")' | tee /dev/stderr) + [ "${actual}" = "false" ] + local actual=$(echo $object | yq -s -r '.[0].metadata.annotations."prometheus.io/path"' | tee /dev/stderr) [ "${actual}" = "null" ] @@ -296,6 +359,32 @@ key2: value2' \ [ "${actual}" = "ClusterFirst" ] } +#-------------------------------------------------------------------- +# envoyImage + +@test "meshGateway/Deployment: envoy image has default" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/mesh-gateway-deployment.yaml \ + --set 'meshGateway.enabled=true' \ + --set 'connectInject.enabled=true' \ + . | tee /dev/stderr | + yq -r '.spec.template.spec.containers[0].image' | tee /dev/stderr) + [[ "${actual}" =~ "envoyproxy/envoy:v" ]] + +} + +@test "meshGateway/Deployment: setting meshGateway.imageEnvoy fails" { + cd `chart_dir` + run helm template \ + -s templates/mesh-gateway-deployment.yaml \ + --set 'meshGateway.enabled=true' \ + --set 'connectInject.enabled=true' \ + --set 'meshGateway.imageEnvoy=new/image' . + [ "$status" -eq 1 ] + [[ "$output" =~ "meshGateway.imageEnvoy must be specified in global" ]] +} + #-------------------------------------------------------------------- # resources @@ -352,13 +441,57 @@ key2: value2' \ . 
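# The two init containers carry different defaults, which is why the
# tests above index initContainers[0] (the consul copy container,
# 25Mi requests / 150Mi limits) and initContainers[1] (mesh-gateway-init,
# 50Mi / 50Mi) separately. Sketch of a quick side-by-side:
helm template -s templates/mesh-gateway-deployment.yaml \
  --set 'meshGateway.enabled=true' \
  --set 'connectInject.enabled=true' . \
  | yq -r '.spec.template.spec.initContainers[] | .name + " " + (.resources.limits.memory)'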
| tee /dev/stderr | yq -r '.spec.template.spec.initContainers[0].resources' | tee /dev/stderr) + [ $(echo "${actual}" | yq -r '.requests.memory') = "25Mi" ] + [ $(echo "${actual}" | yq -r '.requests.cpu') = "50m" ] + [ $(echo "${actual}" | yq -r '.limits.memory') = "150Mi" ] + [ $(echo "${actual}" | yq -r '.limits.cpu') = "50m" ] +} + +@test "meshGateway/Deployment: init container resources can be set" { + cd `chart_dir` + local object=$(helm template \ + -s templates/mesh-gateway-deployment.yaml \ + --set 'meshGateway.enabled=true' \ + --set 'connectInject.enabled=true' \ + --set 'meshGateway.initCopyConsulContainer.resources.requests.memory=memory' \ + --set 'meshGateway.initCopyConsulContainer.resources.requests.cpu=cpu' \ + --set 'meshGateway.initCopyConsulContainer.resources.limits.memory=memory2' \ + --set 'meshGateway.initCopyConsulContainer.resources.limits.cpu=cpu2' \ + . | tee /dev/stderr | + yq -r '.spec.template.spec.initContainers[0].resources' | tee /dev/stderr) + + local actual=$(echo $object | yq -r '.requests.memory' | tee /dev/stderr) + [ "${actual}" = "memory" ] + + local actual=$(echo $object | yq -r '.requests.cpu' | tee /dev/stderr) + [ "${actual}" = "cpu" ] + + local actual=$(echo $object | yq -r '.limits.memory' | tee /dev/stderr) + [ "${actual}" = "memory2" ] + + local actual=$(echo $object | yq -r '.limits.cpu' | tee /dev/stderr) + [ "${actual}" = "cpu2" ] +} + +#-------------------------------------------------------------------- +# mesh-gateway-init container resources + +@test "meshGateway/Deployment: init mesh-gateway-init container has default resources" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/mesh-gateway-deployment.yaml \ + --set 'meshGateway.enabled=true' \ + --set 'connectInject.enabled=true' \ + . | tee /dev/stderr | + yq -r '.spec.template.spec.initContainers[1].resources' | tee /dev/stderr) + [ $(echo "${actual}" | yq -r '.requests.memory') = "50Mi" ] [ $(echo "${actual}" | yq -r '.requests.cpu') = "50m" ] [ $(echo "${actual}" | yq -r '.limits.memory') = "50Mi" ] [ $(echo "${actual}" | yq -r '.limits.cpu') = "50m" ] } -@test "meshGateway/Deployment: init container resources can be set" { +@test "meshGateway/Deployment: init mesh-gateway-init container resources can be set" { cd `chart_dir` local object=$(helm template \ -s templates/mesh-gateway-deployment.yaml \ @@ -369,7 +502,51 @@ key2: value2' \ --set 'meshGateway.initServiceInitContainer.resources.limits.memory=memory2' \ --set 'meshGateway.initServiceInitContainer.resources.limits.cpu=cpu2' \ . | tee /dev/stderr | - yq -r '.spec.template.spec.initContainers[0].resources' | tee /dev/stderr) + yq -r '.spec.template.spec.initContainers[1].resources' | tee /dev/stderr) + + local actual=$(echo $object | yq -r '.requests.memory' | tee /dev/stderr) + [ "${actual}" = "memory" ] + + local actual=$(echo $object | yq -r '.requests.cpu' | tee /dev/stderr) + [ "${actual}" = "cpu" ] + + local actual=$(echo $object | yq -r '.limits.memory' | tee /dev/stderr) + [ "${actual}" = "memory2" ] + + local actual=$(echo $object | yq -r '.limits.cpu' | tee /dev/stderr) + [ "${actual}" = "cpu2" ] +} + +#-------------------------------------------------------------------- +# consul sidecar resources + +@test "meshGateway/Deployment: consul sidecar has default resources" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/mesh-gateway-deployment.yaml \ + --set 'meshGateway.enabled=true' \ + --set 'connectInject.enabled=true' \ + . 
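# Equivalent sketch of the consul sidecar resource overrides below,
# expressed as a values file rather than --set flags (values illustrative):
cat > /tmp/sidecar-values.yaml <<'EOF'
global:
  consulSidecarContainer:
    resources:
      requests: {memory: 100Mi, cpu: 100m}
      limits: {memory: 200Mi, cpu: 100m}
EOF
helm template -s templates/mesh-gateway-deployment.yaml \
  --set 'meshGateway.enabled=true' \
  --set 'connectInject.enabled=true' \
  -f /tmp/sidecar-values.yaml . \
  | yq -s -r '.[0].spec.template.spec.containers[1].resources'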
| tee /dev/stderr | + yq -r '.spec.template.spec.containers[1].resources' | tee /dev/stderr) + + [ $(echo "${actual}" | yq -r '.requests.memory') = "25Mi" ] + [ $(echo "${actual}" | yq -r '.requests.cpu') = "20m" ] + [ $(echo "${actual}" | yq -r '.limits.memory') = "50Mi" ] + [ $(echo "${actual}" | yq -r '.limits.cpu') = "20m" ] +} + +@test "meshGateway/Deployment: consul sidecar resources can be set" { + cd `chart_dir` + local object=$(helm template \ + -s templates/mesh-gateway-deployment.yaml \ + --set 'meshGateway.enabled=true' \ + --set 'connectInject.enabled=true' \ + --set 'global.consulSidecarContainer.resources.requests.memory=memory' \ + --set 'global.consulSidecarContainer.resources.requests.cpu=cpu' \ + --set 'global.consulSidecarContainer.resources.limits.memory=memory2' \ + --set 'global.consulSidecarContainer.resources.limits.cpu=cpu2' \ + . | tee /dev/stderr | + yq -s -r '.[0].spec.template.spec.containers[1].resources' | tee /dev/stderr) local actual=$(echo $object | yq -r '.requests.memory' | tee /dev/stderr) [ "${actual}" = "memory" ] @@ -384,6 +561,17 @@ key2: value2' \ [ "${actual}" = "cpu2" ] } +@test "meshGateway/Deployment: fails if global.lifecycleSidecarContainer is set" { + cd `chart_dir` + run helm template \ + -s templates/mesh-gateway-deployment.yaml \ + --set 'meshGateway.enabled=true' \ + --set 'connectInject.enabled=true' \ + --set 'global.lifecycleSidecarContainer.resources.requests.memory=100Mi' . + [ "$status" -eq 1 ] + [[ "$output" =~ "global.lifecycleSidecarContainer has been renamed to global.consulSidecarContainer. Please set values using global.consulSidecarContainer." ]] +} + #-------------------------------------------------------------------- # containerPort @@ -435,185 +623,214 @@ key2: value2' \ #-------------------------------------------------------------------- # manageSystemACLs -@test "meshGateway/Deployment: ACL specific flags are not set when acls are disabled" { +@test "meshGateway/Deployment: consul-logout preStop hook is added when ACLs are enabled" { cd `chart_dir` - local command=$(helm template \ + local actual=$(helm template \ -s templates/mesh-gateway-deployment.yaml \ --set 'meshGateway.enabled=true' \ --set 'connectInject.enabled=true' \ + --set 'global.acls.manageSystemACLs=true' \ . | tee /dev/stderr | - yq '[.spec.template.spec.containers[0].command[2]]' | tee /dev/stderr) - - local actual=$(echo $command | yq -r '. | any(contains("credential-type=login"))'| tee /dev/stderr) - [ "${actual}" = "false" ] - - local actual=$(echo $command | yq -r '. | any(contains("-login-bearer-path"))'| tee /dev/stderr) - [ "${actual}" = "false" ] - - local actual=$(echo $command | yq -r '. | any(contains("-login-meta"))'| tee /dev/stderr) - [ "${actual}" = "false" ] - - local actual=$(echo $command | yq -r '. | any(contains("-login-method"))'| tee /dev/stderr) - [ "${actual}" = "false" ] + yq '[.spec.template.spec.containers[0].lifecycle.preStop.exec.command[3]] | any(contains("/consul-bin/consul logout"))' | tee /dev/stderr) + [ "${actual}" = "true" ] } -@test "meshGateway/Deployment: ACL specific flags are set when acls are enabled" { +@test "meshGateway/Deployment: CONSUL_HTTP_TOKEN_FILE is not set when acls are disabled" { cd `chart_dir` - local command=$(helm template \ + local actual=$(helm template \ -s templates/mesh-gateway-deployment.yaml \ --set 'meshGateway.enabled=true' \ --set 'connectInject.enabled=true' \ - --set 'global.acls.manageSystemACLs=true' \ . 
| tee /dev/stderr | - yq '[.spec.template.spec.containers[0].command[2]]' | tee /dev/stderr) - - local actual=$(echo $command | yq -r '. | any(contains("credential-type=login"))'| tee /dev/stderr) - [ "${actual}" = "true" ] - - local actual=$(echo $command | yq -r '. | any(contains("-login-bearer-token-path=/var/run/secrets/kubernetes.io/serviceaccount/token"))'| tee /dev/stderr) - [ "${actual}" = "true" ] - - local actual=$(echo $command | yq -r '. | any(contains("-login-meta=pod=${NAMESPACE}/${POD_NAME}"))'| tee /dev/stderr) - [ "${actual}" = "true" ] - - local actual=$(echo $command | yq -r '. | any(contains("-login-auth-method=release-name-consul-k8s-component-auth-method"))'| tee /dev/stderr) - [ "${actual}" = "true" ] + yq '[.spec.template.spec.containers[0].env[0].name] | any(contains("CONSUL_HTTP_TOKEN_FILE"))' | tee /dev/stderr) + [ "${actual}" = "false" ] } -@test "meshGateway/Deployment: correct login-method and login-datacenter are set with federation is enabled and in secondary DC" { +@test "meshGateway/Deployment: CONSUL_HTTP_TOKEN_FILE is set when acls are enabled" { cd `chart_dir` - local command=$(helm template \ + local actual=$(helm template \ -s templates/mesh-gateway-deployment.yaml \ --set 'meshGateway.enabled=true' \ --set 'connectInject.enabled=true' \ --set 'global.acls.manageSystemACLs=true' \ - --set 'global.tls.enabled=true' \ - --set 'global.federation.enabled=true' \ - --set 'global.federation.primaryDatacenter=dc2' \ . | tee /dev/stderr | - yq '[.spec.template.spec.containers[0].command[2]]' | tee /dev/stderr) - - local actual=$(echo $command | yq -r '. | any(contains("-login-auth-method=release-name-consul-k8s-component-auth-method-dc1"))'| tee /dev/stderr) - [ "${actual}" = "true" ] - - local actual=$(echo $command | yq -r '. | any(contains("-login-datacenter=dc2"))'| tee /dev/stderr) - [ "${actual}" = "true" ] + yq '[.spec.template.spec.containers[0].env[2].name] | any(contains("CONSUL_HTTP_TOKEN_FILE"))' | tee /dev/stderr) + [ "${actual}" = "true" ] } -@test "meshGateway/Deployment: correct login-partition is set with partitions is enabled" { +@test "meshGateway/Deployment: init container is created when global.acls.manageSystemACLs=true and has correct command and environment with tls disabled" { cd `chart_dir` - local command=$(helm template \ + local object=$(helm template \ -s templates/mesh-gateway-deployment.yaml \ --set 'meshGateway.enabled=true' \ --set 'connectInject.enabled=true' \ --set 'global.acls.manageSystemACLs=true' \ - --set 'global.adminPartitions.enabled=true' \ - --set 'global.adminPartitions.name=other-partition' \ - --set 'global.enableConsulNamespaces=true' \ . | tee /dev/stderr | - yq '[.spec.template.spec.containers[0].command[2]]' | tee /dev/stderr) + yq '.spec.template.spec.initContainers[1]' | tee /dev/stderr) + + local actual=$(echo $object | + yq -r '.name' | tee /dev/stderr) + [ "${actual}" = "mesh-gateway-init" ] - local actual=$(echo $command | yq -r '. 
| any(contains("-login-partition=other-partition"))'| tee /dev/stderr) - [ "${actual}" = "true" ] + local actual=$(echo $object | + yq -r '.command | any(contains("consul-k8s-control-plane acl-init"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq '[.env[2].name] | any(contains("CONSUL_HTTP_ADDR"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq '[.env[2].value] | any(contains("http://$(HOST_IP):8500"))' | tee /dev/stderr) + echo $actual + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq -r '.command | any(contains("-consul-api-timeout=5s"))' | tee /dev/stderr) + [ "${actual}" = "true" ] } -@test "meshGateway/Deployment: init container has correct environment with global.acls.manageSystemACLs=true" { +@test "meshGateway/Deployment: init container is created when global.acls.manageSystemACLs=true and has correct command and environment with tls enabled" { cd `chart_dir` local object=$(helm template \ -s templates/mesh-gateway-deployment.yaml \ --set 'meshGateway.enabled=true' \ --set 'connectInject.enabled=true' \ + --set 'global.tls.enabled=true' \ --set 'global.acls.manageSystemACLs=true' \ . | tee /dev/stderr | - yq '.spec.template.spec.initContainers[0]' | tee /dev/stderr) - - local actual=$(echo $object | - yq -r '.name' | tee /dev/stderr) - [ "${actual}" = "mesh-gateway-init" ] + yq '.spec.template.spec.initContainers[] | select(.name == "mesh-gateway-init")' | tee /dev/stderr) local actual=$(echo $object | - yq '[.env[7].name] | any(contains("CONSUL_LOGIN_AUTH_METHOD"))' | tee /dev/stderr) + yq -r '.command | any(contains("consul-k8s-control-plane acl-init"))' | tee /dev/stderr) [ "${actual}" = "true" ] local actual=$(echo $object | - yq '[.env[7].value] | any(contains("release-name-consul-k8s-component-auth-method"))' | tee /dev/stderr) + yq '[.env[2].name] | any(contains("CONSUL_CACERT"))' | tee /dev/stderr) [ "${actual}" = "true" ] local actual=$(echo $object | - yq '[.env[8].name] | any(contains("CONSUL_LOGIN_DATACENTER"))' | tee /dev/stderr) + yq '[.env[3].name] | any(contains("CONSUL_HTTP_ADDR"))' | tee /dev/stderr) [ "${actual}" = "true" ] local actual=$(echo $object | - yq '[.env[8].value] | any(contains("dc1"))' | tee /dev/stderr) + yq '[.env[3].value] | any(contains("https://$(HOST_IP):8501"))' | tee /dev/stderr) + echo $actual [ "${actual}" = "true" ] local actual=$(echo $object | - yq '[.env[9].name] | any(contains("CONSUL_LOGIN_META"))' | tee /dev/stderr) + yq '.volumeMounts[2] | any(contains("consul-ca-cert"))' | tee /dev/stderr) [ "${actual}" = "true" ] local actual=$(echo $object | - yq '[.env[9].value] | any(contains("component=mesh-gateway,pod=$(NAMESPACE)/$(POD_NAME)"))' | tee /dev/stderr) + yq -r '.command | any(contains("-consul-api-timeout=5s"))' | tee /dev/stderr) [ "${actual}" = "true" ] } -@test "meshGateway/Deployment: init container has correct environment variables when tls enabled" { +@test "meshGateway/Deployment: init container is created when global.acls.manageSystemACLs=true and has correct command with Partitions enabled" { cd `chart_dir` local object=$(helm template \ -s templates/mesh-gateway-deployment.yaml \ --set 'meshGateway.enabled=true' \ --set 'connectInject.enabled=true' \ --set 'global.tls.enabled=true' \ + --set 'global.enableConsulNamespaces=true' \ + --set 'global.adminPartitions.enabled=true' \ + --set 'global.adminPartitions.name=default' \ + --set 'global.acls.manageSystemACLs=true' \ . 
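# Sketch of the address flip the two acl-init tests above assert:
# without TLS the init container talks to http://$(HOST_IP):8500, with
# TLS to https://$(HOST_IP):8501. Same tooling assumptions as the suite.
for tls in false true; do
  helm template -s templates/mesh-gateway-deployment.yaml \
    --set 'meshGateway.enabled=true' \
    --set 'connectInject.enabled=true' \
    --set 'global.acls.manageSystemACLs=true' \
    --set "global.tls.enabled=${tls}" . \
    | yq -r '.spec.template.spec.initContainers[]
        | select(.name == "mesh-gateway-init")
        | .env[] | select(.name == "CONSUL_HTTP_ADDR") | .value'
done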
| tee /dev/stderr | yq '.spec.template.spec.initContainers[] | select(.name == "mesh-gateway-init")' | tee /dev/stderr) local actual=$(echo $object | - yq '[.env[7].name] | any(contains("CONSUL_USE_TLS"))' | tee /dev/stderr) + yq -r '.command | any(contains("consul-k8s-control-plane acl-init"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq -r '.command | any(contains("-acl-auth-method=release-name-consul-k8s-component-auth-method"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq -r '.command | any(contains("-partition=default"))' | tee /dev/stderr) [ "${actual}" = "true" ] local actual=$(echo $object | - yq '[.env[7].value] | any(contains("true"))' | tee /dev/stderr) + yq '[.env[2].name] | any(contains("CONSUL_CACERT"))' | tee /dev/stderr) [ "${actual}" = "true" ] local actual=$(echo $object | - yq '[.env[8].name] | any(contains("CONSUL_CACERT_FILE"))' | tee /dev/stderr) + yq '[.env[3].name] | any(contains("CONSUL_HTTP_ADDR"))' | tee /dev/stderr) [ "${actual}" = "true" ] local actual=$(echo $object | - yq '[.env[8].value] | any(contains("/consul/tls/ca/tls.crt"))' | tee /dev/stderr) + yq '[.env[3].value] | any(contains("https://$(HOST_IP):8501"))' | tee /dev/stderr) + echo $actual + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq '.volumeMounts[2] | any(contains("consul-ca-cert"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq -r '.command | any(contains("-consul-api-timeout=5s"))' | tee /dev/stderr) [ "${actual}" = "true" ] } -@test "meshGateway/Deployment: init container has correct envs with Partitions enabled" { +@test "meshGateway/Deployment: init container is created when global.acls.manageSystemACLs=true and has correct command and environment with tls enabled and autoencrypt enabled" { cd `chart_dir` local object=$(helm template \ -s templates/mesh-gateway-deployment.yaml \ --set 'meshGateway.enabled=true' \ --set 'connectInject.enabled=true' \ - --set 'global.enableConsulNamespaces=true' \ - --set 'global.adminPartitions.enabled=true' \ - --set 'global.adminPartitions.name=default' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ --set 'global.acls.manageSystemACLs=true' \ . 
| tee /dev/stderr | yq '.spec.template.spec.initContainers[] | select(.name == "mesh-gateway-init")' | tee /dev/stderr) local actual=$(echo $object | - yq '[.env[7].name] | any(contains("CONSUL_PARTITION"))' | tee /dev/stderr) + yq -r '.command | any(contains("consul-k8s-control-plane acl-init"))' | tee /dev/stderr) [ "${actual}" = "true" ] local actual=$(echo $object | - yq '[.env[7].value] | any(contains("default"))' | tee /dev/stderr) + yq '[.env[2].name] | any(contains("CONSUL_CACERT"))' | tee /dev/stderr) [ "${actual}" = "true" ] local actual=$(echo $object | - yq '[.env[8].name] | any(contains("CONSUL_LOGIN_PARTITION"))' | tee /dev/stderr) + yq '[.env[3].name] | any(contains("CONSUL_HTTP_ADDR"))' | tee /dev/stderr) [ "${actual}" = "true" ] local actual=$(echo $object | - yq '[.env[8].value] | any(contains("default"))' | tee /dev/stderr) + yq '[.env[3].value] | any(contains("https://$(HOST_IP):8501"))' | tee /dev/stderr) + echo $actual [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq '.volumeMounts[2] | any(contains("consul-auto-encrypt-ca-cert"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq -r '.command | any(contains("-consul-api-timeout=5s"))' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "meshGateway/Deployment: auto-encrypt init container is created and is the first init-container when global.acls.manageSystemACLs=true and has correct command and environment with tls enabled and autoencrypt enabled" { + cd `chart_dir` + local object=$(helm template \ + -s templates/mesh-gateway-deployment.yaml \ + --set 'meshGateway.enabled=true' \ + --set 'connectInject.enabled=true' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ + --set 'global.acls.manageSystemACLs=true' \ + . | tee /dev/stderr | + yq '.spec.template.spec.initContainers[1]' | tee /dev/stderr) + + local actual=$(echo $object | + yq -r '.name' | tee /dev/stderr) + [ "${actual}" = "get-auto-encrypt-client-ca" ] } -@test "meshGateway/Deployment: init container is created when global.acls.manageSystemACLs=true and has correct env when federation enabled in non-primary datacenter" { +@test "meshGateway/Deployment: init container is created when global.acls.manageSystemACLs=true and has correct command when federation enabled in non-primary datacenter" { cd `chart_dir` local object=$(helm template \ -s templates/mesh-gateway-deployment.yaml \ @@ -625,24 +842,25 @@ key2: value2' \ --set 'meshGateway.enabled=true' \ --set 'connectInject.enabled=true' \ --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ --set 'global.acls.manageSystemACLs=true' \ . 
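# With auto-encrypt on, get-auto-encrypt-client-ca is injected as the
# second init container, which is what the [1] index above relies on.
# Sketch of the resulting order (first name assumed, not asserted here):
helm template -s templates/mesh-gateway-deployment.yaml \
  --set 'meshGateway.enabled=true' \
  --set 'connectInject.enabled=true' \
  --set 'global.tls.enabled=true' \
  --set 'global.tls.enableAutoEncrypt=true' \
  --set 'global.acls.manageSystemACLs=true' . \
  | yq -r '.spec.template.spec.initContainers[].name'
# sketch: copy-consul-bin, get-auto-encrypt-client-ca, mesh-gateway-init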
| tee /dev/stderr | yq '.spec.template.spec.initContainers[] | select(.name == "mesh-gateway-init")' | tee /dev/stderr) local actual=$(echo $object | - yq '[.env[9].name] | any(contains("CONSUL_LOGIN_AUTH_METHOD"))' | tee /dev/stderr) + yq -r '.command | any(contains("consul-k8s-control-plane acl-init"))' | tee /dev/stderr) [ "${actual}" = "true" ] local actual=$(echo $object | - yq '[.env[9].value] | any(contains("release-name-consul-k8s-component-auth-method-dc2"))' | tee /dev/stderr) + yq -r '.command | any(contains("-acl-auth-method=release-name-consul-k8s-component-auth-method-dc2"))' | tee /dev/stderr) [ "${actual}" = "true" ] local actual=$(echo $object | - yq '[.env[10].name] | any(contains("CONSUL_LOGIN_DATACENTER"))' | tee /dev/stderr) + yq -r '.command | any(contains("-primary-datacenter=dc1"))' | tee /dev/stderr) [ "${actual}" = "true" ] local actual=$(echo $object | - yq '[.env[10].value] | any(contains("dc1"))' | tee /dev/stderr) + yq -r '.command | any(contains("-consul-api-timeout=5s"))' | tee /dev/stderr) [ "${actual}" = "true" ] } @@ -751,54 +969,41 @@ key2: value2' \ #-------------------------------------------------------------------- # global.tls.enabled -@test "meshGateway/Deployment: sets TLS args when global.tls.disabled" { +@test "meshGateway/Deployment: sets TLS env variables when global.tls.enabled" { cd `chart_dir` - local flags=$(helm template \ + local env=$(helm template \ -s templates/mesh-gateway-deployment.yaml \ --set 'meshGateway.enabled=true' \ --set 'connectInject.enabled=true' \ - --set 'global.tls.enabled=false' \ + --set 'global.tls.enabled=true' \ . | tee /dev/stderr | - yq -r '.spec.template.spec.containers[0].command[]' | tee /dev/stderr) + yq -r '.spec.template.spec.containers[0].env[]' | tee /dev/stderr) - local actual=$(echo $flags | yq -r '. | contains("-tls-disabled")' | tee /dev/stderr) - [ "${actual}" = 'true' ] -} + local actual=$(echo $env | jq -r '. | select(.name == "CONSUL_HTTP_ADDR") | .value' | tee /dev/stderr) + [ "${actual}" = 'https://$(HOST_IP):8501' ] -@test "meshGateway/Deployment: sets TLS args when global.tls.enabled" { - cd `chart_dir` - local flags=$(helm template \ - -s templates/mesh-gateway-deployment.yaml \ - --set 'meshGateway.enabled=true' \ - --set 'connectInject.enabled=true' \ - --set 'global.tls.enabled=true' \ - . | tee /dev/stderr | - yq -r '.spec.template.spec.containers[0].command[]' | tee /dev/stderr) + local actual=$(echo $env | jq -r '. | select(.name == "CONSUL_GRPC_ADDR") | .value' | tee /dev/stderr) + [ "${actual}" = 'https://$(HOST_IP):8502' ] - local actual=$(echo $flags | yq -r '. | contains("-ca-certs=/consul/tls/ca/tls.crt")' | tee /dev/stderr) - [ "${actual}" = 'true' ] + local actual=$(echo $env | jq -r '. | select(.name == "CONSUL_CACERT") | .value' | tee /dev/stderr) + [ "${actual}" = "/consul/tls/ca/tls.crt" ] } -@test "meshGateway/Deployment: sets external server args when global.tls.enabled and externalServers.enabled" { +@test "meshGateway/Deployment: sets TLS env variables in consul sidecar when global.tls.enabled" { cd `chart_dir` - local flags=$(helm template \ + local env=$(helm template \ -s templates/mesh-gateway-deployment.yaml \ --set 'meshGateway.enabled=true' \ --set 'connectInject.enabled=true' \ --set 'global.tls.enabled=true' \ - --set 'server.enabled=false' \ - --set 'externalServers.enabled=true' \ - --set 'externalServers.useSystemRoots=true' \ - --set 'externalServers.tlsServerName=foo.tls.server' \ - --set 'externalServers.hosts[0]=host' \ . 
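# Tiny helper sketch (hypothetical, not in _helpers) for the env-var
# assertions above; reads a rendered deployment on stdin:
env_value() { # usage: helm template ... | env_value <container-index> <NAME>
  yq -r ".spec.template.spec.containers[$1].env[] | select(.name == \"$2\") | .value"
}
helm template -s templates/mesh-gateway-deployment.yaml \
  --set 'meshGateway.enabled=true' \
  --set 'connectInject.enabled=true' \
  --set 'global.tls.enabled=true' . | env_value 0 CONSUL_GRPC_ADDR
# sketch: prints https://$(HOST_IP):8502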
| tee /dev/stderr | - yq -r '.spec.template.spec.containers[0].command[]' | tee /dev/stderr) + yq -r '.spec.template.spec.containers[1].env[]' | tee /dev/stderr) - local actual=$(echo $flags | yq -r '. | contains("-ca-certs=/consul/tls/ca/tls.crt")' | tee /dev/stderr) - [ "${actual}" = 'false' ] + local actual=$(echo $env | jq -r '. | select(.name == "CONSUL_HTTP_ADDR") | .value' | tee /dev/stderr) + [ "${actual}" = 'https://$(HOST_IP):8501' ] - local actual=$(echo $flags | yq -r '. | contains("-tls-server-name=foo.tls.server")' | tee /dev/stderr) - [ "${actual}" = 'true' ] + local actual=$(echo $env | jq -r '. | select(.name == "CONSUL_CACERT") | .value' | tee /dev/stderr) + [ "${actual}" = "/consul/tls/ca/tls.crt" ] } @test "meshGateway/Deployment: can overwrite CA secret with the provided one" { @@ -837,56 +1042,238 @@ key2: value2' \ [ "${actual}" != "" ] } -@test "meshGateway/Deployment: CA cert volume mount present when TLS is enabled" { +#-------------------------------------------------------------------- +# global.tls.enableAutoEncrypt + +@test "meshGateway/Deployment: consul-auto-encrypt-ca-cert volume is added when TLS with auto-encrypt is enabled" { cd `chart_dir` local actual=$(helm template \ -s templates/mesh-gateway-deployment.yaml \ --set 'meshGateway.enabled=true' \ --set 'connectInject.enabled=true' \ --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ . | tee /dev/stderr | - yq '.spec.template.spec.containers[0].volumeMounts[] | select(.name == "consul-ca-cert")' | tee /dev/stderr) - [ "${actual}" != "" ] + yq '.spec.template.spec.volumes[] | select(.name == "consul-auto-encrypt-ca-cert") | length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] } -@test "meshGateway/Deployment: CA cert volume is not present when TLS is enabled with externalServers and useSystemRoots" { +@test "meshGateway/Deployment: consul-auto-encrypt-ca-cert volumeMount is added when TLS with auto-encrypt is enabled" { cd `chart_dir` local actual=$(helm template \ -s templates/mesh-gateway-deployment.yaml \ --set 'meshGateway.enabled=true' \ --set 'connectInject.enabled=true' \ --set 'global.tls.enabled=true' \ - --set 'server.enabled=false' \ - --set 'externalServers.enabled=true' \ - --set 'externalServers.hosts[0]=consul' \ - --set 'externalServers.useSystemRoots=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ . | tee /dev/stderr | - yq '.spec.template.spec.volumes[] | select(.name == "consul-ca-cert")' | tee /dev/stderr) - [ "${actual}" = "" ] + yq '.spec.template.spec.containers[0].volumeMounts[] | select(.name == "consul-auto-encrypt-ca-cert") | length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] } -@test "meshGateway/Deployment: CA cert volume mount is not present when TLS is enabled with externalServers and useSystemRoots" { +@test "meshGateway/Deployment: get-auto-encrypt-client-ca init container is created when TLS with auto-encrypt is enabled" { cd `chart_dir` local actual=$(helm template \ -s templates/mesh-gateway-deployment.yaml \ --set 'meshGateway.enabled=true' \ --set 'connectInject.enabled=true' \ --set 'global.tls.enabled=true' \ - --set 'server.enabled=false' \ + --set 'global.tls.enableAutoEncrypt=true' \ + . 
| tee /dev/stderr | + yq '.spec.template.spec.initContainers[] | select(.name == "get-auto-encrypt-client-ca") | length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "meshGateway/Deployment: consul-ca-cert volume is not added if externalServers.enabled=true and externalServers.useSystemRoots=true" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/mesh-gateway-deployment.yaml \ + --set 'meshGateway.enabled=true' \ + --set 'connectInject.enabled=true' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ --set 'externalServers.enabled=true' \ - --set 'externalServers.hosts[0]=consul' \ + --set 'externalServers.hosts[0]=foo.com' \ --set 'externalServers.useSystemRoots=true' \ . | tee /dev/stderr | - yq '.spec.template.spec.containers[0].volumeMounts[] | select(.name == "consul-ca-cert")' | tee /dev/stderr) + yq '.spec.template.spec.volumes[] | select(.name == "consul-ca-cert")' | tee /dev/stderr) [ "${actual}" = "" ] } ##-------------------------------------------------------------------- -## mesh-gateway service annotations +## mesh-gateway-init init container -@test "meshGateway/Deployment: mesh-gateway annotations containerPort and wanAddress.port can be changed" { +@test "meshGateway/Deployment: mesh-gateway-init init container" { cd `chart_dir` - local annotations=$(helm template \ + local actual=$(helm template \ + -s templates/mesh-gateway-deployment.yaml \ + --set 'meshGateway.enabled=true' \ + --set 'connectInject.enabled=true' \ + . | tee /dev/stderr | + yq -r '.spec.template.spec.initContainers | map(select(.name == "mesh-gateway-init"))[0] | .command[2]' | tee /dev/stderr) + + exp='consul-k8s-control-plane service-address \ + -log-level=info \ + -log-json=false \ + -k8s-namespace=default \ + -name=release-name-consul-mesh-gateway \ + -output-file=/tmp/address.txt +WAN_ADDR="$(cat /tmp/address.txt)" +WAN_PORT="443" + +cat > /consul/service/service.hcl << EOF +service { + kind = "mesh-gateway" + name = "mesh-gateway" + port = 8443 + address = "${POD_IP}" + tagged_addresses { + lan { + address = "${POD_IP}" + port = 8443 + } + wan { + address = "${WAN_ADDR}" + port = ${WAN_PORT} + } + } + checks = [ + { + name = "Mesh Gateway Listening" + interval = "10s" + tcp = "${POD_IP}:8443" + deregister_critical_service_after = "6h" + } + ] +} +EOF + +/consul-bin/consul services register \ + /consul/service/service.hcl' + + [ "${actual}" = "${exp}" ] +} + +@test "meshGateway/Deployment: mesh-gateway-init init container with acls.manageSystemACLs=true" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/mesh-gateway-deployment.yaml \ + --set 'meshGateway.enabled=true' \ + --set 'connectInject.enabled=true' \ + --set 'global.acls.manageSystemACLs=true' \ + . 
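# Debugging sketch for the whole-command comparisons in this section:
# when [ "${actual}" = "${exp}" ] fails, a unified diff of the two
# heredoc strings reads far better than the raw bats failure output.
diff -u <(printf '%s\n' "${exp}") <(printf '%s\n' "${actual}")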
| tee /dev/stderr | + yq -r '.spec.template.spec.initContainers | map(select(.name == "mesh-gateway-init"))[0] | .command[2]' | tee /dev/stderr) + + exp='consul-k8s-control-plane acl-init \ + -component-name=mesh-gateway \ + -token-sink-file=/consul/service/acl-token \ + -acl-auth-method=release-name-consul-k8s-component-auth-method \ + -consul-api-timeout=5s \ + -log-level=info \ + -log-json=false + +consul-k8s-control-plane service-address \ + -log-level=info \ + -log-json=false \ + -k8s-namespace=default \ + -name=release-name-consul-mesh-gateway \ + -output-file=/tmp/address.txt +WAN_ADDR="$(cat /tmp/address.txt)" +WAN_PORT="443" + +cat > /consul/service/service.hcl << EOF +service { + kind = "mesh-gateway" + name = "mesh-gateway" + port = 8443 + address = "${POD_IP}" + tagged_addresses { + lan { + address = "${POD_IP}" + port = 8443 + } + wan { + address = "${WAN_ADDR}" + port = ${WAN_PORT} + } + } + checks = [ + { + name = "Mesh Gateway Listening" + interval = "10s" + tcp = "${POD_IP}:8443" + deregister_critical_service_after = "6h" + } + ] +} +EOF + +/consul-bin/consul services register \ + -token-file=/consul/service/acl-token \ + /consul/service/service.hcl' + + [ "${actual}" = "${exp}" ] +} + +@test "meshGateway/Deployment: mesh-gateway-init init container with global.federation.enabled=true" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/mesh-gateway-deployment.yaml \ + --set 'meshGateway.enabled=true' \ + --set 'connectInject.enabled=true' \ + --set 'global.federation.enabled=true' \ + --set 'global.tls.enabled=true' \ + . | tee /dev/stderr | + yq -r '.spec.template.spec.initContainers | map(select(.name == "mesh-gateway-init"))[0] | .command[2]' | tee /dev/stderr) + + exp='consul-k8s-control-plane service-address \ + -log-level=info \ + -log-json=false \ + -k8s-namespace=default \ + -name=release-name-consul-mesh-gateway \ + -output-file=/tmp/address.txt +WAN_ADDR="$(cat /tmp/address.txt)" +WAN_PORT="443" + +cat > /consul/service/service.hcl << EOF +service { + kind = "mesh-gateway" + name = "mesh-gateway" + meta { + consul-wan-federation = "1" + } + port = 8443 + address = "${POD_IP}" + tagged_addresses { + lan { + address = "${POD_IP}" + port = 8443 + } + wan { + address = "${WAN_ADDR}" + port = ${WAN_PORT} + } + } + checks = [ + { + name = "Mesh Gateway Listening" + interval = "10s" + tcp = "${POD_IP}:8443" + deregister_critical_service_after = "6h" + } + ] +} +EOF + +/consul-bin/consul services register \ + /consul/service/service.hcl' + + [ "${actual}" = "${exp}" ] +} + +@test "meshGateway/Deployment: mesh-gateway-init init container containerPort and wanAddress.port can be changed" { + cd `chart_dir` + local actual=$(helm template \ -s templates/mesh-gateway-deployment.yaml \ --set 'meshGateway.enabled=true' \ --set 'connectInject.enabled=true' \ @@ -894,56 +1281,140 @@ key2: value2' \ --set 'meshGateway.wanAddress.source=NodeIP' \ --set 'meshGateway.wanAddress.port=9999' \ . 
| tee /dev/stderr | - yq -r '.spec.template.metadata.annotations ' | tee /dev/stderr) - - local actual=$(echo $annotations | yq -r '.["consul.hashicorp.com/mesh-gateway-container-port"]' | tee /dev/stderr) - [ "${actual}" = "8888" ] - - local actual=$(echo $annotations | yq -r '.["consul.hashicorp.com/gateway-wan-address-source"]' | tee /dev/stderr) - [ "${actual}" = "NodeIP" ] - - local actual=$(echo $annotations | yq -r '.["consul.hashicorp.com/gateway-wan-port"]' | tee /dev/stderr) - [ "${actual}" = "9999" ] -} - -@test "meshGateway/Deployment: mesh-gateway annotations wanAddress.source=NodeIP" { + yq -r '.spec.template.spec.initContainers | map(select(.name == "mesh-gateway-init"))[0] | .command[2]' | tee /dev/stderr) + + exp='WAN_ADDR="${HOST_IP}" +WAN_PORT="9999" + +cat > /consul/service/service.hcl << EOF +service { + kind = "mesh-gateway" + name = "mesh-gateway" + port = 8888 + address = "${POD_IP}" + tagged_addresses { + lan { + address = "${POD_IP}" + port = 8888 + } + wan { + address = "${WAN_ADDR}" + port = ${WAN_PORT} + } + } + checks = [ + { + name = "Mesh Gateway Listening" + interval = "10s" + tcp = "${POD_IP}:8888" + deregister_critical_service_after = "6h" + } + ] +} +EOF + +/consul-bin/consul services register \ + /consul/service/service.hcl' + + [ "${actual}" = "${exp}" ] +} + +@test "meshGateway/Deployment: mesh-gateway-init init container wanAddress.source=NodeIP" { cd `chart_dir` - local annotations=$(helm template \ + local actual=$(helm template \ -s templates/mesh-gateway-deployment.yaml \ --set 'meshGateway.enabled=true' \ --set 'connectInject.enabled=true' \ --set 'meshGateway.wanAddress.source=NodeIP' \ . | tee /dev/stderr | - yq -r '.spec.template.metadata.annotations ' | tee /dev/stderr) + yq -r '.spec.template.spec.initContainers | map(select(.name == "mesh-gateway-init"))[0] | .command[2]' | tee /dev/stderr) - local actual=$(echo $annotations | yq -r '.["consul.hashicorp.com/mesh-gateway-container-port"]' | tee /dev/stderr) - [ "${actual}" = "8443" ] + exp='WAN_ADDR="${HOST_IP}" +WAN_PORT="443" - local actual=$(echo $annotations | yq -r '.["consul.hashicorp.com/gateway-wan-address-source"]' | tee /dev/stderr) - [ "${actual}" = "NodeIP" ] +cat > /consul/service/service.hcl << EOF +service { + kind = "mesh-gateway" + name = "mesh-gateway" + port = 8443 + address = "${POD_IP}" + tagged_addresses { + lan { + address = "${POD_IP}" + port = 8443 + } + wan { + address = "${WAN_ADDR}" + port = ${WAN_PORT} + } + } + checks = [ + { + name = "Mesh Gateway Listening" + interval = "10s" + tcp = "${POD_IP}:8443" + deregister_critical_service_after = "6h" + } + ] +} +EOF - local actual=$(echo $annotations | yq -r '.["consul.hashicorp.com/gateway-wan-port"]' | tee /dev/stderr) - [ "${actual}" = "443" ] +/consul-bin/consul services register \ + /consul/service/service.hcl' + + [ "${actual}" = "${exp}" ] } -@test "meshGateway/Deployment: mesh-gateway annotations wanAddress.source=NodeName" { +@test "meshGateway/Deployment: mesh-gateway-init init container wanAddress.source=NodeName" { cd `chart_dir` - local annotations=$(helm template \ + local obj=$(helm template \ -s templates/mesh-gateway-deployment.yaml \ --set 'meshGateway.enabled=true' \ --set 'connectInject.enabled=true' \ --set 'meshGateway.wanAddress.source=NodeName' \ - . | tee /dev/stderr | - yq -r '.spec.template.metadata.annotations ' | tee /dev/stderr) + . 
| tee /dev/stderr) - local actual=$(echo $annotations | yq -r '.["consul.hashicorp.com/mesh-gateway-container-port"]' | tee /dev/stderr) - [ "${actual}" = "8443" ] - - local actual=$(echo $annotations | yq -r '.["consul.hashicorp.com/gateway-wan-address-source"]' | tee /dev/stderr) - [ "${actual}" = "NodeName" ] + local actual=$(echo "$obj" | + yq -r '.spec.template.spec.containers[0].env | map(select(.name == "NODE_NAME")) | length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] - local actual=$(echo $annotations | yq -r '.["consul.hashicorp.com/gateway-wan-port"]' | tee /dev/stderr) - [ "${actual}" = "443" ] + local actual=$(echo "$obj" | + yq -r '.spec.template.spec.initContainers | map(select(.name == "mesh-gateway-init"))[0] | .command[2]' | tee /dev/stderr) + + exp='WAN_ADDR="${NODE_NAME}" +WAN_PORT="443" + +cat > /consul/service/service.hcl << EOF +service { + kind = "mesh-gateway" + name = "mesh-gateway" + port = 8443 + address = "${POD_IP}" + tagged_addresses { + lan { + address = "${POD_IP}" + port = 8443 + } + wan { + address = "${WAN_ADDR}" + port = ${WAN_PORT} + } + } + checks = [ + { + name = "Mesh Gateway Listening" + interval = "10s" + tcp = "${POD_IP}:8443" + deregister_critical_service_after = "6h" + } + ] +} +EOF + +/consul-bin/consul services register \ + /consul/service/service.hcl' + + [ "${actual}" = "${exp}" ] } @test "meshGateway/Deployment: mesh-gateway-init init container wanAddress.source=Static fails if wanAddress.static is empty" { @@ -955,76 +1426,174 @@ key2: value2' \ --set 'meshGateway.wanAddress.source=Static' \ --set 'meshGateway.wanAddress.static=' \ . + [ "$status" -eq 1 ] [[ "$output" =~ "if meshGateway.wanAddress.source=Static then meshGateway.wanAddress.static cannot be empty" ]] } @test "meshGateway/Deployment: mesh-gateway-init init container wanAddress.source=Static" { cd `chart_dir` - local annotations=$(helm template \ + local actual=$(helm template \ -s templates/mesh-gateway-deployment.yaml \ --set 'meshGateway.enabled=true' \ --set 'connectInject.enabled=true' \ --set 'meshGateway.wanAddress.source=Static' \ --set 'meshGateway.wanAddress.static=example.com' \ . 
| tee /dev/stderr | - yq -r '.spec.template.metadata.annotations ' | tee /dev/stderr) - - local actual=$(echo $annotations | yq -r '.["consul.hashicorp.com/mesh-gateway-container-port"]' | tee /dev/stderr) - [ "${actual}" = "8443" ] - - local actual=$(echo $annotations | yq -r '.["consul.hashicorp.com/gateway-wan-address-source"]' | tee /dev/stderr) - [ "${actual}" = "Static" ] - - local actual=$(echo $annotations | yq -r '.["consul.hashicorp.com/gateway-wan-address-static"]' | tee /dev/stderr) - [ "${actual}" = "example.com" ] + yq -r '.spec.template.spec.initContainers | map(select(.name == "mesh-gateway-init"))[0] | .command[2]' | tee /dev/stderr) + + exp='WAN_ADDR="example.com" +WAN_PORT="443" + +cat > /consul/service/service.hcl << EOF +service { + kind = "mesh-gateway" + name = "mesh-gateway" + port = 8443 + address = "${POD_IP}" + tagged_addresses { + lan { + address = "${POD_IP}" + port = 8443 + } + wan { + address = "${WAN_ADDR}" + port = ${WAN_PORT} + } + } + checks = [ + { + name = "Mesh Gateway Listening" + interval = "10s" + tcp = "${POD_IP}:8443" + deregister_critical_service_after = "6h" + } + ] +} +EOF + +/consul-bin/consul services register \ + /consul/service/service.hcl' + + [ "${actual}" = "${exp}" ] +} + +@test "meshGateway/Deployment: mesh-gateway-init init container wanAddress.source=Service fails if service.enable is false" { + cd `chart_dir` + run helm template \ + -s templates/mesh-gateway-deployment.yaml \ + --set 'meshGateway.enabled=true' \ + --set 'connectInject.enabled=true' \ + --set 'meshGateway.wanAddress.source=Service' \ + --set 'meshGateway.service.enabled=false' \ + . - local actual=$(echo $annotations | yq -r '.["consul.hashicorp.com/gateway-wan-port"]' | tee /dev/stderr) - [ "${actual}" = "443" ] + [ "$status" -eq 1 ] + [[ "$output" =~ "if meshGateway.wanAddress.source=Service then meshGateway.service.enabled must be set to true" ]] } @test "meshGateway/Deployment: mesh-gateway-init init container wanAddress.source=Service, type=LoadBalancer" { cd `chart_dir` - local annotations=$(helm template \ + local actual=$(helm template \ -s templates/mesh-gateway-deployment.yaml \ --set 'meshGateway.enabled=true' \ --set 'connectInject.enabled=true' \ --set 'meshGateway.wanAddress.source=Service' \ --set 'meshGateway.wanAddress.port=ignored' \ + --set 'meshGateway.service.enabled=true' \ --set 'meshGateway.service.type=LoadBalancer' \ . 
| tee /dev/stderr | - yq -r '.spec.template.metadata.annotations ' | tee /dev/stderr) - - local actual=$(echo $annotations | yq -r '.["consul.hashicorp.com/mesh-gateway-container-port"]' | tee /dev/stderr) - [ "${actual}" = "8443" ] - - local actual=$(echo $annotations | yq -r '.["consul.hashicorp.com/gateway-wan-address-source"]' | tee /dev/stderr) - [ "${actual}" = "Service" ] - - local actual=$(echo $annotations | yq -r '.["consul.hashicorp.com/gateway-wan-port"]' | tee /dev/stderr) - [ "${actual}" = "443" ] + yq -r '.spec.template.spec.initContainers | map(select(.name == "mesh-gateway-init"))[0] | .command[2]' | tee /dev/stderr) + + exp='consul-k8s-control-plane service-address \ + -log-level=info \ + -log-json=false \ + -k8s-namespace=default \ + -name=release-name-consul-mesh-gateway \ + -output-file=/tmp/address.txt +WAN_ADDR="$(cat /tmp/address.txt)" +WAN_PORT="443" + +cat > /consul/service/service.hcl << EOF +service { + kind = "mesh-gateway" + name = "mesh-gateway" + port = 8443 + address = "${POD_IP}" + tagged_addresses { + lan { + address = "${POD_IP}" + port = 8443 + } + wan { + address = "${WAN_ADDR}" + port = ${WAN_PORT} + } + } + checks = [ + { + name = "Mesh Gateway Listening" + interval = "10s" + tcp = "${POD_IP}:8443" + deregister_critical_service_after = "6h" + } + ] +} +EOF + +/consul-bin/consul services register \ + /consul/service/service.hcl' + + [ "${actual}" = "${exp}" ] } @test "meshGateway/Deployment: mesh-gateway-init init container wanAddress.source=Service, type=NodePort" { cd `chart_dir` - local annotations=$(helm template \ + local actual=$(helm template \ -s templates/mesh-gateway-deployment.yaml \ --set 'meshGateway.enabled=true' \ --set 'connectInject.enabled=true' \ --set 'meshGateway.wanAddress.source=Service' \ + --set 'meshGateway.service.enabled=true' \ --set 'meshGateway.service.nodePort=9999' \ --set 'meshGateway.service.type=NodePort' \ . | tee /dev/stderr | - yq -r '.spec.template.metadata.annotations ' | tee /dev/stderr) - - local actual=$(echo $annotations | yq -r '.["consul.hashicorp.com/mesh-gateway-container-port"]' | tee /dev/stderr) - [ "${actual}" = "8443" ] - - local actual=$(echo $annotations | yq -r '.["consul.hashicorp.com/gateway-wan-address-source"]' | tee /dev/stderr) - [ "${actual}" = "Service" ] - - local actual=$(echo $annotations | yq -r '.["consul.hashicorp.com/gateway-wan-port"]' | tee /dev/stderr) - [ "${actual}" = "9999" ] + yq -r '.spec.template.spec.initContainers | map(select(.name == "mesh-gateway-init"))[0] | .command[2]' | tee /dev/stderr) + + exp='WAN_ADDR="${HOST_IP}" +WAN_PORT="9999" + +cat > /consul/service/service.hcl << EOF +service { + kind = "mesh-gateway" + name = "mesh-gateway" + port = 8443 + address = "${POD_IP}" + tagged_addresses { + lan { + address = "${POD_IP}" + port = 8443 + } + wan { + address = "${WAN_ADDR}" + port = ${WAN_PORT} + } + } + checks = [ + { + name = "Mesh Gateway Listening" + interval = "10s" + tcp = "${POD_IP}:8443" + deregister_critical_service_after = "6h" + } + ] +} +EOF + +/consul-bin/consul services register \ + /consul/service/service.hcl' + + [ "${actual}" = "${exp}" ] } @test "meshGateway/Deployment: mesh-gateway-init init container wanAddress.source=Service, type=NodePort fails if service.nodePort is null" { @@ -1034,60 +1603,119 @@ key2: value2' \ --set 'meshGateway.enabled=true' \ --set 'connectInject.enabled=true' \ --set 'meshGateway.wanAddress.source=Service' \ + --set 'meshGateway.service.enabled=true' \ --set 'meshGateway.service.type=NodePort' \ . 
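# All of the wanAddress variants exercised above reduce to one template
# decision; condensed as a sketch (variable names illustrative):
case "$WAN_SOURCE" in                     # meshGateway.wanAddress.source
  NodeIP)   WAN_ADDR='${HOST_IP}' ;;
  NodeName) WAN_ADDR='${NODE_NAME}' ;;
  Static)   WAN_ADDR="$WAN_STATIC" ;;     # meshGateway.wanAddress.static
  Service)  WAN_ADDR="$(cat /tmp/address.txt)" ;;  # via consul-k8s-control-plane service-address
esac
# WAN_PORT follows suit: 443 for Service type LoadBalancer/ClusterIP,
# meshGateway.service.nodePort for NodePort, meshGateway.wanAddress.port
# for the non-Service sources.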
+ [ "$status" -eq 1 ] [[ "$output" =~ "if meshGateway.wanAddress.source=Service and meshGateway.service.type=NodePort, meshGateway.service.nodePort must be set" ]] } @test "meshGateway/Deployment: mesh-gateway-init init container wanAddress.source=Service, type=ClusterIP" { cd `chart_dir` - local annotations=$(helm template \ + local actual=$(helm template \ -s templates/mesh-gateway-deployment.yaml \ --set 'meshGateway.enabled=true' \ --set 'connectInject.enabled=true' \ --set 'meshGateway.wanAddress.source=Service' \ --set 'meshGateway.wanAddress.port=ignored' \ + --set 'meshGateway.service.enabled=true' \ --set 'meshGateway.service.type=ClusterIP' \ . | tee /dev/stderr | - yq -r '.spec.template.metadata.annotations ' | tee /dev/stderr) - - local actual=$(echo $annotations | yq -r '.["consul.hashicorp.com/mesh-gateway-container-port"]' | tee /dev/stderr) - [ "${actual}" = "8443" ] - - local actual=$(echo $annotations | yq -r '.["consul.hashicorp.com/gateway-wan-address-source"]' | tee /dev/stderr) - [ "${actual}" = "Service" ] - - local actual=$(echo $annotations | yq -r '.["consul.hashicorp.com/gateway-wan-port"]' | tee /dev/stderr) - [ "${actual}" = "443" ] -} - -@test "meshGateway/Deployment: CA cert volume mount present on the init container when TLS is enabled" { - cd `chart_dir` - local actual=$(helm template \ - -s templates/mesh-gateway-deployment.yaml \ - --set 'meshGateway.enabled=true' \ - --set 'connectInject.enabled=true' \ - --set 'global.tls.enabled=true' \ - . | tee /dev/stderr | - yq '.spec.template.spec.initContainers[0].volumeMounts[] | select(.name == "consul-ca-cert")' | tee /dev/stderr) - [ "${actual}" != "" ] -} - -@test "meshGateway/Deployment: CA cert volume mount present is not present on the init container when TLS is enabled with externalServers and useSystemRoots" { + yq -r '.spec.template.spec.initContainers | map(select(.name == "mesh-gateway-init"))[0] | .command[2]' | tee /dev/stderr) + + exp='consul-k8s-control-plane service-address \ + -log-level=info \ + -log-json=false \ + -k8s-namespace=default \ + -name=release-name-consul-mesh-gateway \ + -output-file=/tmp/address.txt +WAN_ADDR="$(cat /tmp/address.txt)" +WAN_PORT="443" + +cat > /consul/service/service.hcl << EOF +service { + kind = "mesh-gateway" + name = "mesh-gateway" + port = 8443 + address = "${POD_IP}" + tagged_addresses { + lan { + address = "${POD_IP}" + port = 8443 + } + wan { + address = "${WAN_ADDR}" + port = ${WAN_PORT} + } + } + checks = [ + { + name = "Mesh Gateway Listening" + interval = "10s" + tcp = "${POD_IP}:8443" + deregister_critical_service_after = "6h" + } + ] +} +EOF + +/consul-bin/consul services register \ + /consul/service/service.hcl' + + [ "${actual}" = "${exp}" ] +} + +@test "meshGateway/Deployment: mesh-gateway-init init container consulServiceName can be changed" { cd `chart_dir` local actual=$(helm template \ -s templates/mesh-gateway-deployment.yaml \ --set 'meshGateway.enabled=true' \ --set 'connectInject.enabled=true' \ - --set 'global.tls.enabled=true' \ - --set 'server.enabled=false' \ - --set 'externalServers.enabled=true' \ - --set 'externalServers.hosts[0]=consul' \ - --set 'externalServers.useSystemRoots=true' \ + --set 'meshGateway.consulServiceName=new-name' \ . 
| tee /dev/stderr | - yq '.spec.template.spec.initContainers[0].volumeMounts[] | select(.name == "consul-ca-cert")' | tee /dev/stderr) - [ "${actual}" = "" ] + yq -r '.spec.template.spec.initContainers | map(select(.name == "mesh-gateway-init"))[0] | .command[2]' | tee /dev/stderr) + + exp='consul-k8s-control-plane service-address \ + -log-level=info \ + -log-json=false \ + -k8s-namespace=default \ + -name=release-name-consul-mesh-gateway \ + -output-file=/tmp/address.txt +WAN_ADDR="$(cat /tmp/address.txt)" +WAN_PORT="443" + +cat > /consul/service/service.hcl << EOF +service { + kind = "mesh-gateway" + name = "new-name" + port = 8443 + address = "${POD_IP}" + tagged_addresses { + lan { + address = "${POD_IP}" + port = 8443 + } + wan { + address = "${WAN_ADDR}" + port = ${WAN_PORT} + } + } + checks = [ + { + name = "Mesh Gateway Listening" + interval = "10s" + tcp = "${POD_IP}:8443" + deregister_critical_service_after = "6h" + } + ] +} +EOF + +/consul-bin/consul services register \ + /consul/service/service.hcl' + + [ "${actual}" = "${exp}" ] } #-------------------------------------------------------------------- @@ -1129,7 +1757,7 @@ key2: value2' \ --set 'global.adminPartitions.enabled=true' \ --set 'global.enableConsulNamespaces=true' \ . | tee /dev/stderr | - yq '.spec.template.spec.initContainers[0].env[7].value | contains("default")' | tee /dev/stderr) + yq '.spec.template.spec.initContainers[1].command | any(contains("partition = \"default\""))' | tee /dev/stderr) [ "${actual}" = "true" ] } @@ -1160,6 +1788,37 @@ key2: value2' \ [[ "$output" =~ "global.enableConsulNamespaces must be true if global.adminPartitions.enabled=true" ]] } +#-------------------------------------------------------------------- +# get-auto-encrypt-client-ca + +@test "meshGateway/Deployment: get-auto-encrypt-client-ca uses server's stateful set address by default and passes ca cert" { + cd `chart_dir` + local command=$(helm template \ + -s templates/mesh-gateway-deployment.yaml \ + --set 'meshGateway.enabled=true' \ + --set 'connectInject.enabled=true' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ + . | tee /dev/stderr | + yq '.spec.template.spec.initContainers[] | select(.name == "get-auto-encrypt-client-ca").command | join(" ")' | tee /dev/stderr) + + # check server address + actual=$(echo $command | jq ' . | contains("-server-addr=release-name-consul-server")') + [ "${actual}" = "true" ] + + # check server port + actual=$(echo $command | jq ' . | contains("-server-port=8501")') + [ "${actual}" = "true" ] + + # check server's CA cert + actual=$(echo $command | jq ' . | contains("-ca-file=/consul/tls/ca/tls.crt")') + [ "${actual}" = "true" ] + + # check consul-api-timeout + actual=$(echo $command | jq ' . | contains("-consul-api-timeout=5s")') + [ "${actual}" = "true" ] +} + #-------------------------------------------------------------------- # Vault @@ -1309,7 +1968,7 @@ key2: value2' \ --set 'global.tls.caCert.secretName=foo' \ --set 'global.secretsBackend.vault.consulCARole=carole' \ . 
| tee /dev/stderr | - yq -r '.spec.template.metadata.annotations | del(."consul.hashicorp.com/connect-inject") | del(."vault.hashicorp.com/agent-inject") | del(."vault.hashicorp.com/role") | del(."consul.hashicorp.com/gateway-kind") | del(."consul.hashicorp.com/gateway-wan-address-source") | del(."consul.hashicorp.com/mesh-gateway-container-port") | del(."consul.hashicorp.com/gateway-wan-address-static") | del(."consul.hashicorp.com/gateway-wan-port") | del(."consul.hashicorp.com/gateway-consul-service-name")' | tee /dev/stderr) + yq -r '.spec.template.metadata.annotations | del(."consul.hashicorp.com/connect-inject") | del(."vault.hashicorp.com/agent-inject") | del(."vault.hashicorp.com/role")' | tee /dev/stderr) [ "${actual}" = "{}" ] } @@ -1331,231 +1990,3 @@ key2: value2' \ yq -r '.spec.template.metadata.annotations.foo' | tee /dev/stderr) [ "${actual}" = "bar" ] } - -#-------------------------------------------------------------------- -# global.cloud - -@test "meshGateway/Deployment: fails when global.cloud.enabled is true and global.cloud.clientId.secretName is not set but global.cloud.clientSecret.secretName and global.cloud.resourceId.secretName is set" { - cd `chart_dir` - run helm template \ - -s templates/mesh-gateway-deployment.yaml \ - --set 'connectInject.enabled=true' \ - --set 'meshGateway.enabled=true' \ - --set 'global.tls.enabled=true' \ - --set 'global.tls.enableAutoEncrypt=true' \ - --set 'global.cloud.enabled=true' \ - --set 'global.cloud.clientSecret.secretName=client-id-name' \ - --set 'global.cloud.clientSecret.secretKey=client-id-key' \ - --set 'global.cloud.resourceId.secretName=client-resource-id-name' \ - --set 'global.cloud.resourceId.secretKey=client-resource-id-key' \ - . - [ "$status" -eq 1 ] - [[ "$output" =~ "When global.cloud.enabled is true, global.cloud.resourceId.secretName, global.cloud.clientId.secretName, and global.cloud.clientSecret.secretName must also be set." ]] -} - -@test "meshGateway/Deployment: fails when global.cloud.enabled is true and global.cloud.clientSecret.secretName is not set but global.cloud.clientId.secretName and global.cloud.resourceId.secretName is set" { - cd `chart_dir` - run helm template \ - -s templates/mesh-gateway-deployment.yaml \ - --set 'connectInject.enabled=true' \ - --set 'meshGateway.enabled=true' \ - --set 'global.tls.enabled=true' \ - --set 'global.tls.enableAutoEncrypt=true' \ - --set 'global.cloud.enabled=true' \ - --set 'global.cloud.clientId.secretName=client-id-name' \ - --set 'global.cloud.clientId.secretKey=client-id-key' \ - --set 'global.cloud.resourceId.secretName=resource-id-name' \ - --set 'global.cloud.resourceId.secretKey=resource-id-key' \ - . - [ "$status" -eq 1 ] - [[ "$output" =~ "When global.cloud.enabled is true, global.cloud.resourceId.secretName, global.cloud.clientId.secretName, and global.cloud.clientSecret.secretName must also be set." 
]] -} - -@test "meshGateway/Deployment: fails when global.cloud.enabled is true and global.cloud.resourceId.secretName is not set but global.cloud.clientId.secretName and global.cloud.clientSecret.secretName is set" { - cd `chart_dir` - run helm template \ - -s templates/mesh-gateway-deployment.yaml \ - --set 'connectInject.enabled=true' \ - --set 'meshGateway.enabled=true' \ - --set 'global.tls.enabled=true' \ - --set 'global.tls.enableAutoEncrypt=true' \ - --set 'global.cloud.enabled=true' \ - --set 'global.cloud.clientId.secretName=client-id-name' \ - --set 'global.cloud.clientId.secretKey=client-id-key' \ - --set 'global.cloud.clientSecret.secretName=client-secret-id-name' \ - --set 'global.cloud.clientSecret.secretKey=client-secret-id-key' \ - . - [ "$status" -eq 1 ] - [[ "$output" =~ "When global.cloud.enabled is true, global.cloud.resourceId.secretName, global.cloud.clientId.secretName, and global.cloud.clientSecret.secretName must also be set." ]] -} - -@test "meshGateway/Deployment: fails when global.cloud.resourceId.secretName is set but global.cloud.resourceId.secretKey is not set." { - cd `chart_dir` - run helm template \ - -s templates/mesh-gateway-deployment.yaml \ - --set 'connectInject.enabled=true' \ - --set 'meshGateway.enabled=true' \ - --set 'global.tls.enabled=true' \ - --set 'global.tls.enableAutoEncrypt=true' \ - --set 'global.cloud.enabled=true' \ - --set 'global.cloud.clientId.secretName=client-id-name' \ - --set 'global.cloud.clientId.secretKey=client-id-key' \ - --set 'global.cloud.clientSecret.secretName=client-secret-id-name' \ - --set 'global.cloud.clientSecret.secretKey=client-secret-id-key' \ - --set 'global.cloud.resourceId.secretName=resource-id-name' \ - . - [ "$status" -eq 1 ] - [[ "$output" =~ "When either global.cloud.resourceId.secretName or global.cloud.resourceId.secretKey is defined, both must be set." ]] -} - -@test "meshGateway/Deployment: fails when global.cloud.authURL.secretName is set but global.cloud.authURL.secretKey is not set." { - cd `chart_dir` - run helm template \ - -s templates/mesh-gateway-deployment.yaml \ - --set 'connectInject.enabled=true' \ - --set 'meshGateway.enabled=true' \ - --set 'global.tls.enabled=true' \ - --set 'global.tls.enableAutoEncrypt=true' \ - --set 'global.cloud.enabled=true' \ - --set 'global.cloud.clientId.secretName=client-id-name' \ - --set 'global.cloud.clientId.secretKey=client-id-key' \ - --set 'global.cloud.clientSecret.secretName=client-secret-id-name' \ - --set 'global.cloud.clientSecret.secretKey=client-secret-id-key' \ - --set 'global.cloud.resourceId.secretName=resource-id-name' \ - --set 'global.cloud.resourceId.secretKey=resource-id-key' \ - --set 'global.cloud.authUrl.secretName=auth-url-name' \ - . - [ "$status" -eq 1 ] - - [[ "$output" =~ "When either global.cloud.authUrl.secretName or global.cloud.authUrl.secretKey is defined, both must be set." ]] -} - -@test "meshGateway/Deployment: fails when global.cloud.authURL.secretKey is set but global.cloud.authURL.secretName is not set." 
{ - cd `chart_dir` - run helm template \ - -s templates/mesh-gateway-deployment.yaml \ - --set 'connectInject.enabled=true' \ - --set 'meshGateway.enabled=true' \ - --set 'global.tls.enabled=true' \ - --set 'global.tls.enableAutoEncrypt=true' \ - --set 'global.cloud.enabled=true' \ - --set 'global.cloud.clientId.secretName=client-id-name' \ - --set 'global.cloud.clientId.secretKey=client-id-key' \ - --set 'global.cloud.clientSecret.secretName=client-secret-id-name' \ - --set 'global.cloud.clientSecret.secretKey=client-secret-id-key' \ - --set 'global.cloud.resourceId.secretName=resource-id-name' \ - --set 'global.cloud.resourceId.secretKey=resource-id-key' \ - --set 'global.cloud.authUrl.secretKey=auth-url-key' \ - . - [ "$status" -eq 1 ] - - [[ "$output" =~ "When either global.cloud.authUrl.secretName or global.cloud.authUrl.secretKey is defined, both must be set." ]] -} - -@test "meshGateway/Deployment: fails when global.cloud.apiHost.secretName is set but global.cloud.apiHost.secretKey is not set." { - cd `chart_dir` - run helm template \ - -s templates/mesh-gateway-deployment.yaml \ - --set 'connectInject.enabled=true' \ - --set 'meshGateway.enabled=true' \ - --set 'global.tls.enabled=true' \ - --set 'global.tls.enableAutoEncrypt=true' \ - --set 'global.cloud.enabled=true' \ - --set 'global.cloud.clientId.secretName=client-id-name' \ - --set 'global.cloud.clientId.secretKey=client-id-key' \ - --set 'global.cloud.clientSecret.secretName=client-secret-id-name' \ - --set 'global.cloud.clientSecret.secretKey=client-secret-id-key' \ - --set 'global.cloud.resourceId.secretName=resource-id-name' \ - --set 'global.cloud.resourceId.secretKey=resource-id-key' \ - --set 'global.cloud.apiHost.secretName=auth-url-name' \ - . - [ "$status" -eq 1 ] - - [[ "$output" =~ "When either global.cloud.apiHost.secretName or global.cloud.apiHost.secretKey is defined, both must be set." ]] -} - -@test "meshGateway/Deployment: fails when global.cloud.apiHost.secretKey is set but global.cloud.apiHost.secretName is not set." { - cd `chart_dir` - run helm template \ - -s templates/mesh-gateway-deployment.yaml \ - --set 'connectInject.enabled=true' \ - --set 'meshGateway.enabled=true' \ - --set 'global.tls.enabled=true' \ - --set 'global.tls.enableAutoEncrypt=true' \ - --set 'global.cloud.enabled=true' \ - --set 'global.cloud.clientId.secretName=client-id-name' \ - --set 'global.cloud.clientId.secretKey=client-id-key' \ - --set 'global.cloud.clientSecret.secretName=client-secret-id-name' \ - --set 'global.cloud.clientSecret.secretKey=client-secret-id-key' \ - --set 'global.cloud.resourceId.secretName=resource-id-name' \ - --set 'global.cloud.resourceId.secretKey=resource-id-key' \ - --set 'global.cloud.apiHost.secretKey=auth-url-key' \ - . - [ "$status" -eq 1 ] - - [[ "$output" =~ "When either global.cloud.apiHost.secretName or global.cloud.apiHost.secretKey is defined, both must be set." ]] -} - -@test "meshGateway/Deployment: fails when global.cloud.scadaAddress.secretName is set but global.cloud.scadaAddress.secretKey is not set." 
{ - cd `chart_dir` - run helm template \ - -s templates/mesh-gateway-deployment.yaml \ - --set 'connectInject.enabled=true' \ - --set 'meshGateway.enabled=true' \ - --set 'global.tls.enabled=true' \ - --set 'global.tls.enableAutoEncrypt=true' \ - --set 'global.cloud.enabled=true' \ - --set 'global.cloud.clientId.secretName=client-id-name' \ - --set 'global.cloud.clientId.secretKey=client-id-key' \ - --set 'global.cloud.clientSecret.secretName=client-secret-id-name' \ - --set 'global.cloud.clientSecret.secretKey=client-secret-id-key' \ - --set 'global.cloud.resourceId.secretName=resource-id-name' \ - --set 'global.cloud.resourceId.secretKey=resource-id-key' \ - --set 'global.cloud.scadaAddress.secretName=scada-address-name' \ - . - [ "$status" -eq 1 ] - - [[ "$output" =~ "When either global.cloud.scadaAddress.secretName or global.cloud.scadaAddress.secretKey is defined, both must be set." ]] -} - -@test "meshGateway/Deployment: fails when global.cloud.scadaAddress.secretKey is set but global.cloud.scadaAddress.secretName is not set." { - cd `chart_dir` - run helm template \ - -s templates/mesh-gateway-deployment.yaml \ - --set 'connectInject.enabled=true' \ - --set 'meshGateway.enabled=true' \ - --set 'global.tls.enabled=true' \ - --set 'global.tls.enableAutoEncrypt=true' \ - --set 'global.cloud.enabled=true' \ - --set 'global.cloud.clientId.secretName=client-id-name' \ - --set 'global.cloud.clientId.secretKey=client-id-key' \ - --set 'global.cloud.clientSecret.secretName=client-secret-id-name' \ - --set 'global.cloud.clientSecret.secretKey=client-secret-id-key' \ - --set 'global.cloud.resourceId.secretName=resource-id-name' \ - --set 'global.cloud.resourceId.secretKey=resource-id-key' \ - --set 'global.cloud.scadaAddress.secretKey=scada-address-key' \ - . - [ "$status" -eq 1 ] - - [[ "$output" =~ "When either global.cloud.scadaAddress.secretName or global.cloud.scadaAddress.secretKey is defined, both must be set." ]] -} - -@test "meshGateway/Deployment: sets TLS server name if global.cloud.enabled is set" { - cd `chart_dir` - local actual=$(helm template \ - -s templates/mesh-gateway-deployment.yaml \ - --set 'connectInject.enabled=true' \ - --set 'meshGateway.enabled=true' \ - --set 'global.tls.enabled=true' \ - --set 'global.tls.enableAutoEncrypt=true' \ - --set 'global.cloud.enabled=true' \ - --set 'global.cloud.clientId.secretName=client-id-name' \ - --set 'global.cloud.clientId.secretKey=client-id-key' \ - --set 'global.cloud.clientSecret.secretName=client-secret-id-name' \ - --set 'global.cloud.clientSecret.secretKey=client-secret-id-key' \ - --set 'global.cloud.resourceId.secretName=resource-id-name' \ - --set 'global.cloud.resourceId.secretKey=resource-id-key' \ - . 
| tee /dev/stderr | - yq '.spec.template.spec.containers[0].command | any(contains("-tls-server-name=server.dc1.consul"))' | tee /dev/stderr) - [ "${actual}" = "true" ] -} diff --git a/charts/consul/test/unit/mesh-gateway-service.bats b/charts/consul/test/unit/mesh-gateway-service.bats index acedeb22b3..60ca3a3503 100755 --- a/charts/consul/test/unit/mesh-gateway-service.bats +++ b/charts/consul/test/unit/mesh-gateway-service.bats @@ -20,12 +20,13 @@ load _helpers [ "${actual}" = "true" ] } -@test "meshGateway/Service: enabled with meshGateway.enabled=true" { +@test "meshGateway/Service: enabled with meshGateway.enabled=true meshGateway.service.enabled" { cd `chart_dir` local actual=$(helm template \ -s templates/mesh-gateway-service.yaml \ --set 'meshGateway.enabled=true' \ --set 'connectInject.enabled=true' \ + --set 'meshGateway.service.enabled=true' \ . | tee /dev/stderr | yq 'length > 0' | tee /dev/stderr) [ "${actual}" = "true" ] @@ -40,6 +41,7 @@ load _helpers -s templates/mesh-gateway-service.yaml \ --set 'meshGateway.enabled=true' \ --set 'connectInject.enabled=true' \ + --set 'meshGateway.service.enabled=true' \ . | tee /dev/stderr | yq -r '.metadata.annotations' | tee /dev/stderr) [ "${actual}" = "null" ] @@ -51,6 +53,7 @@ load _helpers -s templates/mesh-gateway-service.yaml \ --set 'meshGateway.enabled=true' \ --set 'connectInject.enabled=true' \ + --set 'meshGateway.service.enabled=true' \ --set 'meshGateway.service.annotations=key: value' \ . | tee /dev/stderr | yq -r '.metadata.annotations.key' | tee /dev/stderr) @@ -66,6 +69,7 @@ load _helpers -s templates/mesh-gateway-service.yaml \ --set 'meshGateway.enabled=true' \ --set 'connectInject.enabled=true' \ + --set 'meshGateway.service.enabled=true' \ . | tee /dev/stderr | yq -r '.spec.ports[0].port' | tee /dev/stderr) [ "${actual}" = "443" ] @@ -77,6 +81,7 @@ load _helpers -s templates/mesh-gateway-service.yaml \ --set 'meshGateway.enabled=true' \ --set 'connectInject.enabled=true' \ + --set 'meshGateway.service.enabled=true' \ --set 'meshGateway.service.port=8443' \ . | tee /dev/stderr | yq -r '.spec.ports[0].port' | tee /dev/stderr) @@ -92,6 +97,7 @@ load _helpers -s templates/mesh-gateway-service.yaml \ --set 'meshGateway.enabled=true' \ --set 'connectInject.enabled=true' \ + --set 'meshGateway.service.enabled=true' \ . | tee /dev/stderr | yq -r '.spec.ports[0].targetPort' | tee /dev/stderr) [ "${actual}" = "8443" ] @@ -103,6 +109,7 @@ load _helpers -s templates/mesh-gateway-service.yaml \ --set 'meshGateway.enabled=true' \ --set 'connectInject.enabled=true' \ + --set 'meshGateway.service.enabled=true' \ --set 'meshGateway.containerPort=9443' \ . | tee /dev/stderr | yq -r '.spec.ports[0].targetPort' | tee /dev/stderr) @@ -118,6 +125,7 @@ load _helpers -s templates/mesh-gateway-service.yaml \ --set 'meshGateway.enabled=true' \ --set 'connectInject.enabled=true' \ + --set 'meshGateway.service.enabled=true' \ . | tee /dev/stderr | yq -r '.spec.ports[0].nodePort' | tee /dev/stderr) [ "${actual}" = "null" ] @@ -129,6 +137,7 @@ load _helpers -s templates/mesh-gateway-service.yaml \ --set 'meshGateway.enabled=true' \ --set 'connectInject.enabled=true' \ + --set 'meshGateway.service.enabled=true' \ --set 'meshGateway.service.nodePort=8443' \ . | tee /dev/stderr | yq -r '.spec.ports[0].nodePort' | tee /dev/stderr) @@ -144,6 +153,7 @@ load _helpers -s templates/mesh-gateway-service.yaml \ --set 'meshGateway.enabled=true' \ --set 'connectInject.enabled=true' \ + --set 'meshGateway.service.enabled=true' \ . 
| tee /dev/stderr | yq -r '.spec.type' | tee /dev/stderr) [ "${actual}" = "LoadBalancer" ] @@ -155,6 +165,7 @@ load _helpers -s templates/mesh-gateway-service.yaml \ --set 'meshGateway.enabled=true' \ --set 'connectInject.enabled=true' \ + --set 'meshGateway.service.enabled=true' \ --set 'meshGateway.service.type=ClusterIP' \ . | tee /dev/stderr | yq -r '.spec.type' | tee /dev/stderr) @@ -170,6 +181,7 @@ load _helpers -s templates/mesh-gateway-service.yaml \ --set 'meshGateway.enabled=true' \ --set 'connectInject.enabled=true' \ + --set 'meshGateway.service.enabled=true' \ --set 'meshGateway.service.additionalSpec=key: value' \ . | tee /dev/stderr | yq -r '.spec.key' | tee /dev/stderr) diff --git a/charts/consul/test/unit/partition-init-job.bats b/charts/consul/test/unit/partition-init-job.bats index 82ffc959fa..816ad26ede 100644 --- a/charts/consul/test/unit/partition-init-job.bats +++ b/charts/consul/test/unit/partition-init-job.bats @@ -6,15 +6,14 @@ load _helpers cd `chart_dir` assert_empty helm template \ -s templates/partition-init-job.yaml \ - . + . } -@test "partitionInit/Job: enabled with global.adminPartitions.enabled=true and server.enabled=false" { +@test "partitionInit/Job: enabled with global.adminPartitions.enabled=true and servers = false" { cd `chart_dir` local actual=$(helm template \ -s templates/partition-init-job.yaml \ --set 'global.adminPartitions.enabled=true' \ - --set 'global.enableConsulNamespaces=true' \ --set 'server.enabled=false' \ --set 'global.adminPartitions.name=bar' \ --set 'externalServers.enabled=true' \ @@ -29,7 +28,6 @@ load _helpers assert_empty helm template \ -s templates/partition-init-job.yaml \ --set 'global.adminPartitions.enabled=true' \ - --set 'global.enableConsulNamespaces=true' \ --set 'server.enabled=true' \ . } @@ -39,7 +37,6 @@ load _helpers assert_empty helm template \ -s templates/partition-init-job.yaml \ --set 'global.adminPartitions.enabled=true' \ - --set 'global.enableConsulNamespaces=true' \ --set 'server.enabled=false' \ . } @@ -49,7 +46,6 @@ load _helpers assert_empty helm template \ -s templates/partition-init-job.yaml \ --set 'global.adminPartitions.enabled=true' \ - --set 'global.enableConsulNamespaces=true' \ --set 'global.enabled=true' \ . } @@ -59,7 +55,6 @@ load _helpers assert_empty helm template \ -s templates/partition-init-job.yaml \ --set 'global.adminPartitions.enabled=true' \ - --set 'global.enableConsulNamespaces=true' \ --set 'server.enabled=true' \ . } @@ -70,80 +65,62 @@ load _helpers -s templates/partition-init-job.yaml \ --set 'global.adminPartitions.enabled=true' \ --set 'global.adminPartitions.name=bar' \ - --set 'global.enableConsulNamespaces=true' \ --set 'server.enabled=false' \ --set 'externalServers.enabled=false' . [ "$status" -eq 1 ] [[ "$output" =~ "externalServers.enabled needs to be true and configured to create a non-default partition." ]] } -@test "partitionInit/Job: consul env defaults" { +@test "partitionInit/Job: command defaults" { cd `chart_dir` - local env=$(helm template \ - -s templates/partition-init-job.yaml \ + local command=$(helm template \ + -s templates/partition-init-job.yaml \ + --set 'global.enabled=false' \ --set 'global.adminPartitions.enabled=true' \ --set 'global.adminPartitions.name=bar' \ - --set 'global.enableConsulNamespaces=true' \ --set 'externalServers.enabled=true' \ --set 'externalServers.hosts[0]=foo' \ - --set 'server.enabled=false' \ . 
| tee /dev/stderr | - yq '.spec.template.spec.containers[0].env[]' | tee /dev/stderr) - - local actual=$(echo "$env" | - jq -r '. | select( .name == "CONSUL_ADDRESSES").value' | tee /dev/stderr) - [ "${actual}" = "foo" ] - - local actual=$(echo "$env" | - jq -r '. | select( .name == "CONSUL_GRPC_PORT").value' | tee /dev/stderr) - [ "${actual}" = "8502" ] + yq -r '.spec.template.spec.containers[0].command' | tee /dev/stderr) - local actual=$(echo "$env" | - jq -r '. | select( .name == "CONSUL_HTTP_PORT").value' | tee /dev/stderr) - [ "${actual}" = "8501" ] - - local actual=$(echo "$env" | - jq -r '. | select( .name == "CONSUL_DATACENTER").value' | tee /dev/stderr) - [ "${actual}" = "dc1" ] + local actual + actual=$(echo $command | jq -r '. | any(contains("consul-k8s-control-plane partition-init"))' | tee /dev/stderr) + [ "${actual}" = "true" ] - local actual=$(echo "$env" | - jq -r '. | select( .name == "CONSUL_API_TIMEOUT").value' | tee /dev/stderr) - [ "${actual}" = "5s" ] + actual=$(echo $command | jq -r '. | any(contains("-consul-api-timeout=5s"))' | tee /dev/stderr) + [ "${actual}" = "true" ] } #-------------------------------------------------------------------- # global.tls.enabled -@test "partitionInit/Job: sets TLS env vars when global.tls.enabled" { +@test "partitionInit/Job: sets TLS flags when global.tls.enabled" { cd `chart_dir` - local env=$(helm template \ + local command=$(helm template \ -s templates/partition-init-job.yaml \ --set 'global.enabled=false' \ --set 'global.adminPartitions.enabled=true' \ - --set 'global.enableConsulNamespaces=true' \ --set 'global.tls.enabled=true' \ --set 'global.adminPartitions.name=bar' \ --set 'externalServers.enabled=true' \ --set 'externalServers.hosts[0]=foo' \ . | tee /dev/stderr | - yq -r '.spec.template.spec.containers[0].env[]' | tee /dev/stderr) + yq -r '.spec.template.spec.containers[0].command' | tee /dev/stderr) - local actual=$(echo "$env" | - jq -r '. | select( .name == "CONSUL_HTTP_PORT").value' | tee /dev/stderr) - [ "${actual}" = "8501" ] + local actual + actual=$(echo $command | jq -r '. | any(contains("-use-https"))' | tee /dev/stderr) + [ "${actual}" = "true" ] - local actual=$(echo "$env" | - jq -r '. | select( .name == "CONSUL_USE_TLS").value' | tee /dev/stderr) + actual=$(echo $command | jq -r '. | any(contains("-ca-file=/consul/tls/ca/tls.crt"))' | tee /dev/stderr) [ "${actual}" = "true" ] - local actual=$(echo "$env" | - jq -r '. | select( .name == "CONSUL_CACERT_FILE").value' | tee /dev/stderr) - [ "${actual}" = "/consul/tls/ca/tls.crt" ] + actual=$(echo $command | jq -r '. | any(contains("-server-port=8501"))' | tee /dev/stderr) + [ "${actual}" = "true" ] } -@test "partitionInit/Job: does not set consul ca cert when .externalServers.useSystemRoots is true" { +@test "partitionInit/Job: does not set consul ca cert or server-port when .externalServers.useSystemRoots is true" { cd `chart_dir` - local spec=$(helm template \ + local command=$(helm template \ -s templates/partition-init-job.yaml \ --set 'global.enabled=false' \ --set 'global.adminPartitions.enabled=true' \ @@ -153,19 +130,11 @@ load _helpers --set 'externalServers.hosts[0]=foo' \ --set 'externalServers.useSystemRoots=true' \ . 
| tee /dev/stderr | - yq -r '.spec.template.spec' | tee /dev/stderr) - - local actual=$(echo "$env" | - jq -r '.containers[0].env[] | select( .name == "CONSUL_CACERT_FILE").value' | tee /dev/stderr) - [ "${actual}" = "" ] - - local actual=$(echo "$env" | - jq -r '.volumes[] | select( .name == "consul-ca-cert")' | tee /dev/stderr) - [ "${actual}" = "" ] + yq -r '.spec.template.spec.containers[0].command' | tee /dev/stderr) - local actual=$(echo "$env" | - jq -r '.spec.volumeMounts[] | select( .name == "consul-ca-cert")' | tee /dev/stderr) - [ "${actual}" = "" ] + local actual + actual=$(echo $command | jq -r '. | any(contains("-ca-file=/consul/tls/ca/tls.crt"))' | tee /dev/stderr) + [ "${actual}" = "false" ] } @test "partitionInit/Job: can overwrite CA secret with the provided one" { @@ -175,7 +144,6 @@ load _helpers --set 'global.enabled=false' \ --set 'global.adminPartitions.enabled=true' \ --set 'global.adminPartitions.name=bar' \ - --set 'global.enableConsulNamespaces=true' \ --set 'externalServers.enabled=true' \ --set 'externalServers.hosts[0]=foo' \ --set 'global.tls.enabled=true' \ @@ -199,20 +167,19 @@ load _helpers #-------------------------------------------------------------------- # global.acls.bootstrapToken -@test "partitionInit/Job: CONSUL_ACL_TOKEN is set when global.acls.bootstrapToken is provided" { +@test "partitionInit/Job: HTTP_TOKEN is set when global.acls.bootstrapToken is provided" { cd `chart_dir` local actual=$(helm template \ -s templates/partition-init-job.yaml \ --set 'global.enabled=false' \ --set 'global.adminPartitions.enabled=true' \ --set 'global.adminPartitions.name=bar' \ - --set 'global.enableConsulNamespaces=true' \ --set 'externalServers.enabled=true' \ --set 'externalServers.hosts[0]=foo' \ --set 'global.acls.bootstrapToken.secretName=partition-token' \ --set 'global.acls.bootstrapToken.secretKey=token' \ . | tee /dev/stderr | - yq '[.spec.template.spec.containers[0].env[].name] | any(contains("CONSUL_ACL_TOKEN"))' | tee /dev/stderr) + yq '[.spec.template.spec.containers[0].env[].name] | any(contains("CONSUL_HTTP_TOKEN"))' | tee /dev/stderr) [ "${actual}" = "true" ] } @@ -262,7 +229,6 @@ reservedNameTest() { --set 'global.enabled=false' \ --set 'global.adminPartitions.enabled=true' \ --set "global.adminPartitions.name=bar" \ - --set 'global.enableConsulNamespaces=true' \ --set 'global.acls.manageSystemACLs=true' \ --set 'global.acls.bootstrapToken.secretName=boot' \ --set 'global.acls.bootstrapToken.secretKey=token' \ @@ -283,7 +249,6 @@ reservedNameTest() { --set 'global.enabled=false' \ --set 'global.adminPartitions.enabled=true' \ --set "global.adminPartitions.name=bar" \ - --set 'global.enableConsulNamespaces=true' \ --set 'externalServers.enabled=true' \ --set 'externalServers.hosts[0]=foo' \ --set 'global.secretsBackend.vault.enabled=true' \ @@ -315,7 +280,7 @@ reservedNameTest() { [ "${actual}" = "${expected}" ] # Check that the bootstrap token flag is set to the path of the Vault secret. 
- local actual=$(echo $object | jq -r '.spec.containers[] | select(.name=="partition-init-job").env[] | select(.name=="CONSUL_ACL_TOKEN_FILE").value') + local actual=$(echo $object | jq -r '.spec.containers[] | select(.name=="partition-init-job").env[] | select(.name=="CONSUL_HTTP_TOKEN_FILE").value') [ "${actual}" = "/vault/secrets/bootstrap-token" ] # Check that no (secret) volumes are not attached @@ -333,7 +298,6 @@ reservedNameTest() { --set 'global.enabled=false' \ --set 'global.adminPartitions.enabled=true' \ --set "global.adminPartitions.name=bar" \ - --set 'global.enableConsulNamespaces=true' \ --set 'externalServers.enabled=true' \ --set 'externalServers.hosts[0]=foo' \ --set 'global.secretsBackend.vault.enabled=true' \ @@ -377,7 +341,6 @@ reservedNameTest() { --set 'global.enabled=false' \ --set 'global.adminPartitions.enabled=true' \ --set "global.adminPartitions.name=bar" \ - --set 'global.enableConsulNamespaces=true' \ --set 'externalServers.enabled=true' \ --set 'externalServers.hosts[0]=foo' \ --set 'global.tls.enabled=true' \ @@ -419,7 +382,7 @@ reservedNameTest() { [ "${actual}" = "${expected}" ] # Check that the bootstrap token flag is set to the path of the Vault secret. - local actual=$(echo $object | jq -r '.spec.containers[] | select(.name=="partition-init-job").env[] | select(.name=="CONSUL_ACL_TOKEN_FILE").value') + local actual=$(echo $object | jq -r '.spec.containers[] | select(.name=="partition-init-job").env[] | select(.name=="CONSUL_HTTP_TOKEN_FILE").value') [ "${actual}" = "/vault/secrets/bootstrap-token" ] # Check that the consul-ca-cert volume is not attached @@ -437,7 +400,6 @@ reservedNameTest() { --set 'global.enabled=false' \ --set 'global.adminPartitions.enabled=true' \ --set "global.adminPartitions.name=bar" \ - --set 'global.enableConsulNamespaces=true' \ --set 'externalServers.enabled=true' \ --set 'externalServers.hosts[0]=foo' \ --set 'global.tls.enabled=true' \ @@ -462,7 +424,6 @@ reservedNameTest() { --set 'global.enabled=false' \ --set 'global.adminPartitions.enabled=true' \ --set "global.adminPartitions.name=bar" \ - --set 'global.enableConsulNamespaces=true' \ --set 'externalServers.enabled=true' \ --set 'externalServers.hosts[0]=foo' \ --set 'global.tls.enabled=true' \ @@ -488,7 +449,6 @@ reservedNameTest() { --set 'global.enabled=false' \ --set 'global.adminPartitions.enabled=true' \ --set "global.adminPartitions.name=bar" \ - --set 'global.enableConsulNamespaces=true' \ --set 'externalServers.enabled=true' \ --set 'externalServers.hosts[0]=foo' \ --set 'global.tls.enabled=true' \ @@ -514,7 +474,6 @@ reservedNameTest() { --set 'global.enabled=false' \ --set 'global.adminPartitions.enabled=true' \ --set "global.adminPartitions.name=bar" \ - --set 'global.enableConsulNamespaces=true' \ --set 'externalServers.enabled=true' \ --set 'externalServers.hosts[0]=foo' \ --set 'global.tls.enabled=true' \ @@ -544,7 +503,6 @@ reservedNameTest() { --set 'global.enabled=false' \ --set 'global.adminPartitions.enabled=true' \ --set "global.adminPartitions.name=bar" \ - --set 'global.enableConsulNamespaces=true' \ --set 'externalServers.enabled=true' \ --set 'externalServers.hosts[0]=foo' \ --set 'global.tls.enabled=true' \ @@ -566,7 +524,6 @@ reservedNameTest() { --set 'global.enabled=false' \ --set 'global.adminPartitions.enabled=true' \ --set "global.adminPartitions.name=bar" \ - --set 'global.enableConsulNamespaces=true' \ --set 'externalServers.enabled=true' \ --set 'externalServers.hosts[0]=foo' \ --set 'global.tls.enabled=true' \ @@ -581,301 +538,3 @@ 
reservedNameTest() { yq -r '.spec.template.metadata.annotations.foo' | tee /dev/stderr) [ "${actual}" = "bar" ] } - -#-------------------------------------------------------------------- -# global.cloud - -@test "partitionInit/Job: fails when global.cloud.enabled is true and global.cloud.clientId.secretName is not set but global.cloud.clientSecret.secretName and global.cloud.resourceId.secretName is set" { - cd `chart_dir` - run helm template \ - -s templates/partition-init-job.yaml \ - --set 'global.enabled=false' \ - --set 'global.adminPartitions.enabled=true' \ - --set 'global.enableConsulNamespaces=true' \ - --set "global.adminPartitions.name=bar" \ - --set 'externalServers.enabled=true' \ - --set 'externalServers.hosts[0]=foo' \ - --set 'global.tls.enabled=true' \ - --set 'global.tls.enableAutoEncrypt=true' \ - --set 'global.tls.caCert.secretName=foo' \ - --set 'global.tls.enabled=true' \ - --set 'global.tls.enableAutoEncrypt=true' \ - --set 'global.cloud.enabled=true' \ - --set 'global.cloud.clientSecret.secretName=client-id-name' \ - --set 'global.cloud.clientSecret.secretKey=client-id-key' \ - --set 'global.cloud.resourceId.secretName=client-resource-id-name' \ - --set 'global.cloud.resourceId.secretKey=client-resource-id-key' \ - . - [ "$status" -eq 1 ] - [[ "$output" =~ "When global.cloud.enabled is true, global.cloud.resourceId.secretName, global.cloud.clientId.secretName, and global.cloud.clientSecret.secretName must also be set." ]] -} - -@test "partitionInit/Job: fails when global.cloud.enabled is true and global.cloud.clientSecret.secretName is not set but global.cloud.clientId.secretName and global.cloud.resourceId.secretName is set" { - cd `chart_dir` - run helm template \ - -s templates/mesh-gateway-deployment.yaml \ - --set 'connectInject.enabled=true' \ - --set 'meshGateway.enabled=true' \ - --set 'global.tls.enabled=true' \ - --set 'global.tls.enableAutoEncrypt=true' \ - --set 'global.cloud.enabled=true' \ - --set 'global.cloud.clientId.secretName=client-id-name' \ - --set 'global.cloud.clientId.secretKey=client-id-key' \ - --set 'global.cloud.resourceId.secretName=resource-id-name' \ - --set 'global.cloud.resourceId.secretKey=resource-id-key' \ - . - [ "$status" -eq 1 ] - [[ "$output" =~ "When global.cloud.enabled is true, global.cloud.resourceId.secretName, global.cloud.clientId.secretName, and global.cloud.clientSecret.secretName must also be set." ]] -} - -@test "partitionInit/Job: fails when global.cloud.enabled is true and global.cloud.resourceId.secretName is not set but global.cloud.clientId.secretName and global.cloud.clientSecret.secretName is set" { - cd `chart_dir` - run helm template \ - -s templates/partition-init-job.yaml \ - --set 'global.enabled=false' \ - --set 'global.adminPartitions.enabled=true' \ - --set 'global.enableConsulNamespaces=true' \ - --set "global.adminPartitions.name=bar" \ - --set 'externalServers.enabled=true' \ - --set 'externalServers.hosts[0]=foo' \ - --set 'global.tls.enabled=true' \ - --set 'global.tls.enableAutoEncrypt=true' \ - --set 'global.tls.caCert.secretName=foo' \ - --set 'global.tls.enabled=true' \ - --set 'global.tls.enableAutoEncrypt=true' \ - --set 'global.cloud.enabled=true' \ - --set 'global.cloud.clientId.secretName=client-id-name' \ - --set 'global.cloud.clientId.secretKey=client-id-key' \ - --set 'global.cloud.clientSecret.secretName=client-secret-id-name' \ - --set 'global.cloud.clientSecret.secretKey=client-secret-id-key' \ - . 
- [ "$status" -eq 1 ] - [[ "$output" =~ "When global.cloud.enabled is true, global.cloud.resourceId.secretName, global.cloud.clientId.secretName, and global.cloud.clientSecret.secretName must also be set." ]] -} - -@test "partitionInit/Job: fails when global.cloud.resourceId.secretName is set but global.cloud.resourceId.secretKey is not set." { - cd `chart_dir` - run helm template \ - -s templates/partition-init-job.yaml \ - --set 'global.enabled=false' \ - --set 'global.adminPartitions.enabled=true' \ - --set 'global.enableConsulNamespaces=true' \ - --set "global.adminPartitions.name=bar" \ - --set 'externalServers.enabled=true' \ - --set 'externalServers.hosts[0]=foo' \ - --set 'global.tls.enabled=true' \ - --set 'global.tls.enableAutoEncrypt=true' \ - --set 'global.tls.caCert.secretName=foo' \ - --set 'global.tls.enabled=true' \ - --set 'global.tls.enableAutoEncrypt=true' \ - --set 'global.cloud.enabled=true' \ - --set 'global.cloud.clientId.secretName=client-id-name' \ - --set 'global.cloud.clientId.secretKey=client-id-key' \ - --set 'global.cloud.clientSecret.secretName=client-secret-id-name' \ - --set 'global.cloud.clientSecret.secretKey=client-secret-id-key' \ - --set 'global.cloud.resourceId.secretName=resource-id-name' \ - . - [ "$status" -eq 1 ] - [[ "$output" =~ "When either global.cloud.resourceId.secretName or global.cloud.resourceId.secretKey is defined, both must be set." ]] -} - -@test "partitionInit/Job: fails when global.cloud.authURL.secretName is set but global.cloud.authURL.secretKey is not set." { - cd `chart_dir` - run helm template \ - -s templates/partition-init-job.yaml \ - --set 'global.enabled=false' \ - --set 'global.adminPartitions.enabled=true' \ - --set 'global.enableConsulNamespaces=true' \ - --set "global.adminPartitions.name=bar" \ - --set 'externalServers.enabled=true' \ - --set 'externalServers.hosts[0]=foo' \ - --set 'global.tls.enabled=true' \ - --set 'global.tls.enableAutoEncrypt=true' \ - --set 'global.tls.caCert.secretName=foo' \ - --set 'global.tls.enabled=true' \ - --set 'global.tls.enableAutoEncrypt=true' \ - --set 'global.cloud.enabled=true' \ - --set 'global.cloud.clientId.secretName=client-id-name' \ - --set 'global.cloud.clientId.secretKey=client-id-key' \ - --set 'global.cloud.clientSecret.secretName=client-secret-id-name' \ - --set 'global.cloud.clientSecret.secretKey=client-secret-id-key' \ - --set 'global.cloud.resourceId.secretName=resource-id-name' \ - --set 'global.cloud.resourceId.secretKey=resource-id-key' \ - --set 'global.cloud.authUrl.secretName=auth-url-name' \ - . - [ "$status" -eq 1 ] - - [[ "$output" =~ "When either global.cloud.authUrl.secretName or global.cloud.authUrl.secretKey is defined, both must be set." ]] -} - -@test "partitionInit/Job: fails when global.cloud.authURL.secretKey is set but global.cloud.authURL.secretName is not set." 
{ - cd `chart_dir` - run helm template \ - -s templates/partition-init-job.yaml \ - --set 'global.enabled=false' \ - --set 'global.adminPartitions.enabled=true' \ - --set 'global.enableConsulNamespaces=true' \ - --set "global.adminPartitions.name=bar" \ - --set 'externalServers.enabled=true' \ - --set 'externalServers.hosts[0]=foo' \ - --set 'global.tls.enabled=true' \ - --set 'global.tls.enableAutoEncrypt=true' \ - --set 'global.tls.caCert.secretName=foo' \ - --set 'global.tls.enabled=true' \ - --set 'global.tls.enableAutoEncrypt=true' \ - --set 'global.cloud.enabled=true' \ - --set 'global.cloud.clientId.secretName=client-id-name' \ - --set 'global.cloud.clientId.secretKey=client-id-key' \ - --set 'global.cloud.clientSecret.secretName=client-secret-id-name' \ - --set 'global.cloud.clientSecret.secretKey=client-secret-id-key' \ - --set 'global.cloud.resourceId.secretName=resource-id-name' \ - --set 'global.cloud.resourceId.secretKey=resource-id-key' \ - --set 'global.cloud.authUrl.secretKey=auth-url-key' \ - . - [ "$status" -eq 1 ] - - [[ "$output" =~ "When either global.cloud.authUrl.secretName or global.cloud.authUrl.secretKey is defined, both must be set." ]] -} - -@test "partitionInit/Job: fails when global.cloud.apiHost.secretName is set but global.cloud.apiHost.secretKey is not set." { - cd `chart_dir` - run helm template \ - -s templates/partition-init-job.yaml \ - --set 'global.enabled=false' \ - --set 'global.adminPartitions.enabled=true' \ - --set 'global.enableConsulNamespaces=true' \ - --set "global.adminPartitions.name=bar" \ - --set 'externalServers.enabled=true' \ - --set 'externalServers.hosts[0]=foo' \ - --set 'global.tls.enabled=true' \ - --set 'global.tls.enableAutoEncrypt=true' \ - --set 'global.tls.caCert.secretName=foo' \ - --set 'global.tls.enabled=true' \ - --set 'global.tls.enableAutoEncrypt=true' \ - --set 'global.cloud.enabled=true' \ - --set 'global.cloud.clientId.secretName=client-id-name' \ - --set 'global.cloud.clientId.secretKey=client-id-key' \ - --set 'global.cloud.clientSecret.secretName=client-secret-id-name' \ - --set 'global.cloud.clientSecret.secretKey=client-secret-id-key' \ - --set 'global.cloud.resourceId.secretName=resource-id-name' \ - --set 'global.cloud.resourceId.secretKey=resource-id-key' \ - --set 'global.cloud.apiHost.secretName=auth-url-name' \ - . - [ "$status" -eq 1 ] - - [[ "$output" =~ "When either global.cloud.apiHost.secretName or global.cloud.apiHost.secretKey is defined, both must be set." ]] -} - -@test "partitionInit/Job: fails when global.cloud.apiHost.secretKey is set but global.cloud.apiHost.secretName is not set." 
{ - cd `chart_dir` - run helm template \ - -s templates/partition-init-job.yaml \ - --set 'global.enabled=false' \ - --set 'global.adminPartitions.enabled=true' \ - --set 'global.enableConsulNamespaces=true' \ - --set "global.adminPartitions.name=bar" \ - --set 'externalServers.enabled=true' \ - --set 'externalServers.hosts[0]=foo' \ - --set 'global.tls.enabled=true' \ - --set 'global.tls.enableAutoEncrypt=true' \ - --set 'global.tls.caCert.secretName=foo' \ - --set 'global.tls.enabled=true' \ - --set 'global.tls.enableAutoEncrypt=true' \ - --set 'global.cloud.enabled=true' \ - --set 'global.cloud.clientId.secretName=client-id-name' \ - --set 'global.cloud.clientId.secretKey=client-id-key' \ - --set 'global.cloud.clientSecret.secretName=client-secret-id-name' \ - --set 'global.cloud.clientSecret.secretKey=client-secret-id-key' \ - --set 'global.cloud.resourceId.secretName=resource-id-name' \ - --set 'global.cloud.resourceId.secretKey=resource-id-key' \ - --set 'global.cloud.apiHost.secretKey=auth-url-key' \ - . - [ "$status" -eq 1 ] - - [[ "$output" =~ "When either global.cloud.apiHost.secretName or global.cloud.apiHost.secretKey is defined, both must be set." ]] -} - -@test "partitionInit/Job: fails when global.cloud.scadaAddress.secretName is set but global.cloud.scadaAddress.secretKey is not set." { - cd `chart_dir` - run helm template \ - -s templates/partition-init-job.yaml \ - --set 'global.enabled=false' \ - --set 'global.adminPartitions.enabled=true' \ - --set 'global.enableConsulNamespaces=true' \ - --set "global.adminPartitions.name=bar" \ - --set 'externalServers.enabled=true' \ - --set 'externalServers.hosts[0]=foo' \ - --set 'global.tls.enabled=true' \ - --set 'global.tls.enableAutoEncrypt=true' \ - --set 'global.tls.caCert.secretName=foo' \ - --set 'global.tls.enabled=true' \ - --set 'global.tls.enableAutoEncrypt=true' \ - --set 'global.cloud.enabled=true' \ - --set 'global.cloud.clientId.secretName=client-id-name' \ - --set 'global.cloud.clientId.secretKey=client-id-key' \ - --set 'global.cloud.clientSecret.secretName=client-secret-id-name' \ - --set 'global.cloud.clientSecret.secretKey=client-secret-id-key' \ - --set 'global.cloud.resourceId.secretName=resource-id-name' \ - --set 'global.cloud.resourceId.secretKey=resource-id-key' \ - --set 'global.cloud.scadaAddress.secretName=scada-address-name' \ - . - [ "$status" -eq 1 ] - - [[ "$output" =~ "When either global.cloud.scadaAddress.secretName or global.cloud.scadaAddress.secretKey is defined, both must be set." ]] -} - -@test "partitionInit/Job: fails when global.cloud.scadaAddress.secretKey is set but global.cloud.scadaAddress.secretName is not set." 
{ - cd `chart_dir` - run helm template \ - -s templates/partition-init-job.yaml \ - --set 'global.enabled=false' \ - --set 'global.adminPartitions.enabled=true' \ - --set 'global.enableConsulNamespaces=true' \ - --set "global.adminPartitions.name=bar" \ - --set 'externalServers.enabled=true' \ - --set 'externalServers.hosts[0]=foo' \ - --set 'global.tls.enabled=true' \ - --set 'global.tls.enableAutoEncrypt=true' \ - --set 'global.tls.caCert.secretName=foo' \ - --set 'global.tls.enabled=true' \ - --set 'global.tls.enableAutoEncrypt=true' \ - --set 'global.cloud.enabled=true' \ - --set 'global.cloud.clientId.secretName=client-id-name' \ - --set 'global.cloud.clientId.secretKey=client-id-key' \ - --set 'global.cloud.clientSecret.secretName=client-secret-id-name' \ - --set 'global.cloud.clientSecret.secretKey=client-secret-id-key' \ - --set 'global.cloud.resourceId.secretName=resource-id-name' \ - --set 'global.cloud.resourceId.secretKey=resource-id-key' \ - --set 'global.cloud.scadaAddress.secretKey=scada-address-key' \ - . - [ "$status" -eq 1 ] - - [[ "$output" =~ "When either global.cloud.scadaAddress.secretName or global.cloud.scadaAddress.secretKey is defined, both must be set." ]] -} - -@test "partitionInit/Job: sets TLS server name if global.cloud.enabled is set" { - cd `chart_dir` - local actual=$(helm template \ - -s templates/partition-init-job.yaml \ - --set 'global.enabled=false' \ - --set 'global.adminPartitions.enabled=true' \ - --set 'global.enableConsulNamespaces=true' \ - --set "global.adminPartitions.name=bar" \ - --set 'externalServers.enabled=true' \ - --set 'externalServers.hosts[0]=foo' \ - --set 'global.tls.enabled=true' \ - --set 'global.tls.enableAutoEncrypt=true' \ - --set 'global.tls.caCert.secretName=foo' \ - --set 'global.tls.enabled=true' \ - --set 'global.tls.enableAutoEncrypt=true' \ - --set 'global.cloud.enabled=true' \ - --set 'global.cloud.clientId.secretName=client-id-name' \ - --set 'global.cloud.clientId.secretKey=client-id-key' \ - --set 'global.cloud.clientSecret.secretName=client-secret-id-name' \ - --set 'global.cloud.clientSecret.secretKey=client-secret-id-key' \ - --set 'global.cloud.resourceId.secretName=resource-id-name' \ - --set 'global.cloud.resourceId.secretKey=resource-id-key' \ - . | tee /dev/stderr | - yq '.spec.template.spec.containers[0].command | any(contains("-tls-server-name=server.dc1.consul"))' | tee /dev/stderr) - [ "${actual}" = "true" ] -} diff --git a/charts/consul/test/unit/partition-init-podsecuritypolicy.bats b/charts/consul/test/unit/partition-init-podsecuritypolicy.bats index 1519c7254e..d00c915f6e 100644 --- a/charts/consul/test/unit/partition-init-podsecuritypolicy.bats +++ b/charts/consul/test/unit/partition-init-podsecuritypolicy.bats @@ -9,12 +9,11 @@ load _helpers . } -@test "partitionInit/PodSecurityPolicy: enabled with global.adminPartitions.enabled=true and server.enabled=false" { +@test "partitionInit/PodSecurityPolicy: enabled with global.adminPartitions.enabled=true and servers = false" { cd `chart_dir` local actual=$(helm template \ -s templates/partition-init-podsecuritypolicy.yaml \ --set 'global.adminPartitions.enabled=true' \ - --set 'global.enableConsulNamespaces=true' \ --set 'server.enabled=false' \ . | tee /dev/stderr | yq 'length > 0' | tee /dev/stderr) @@ -26,7 +25,6 @@ load _helpers assert_empty helm template \ -s templates/partition-init-podsecuritypolicy.yaml \ --set 'global.adminPartitions.enabled=true' \ - --set 'global.enableConsulNamespaces=true' \ --set 'server.enabled=true' \ . 
} @@ -36,7 +34,6 @@ load _helpers assert_empty helm template \ -s templates/partition-init-podsecuritypolicy.yaml \ --set 'global.adminPartitions.enabled=true' \ - --set 'global.enableConsulNamespaces=true' \ --set 'global.enabled=true' \ . } @@ -46,7 +43,6 @@ load _helpers assert_empty helm template \ -s templates/partition-init-podsecuritypolicy.yaml \ --set 'global.adminPartitions.enabled=true' \ - --set 'global.enableConsulNamespaces=true' \ --set 'server.enabled=true' \ . -} +} \ No newline at end of file diff --git a/charts/consul/test/unit/partition-init-role.bats b/charts/consul/test/unit/partition-init-role.bats index 16a5b980b3..c434aa3d87 100644 --- a/charts/consul/test/unit/partition-init-role.bats +++ b/charts/consul/test/unit/partition-init-role.bats @@ -9,24 +9,22 @@ load _helpers . } -@test "partitionInit/Role: enabled with global.adminPartitions.enabled=true and server.enabled=false" { +@test "partitionInit/Role: enabled with global.adminPartitions.enabled=true and servers = false" { cd `chart_dir` local actual=$(helm template \ -s templates/partition-init-role.yaml \ --set 'global.adminPartitions.enabled=true' \ - --set 'global.enableConsulNamespaces=true' \ --set 'server.enabled=false' \ . | tee /dev/stderr | yq 'length > 0' | tee /dev/stderr) [ "${actual}" = "true" ] } -@test "partitionInit/Role: disabled with global.adminPartitions.enabled=true and server.enabled=true" { +@test "partitionInit/Role: disabled with global.adminPartitions.enabled=true and servers = true" { cd `chart_dir` assert_empty helm template \ -s templates/partition-init-role.yaml \ --set 'global.adminPartitions.enabled=true' \ - --set 'global.enableConsulNamespaces=true' \ --set 'server.enabled=true' \ . } @@ -36,7 +34,6 @@ load _helpers assert_empty helm template \ -s templates/partition-init-role.yaml \ --set 'global.adminPartitions.enabled=true' \ - --set 'global.enableConsulNamespaces=true' \ --set 'global.enabled=true' \ . } @@ -46,7 +43,6 @@ load _helpers assert_empty helm template \ -s templates/partition-init-role.yaml \ --set 'global.adminPartitions.enabled=true' \ - --set 'global.enableConsulNamespaces=true' \ --set 'server.enabled=true' \ . -} +} \ No newline at end of file diff --git a/charts/consul/test/unit/partition-init-rolebinding.bats b/charts/consul/test/unit/partition-init-rolebinding.bats index f8af27cb21..d96f6e6cd3 100644 --- a/charts/consul/test/unit/partition-init-rolebinding.bats +++ b/charts/consul/test/unit/partition-init-rolebinding.bats @@ -14,7 +14,6 @@ load _helpers local actual=$(helm template \ -s templates/partition-init-rolebinding.yaml \ --set 'global.adminPartitions.enabled=true' \ - --set 'global.enableConsulNamespaces=true' \ --set 'server.enabled=false' \ . | tee /dev/stderr | yq 'length > 0' | tee /dev/stderr) @@ -26,7 +25,6 @@ load _helpers assert_empty helm template \ -s templates/partition-init-rolebinding.yaml \ --set 'global.adminPartitions.enabled=true' \ - --set 'global.enableConsulNamespaces=true' \ --set 'server.enabled=true' \ . } @@ -36,7 +34,6 @@ load _helpers assert_empty helm template \ -s templates/partition-init-rolebinding.yaml \ --set 'global.adminPartitions.enabled=true' \ - --set 'global.enableConsulNamespaces=true' \ --set 'global.enabled=true' \ . } @@ -46,7 +43,6 @@ load _helpers assert_empty helm template \ -s templates/partition-init-rolebinding.yaml \ --set 'global.adminPartitions.enabled=true' \ - --set 'global.enableConsulNamespaces=true' \ --set 'server.enabled=true' \ . 
-} +} \ No newline at end of file diff --git a/charts/consul/test/unit/partition-init-serviceaccount.bats b/charts/consul/test/unit/partition-init-serviceaccount.bats index 155e6d9e28..6195969686 100644 --- a/charts/consul/test/unit/partition-init-serviceaccount.bats +++ b/charts/consul/test/unit/partition-init-serviceaccount.bats @@ -14,7 +14,6 @@ load _helpers local actual=$(helm template \ -s templates/partition-init-serviceaccount.yaml \ --set 'global.adminPartitions.enabled=true' \ - --set 'global.enableConsulNamespaces=true' \ --set 'server.enabled=false' \ . | tee /dev/stderr | yq 'length > 0' | tee /dev/stderr) @@ -26,7 +25,6 @@ load _helpers assert_empty helm template \ -s templates/partition-init-serviceaccount.yaml \ --set 'global.adminPartitions.enabled=true' \ - --set 'global.enableConsulNamespaces=true' \ --set 'server.enabled=true' \ . } @@ -36,7 +34,6 @@ load _helpers assert_empty helm template \ -s templates/partition-init-serviceaccount.yaml \ --set 'global.adminPartitions.enabled=true' \ - --set 'global.enableConsulNamespaces=true' \ --set 'global.enabled=true' \ . } @@ -46,7 +43,6 @@ load _helpers assert_empty helm template \ -s templates/partition-init-serviceaccount.yaml \ --set 'global.adminPartitions.enabled=true' \ - --set 'global.enableConsulNamespaces=true' \ --set 'server.enabled=true' \ . -} +} \ No newline at end of file diff --git a/charts/consul/test/unit/partition-name-configmap.bats b/charts/consul/test/unit/partition-name-configmap.bats index 40e65ca3c5..e516c9ae13 100644 --- a/charts/consul/test/unit/partition-name-configmap.bats +++ b/charts/consul/test/unit/partition-name-configmap.bats @@ -14,7 +14,6 @@ load _helpers local actual=$(helm template \ -s templates/partition-init-role.yaml \ --set 'global.adminPartitions.enabled=true' \ - --set 'global.enableConsulNamespaces=true' \ --set 'server.enabled=false' \ . | tee /dev/stderr | yq 'length > 0' | tee /dev/stderr) @@ -26,7 +25,6 @@ load _helpers assert_empty helm template \ -s templates/partition-init-role.yaml \ --set 'global.adminPartitions.enabled=true' \ - --set 'global.enableConsulNamespaces=true' \ --set 'server.enabled=true' \ . } @@ -36,7 +34,6 @@ load _helpers assert_empty helm template \ -s templates/partition-init-role.yaml \ --set 'global.adminPartitions.enabled=true' \ - --set 'global.enableConsulNamespaces=true' \ --set 'global.enabled=true' \ . } @@ -47,4 +44,4 @@ load _helpers -s templates/partition-init-role.yaml \ --set 'global.adminPartitions.enabled=false' \ . -} +} \ No newline at end of file diff --git a/charts/consul/test/unit/partition-service.bats b/charts/consul/test/unit/partition-service.bats new file mode 100755 index 0000000000..b772b32d5e --- /dev/null +++ b/charts/consul/test/unit/partition-service.bats @@ -0,0 +1,133 @@ +#!/usr/bin/env bats + +load _helpers + +@test "partition/Service: disabled by default" { + cd `chart_dir` + assert_empty helm template \ + -s templates/partition-service.yaml \ + . +} + +@test "partition/Service: enable with global.enabled false" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/partition-service.yaml \ + --set 'global.enabled=false' \ + --set 'server.enabled=true' \ + --set 'global.adminPartitions.enabled=true' \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "partition/Service: disable with adminPartitions.enabled" { + cd `chart_dir` + assert_empty helm template \ + -s templates/partition-service.yaml \ + --set 'global.adminPartitions.enabled=false' \ + . 
+} + +@test "partition/Service: disable with server.enabled" { + cd `chart_dir` + assert_empty helm template \ + -s templates/partition-service.yaml \ + --set 'global.adminPartitions.enabled=true' \ + --set 'server.enabled=false' \ + . +} + +@test "partition/Service: disable with global.enabled" { + cd `chart_dir` + assert_empty helm template \ + -s templates/partition-service.yaml \ + --set 'global.enabled=false' \ + . +} + +#-------------------------------------------------------------------- +# annotations + +@test "partition/Service: no annotations by default" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/partition-service.yaml \ + --set 'global.adminPartitions.enabled=true' \ + . | tee /dev/stderr | + yq -r '.metadata.annotations | length' | tee /dev/stderr) + [ "${actual}" = "0" ] +} + +@test "partition/Service: can set annotations" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/partition-service.yaml \ + --set 'global.adminPartitions.enabled=true' \ + --set 'global.adminPartitions.service.annotations=key: value' \ + . | tee /dev/stderr | + yq -r '.metadata.annotations.key' | tee /dev/stderr) + [ "${actual}" = "value" ] +} + +#-------------------------------------------------------------------- +# nodePort + +@test "partition/Service: RPC node port can be set" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/partition-service.yaml \ + --set 'global.adminPartitions.enabled=true' \ + --set 'global.adminPartitions.service.type=NodePort' \ + --set 'global.adminPartitions.service.nodePort.rpc=4443' \ + . | tee /dev/stderr | + yq -r '.spec.ports[] | select(.name == "server") | .nodePort' | tee /dev/stderr) + [ "${actual}" == "4443" ] +} + +@test "partition/Service: Serf node port can be set" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/partition-service.yaml \ + --set 'global.adminPartitions.enabled=true' \ + --set 'global.adminPartitions.service.type=NodePort' \ + --set 'global.adminPartitions.service.nodePort.serf=4444' \ + . | tee /dev/stderr | + yq -r '.spec.ports[] | select(.name == "serflan") | .nodePort' | tee /dev/stderr) + [ "${actual}" == "4444" ] +} + +@test "partition/Service: HTTPS node port can be set" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/partition-service.yaml \ + --set 'global.adminPartitions.enabled=true' \ + --set 'global.adminPartitions.service.type=NodePort' \ + --set 'global.adminPartitions.service.nodePort.https=4444' \ + . | tee /dev/stderr | + yq -r '.spec.ports[] | select(.name == "https") | .nodePort' | tee /dev/stderr) + [ "${actual}" == "4444" ] +} + +@test "partition/Service: RPC, Serf and HTTPS node ports can be set" { + cd `chart_dir` + local ports=$(helm template \ + -s templates/partition-service.yaml \ + --set 'global.adminPartitions.enabled=true' \ + --set 'global.adminPartitions.service.type=NodePort' \ + --set 'global.adminPartitions.service.nodePort.rpc=4443' \ + --set 'global.adminPartitions.service.nodePort.https=4444' \ + --set 'global.adminPartitions.service.nodePort.serf=4445' \ + . 
| tee /dev/stderr | + yq -r '.spec.ports[]' | tee /dev/stderr) + + local actual + actual=$(echo $ports | jq -r 'select(.name == "server") | .nodePort' | tee /dev/stderr) + [ "${actual}" == "4443" ] + + actual=$(echo $ports | jq -r 'select(.name == "https") | .nodePort' | tee /dev/stderr) + [ "${actual}" == "4444" ] + + actual=$(echo $ports | jq -r 'select(.name == "serflan") | .nodePort' | tee /dev/stderr) + [ "${actual}" == "4445" ] +} diff --git a/charts/consul/test/unit/server-acl-init-job.bats b/charts/consul/test/unit/server-acl-init-job.bats index 483564c64b..cc9bfb5f77 100644 --- a/charts/consul/test/unit/server-acl-init-job.bats +++ b/charts/consul/test/unit/server-acl-init-job.bats @@ -99,6 +99,28 @@ load _helpers [[ "$output" =~ "global.bootstrapACLs was removed, use global.acls.manageSystemACLs instead" ]] } +@test "serverACLInit/Job: does not set -client=false when client is enabled (the default)" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/server-acl-init-job.yaml \ + --set 'global.acls.manageSystemACLs=true' \ + . | tee /dev/stderr | + yq '.spec.template.spec.containers[0].command[2] | contains("-client=false")' | + tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "serverACLInit/Job: sets -consul-api-timeout=5s" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/server-acl-init-job.yaml \ + --set 'global.acls.manageSystemACLs=true' \ + . | tee /dev/stderr | + yq '.spec.template.spec.containers[0].command[2] | contains("-consul-api-timeout=5s")' | + tee /dev/stderr) + [ "${actual}" = "true" ] +} + @test "serverACLInit/Job: sets -client=false when client is disabled" { cd `chart_dir` local actual=$(helm template \ @@ -111,6 +133,25 @@ load _helpers [ "${actual}" = "true" ] } +@test "serverACLInit/Job: server address is set to the DNS names of the server stateful set" { + cd `chart_dir` + local command=$(helm template \ + -s templates/server-acl-init-job.yaml \ + --set 'global.acls.manageSystemACLs=true' \ + . | tee /dev/stderr | + yq '.spec.template.spec.containers[0].command' | tee /dev/stderr) + + local actual + actual=$(echo $command | jq -r '. | any(contains("-server-address=\"${CONSUL_FULLNAME}-server-0.${CONSUL_FULLNAME}-server.${NAMESPACE}.svc\""))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + actual=$(echo $command | jq -r '. | any(contains("-server-address=\"${CONSUL_FULLNAME}-server-1.${CONSUL_FULLNAME}-server.${NAMESPACE}.svc\""))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + actual=$(echo $command | jq -r '. | any(contains("-server-address=\"${CONSUL_FULLNAME}-server-2.${CONSUL_FULLNAME}-server.${NAMESPACE}.svc\""))' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + #-------------------------------------------------------------------- # dns @@ -204,7 +245,7 @@ load _helpers local actual=$(helm template \ -s templates/server-acl-init-job.yaml \ --set 'global.acls.manageSystemACLs=true' \ - --set 'server.snapshotAgent.enabled=true' \ + --set 'client.snapshotAgent.enabled=true' \ . 
| tee /dev/stderr | yq '.spec.template.spec.containers[0].command | any(contains("-snapshot-agent"))' | tee /dev/stderr) [ "${actual}" = "true" ] @@ -498,52 +539,26 @@ load _helpers #-------------------------------------------------------------------- # global.tls.enabled -@test "serverACLInit/Job: sets TLS env vars when global.tls.enabled" { +@test "serverACLInit/Job: sets TLS flags when global.tls.enabled" { cd `chart_dir` - local object=$(helm template \ + local command=$(helm template \ -s templates/server-acl-init-job.yaml \ --set 'global.acls.manageSystemACLs=true' \ --set 'global.tls.enabled=true' \ . | tee /dev/stderr | - yq -r '.spec.template.spec.containers[0]' | tee /dev/stderr) - - local actual=$(echo $object | - yq '[.env[7].name] | any(contains("CONSUL_USE_TLS"))' | tee /dev/stderr) - [ "${actual}" = "true" ] + yq -r '.spec.template.spec.containers[0].command' | tee /dev/stderr) - local actual=$(echo $object | - yq '[.env[7].value] | any(contains("true"))' | tee /dev/stderr) + local actual + actual=$(echo $command | jq -r '. | any(contains("-use-https"))' | tee /dev/stderr) [ "${actual}" = "true" ] - local actual=$(echo $object | - yq '[.env[8].name] | any(contains("CONSUL_CACERT_FILE"))' | tee /dev/stderr) + actual=$(echo $command | jq -r '. | any(contains("-consul-ca-cert=/consul/tls/ca/tls.crt"))' | tee /dev/stderr) [ "${actual}" = "true" ] - local actual=$(echo $object | - yq '[.env[8].value] | any(contains("/consul/tls/ca/tls.crt"))' | tee /dev/stderr) + actual=$(echo $command | jq -r '. | any(contains("-server-port=8501"))' | tee /dev/stderr) [ "${actual}" = "true" ] } -@test "serverACLInit/Job: does not add consul-ca-cert volume when global.tls.enabled with externalServers and useSystemRoots" { - cd `chart_dir` - local spec=$(helm template \ - -s templates/server-acl-init-job.yaml \ - --set 'global.acls.manageSystemACLs=true' \ - --set 'global.tls.enabled=true' \ - --set 'externalServers.enabled=true' \ - --set 'externalServers.hosts[0]=consul' \ - --set 'externalServers.useSystemRoots=true' \ - --set 'servers.enabled=false' \ - . 
| tee /dev/stderr |
-    yq -r '.spec.template.spec' | tee /dev/stderr)
-
-  actual=$(echo $spec | jq -r '.volumes[] | select(.name == "consul-ca-cert")' | tee /dev/stderr)
-  [ "${actual}" = "" ]
-
-  actual=$(echo $spec | jq -r '.containers[0].volumeMounts[] | select(.name == "consul-ca-cert")' | tee /dev/stderr)
-  [ "${actual}" = "" ]
-}
-
 @test "serverACLInit/Job: can overwrite CA secret with the provided one" {
   cd `chart_dir`
   local ca_cert_volume=$(helm template \
       -s templates/server-acl-init-job.yaml \
       --set 'global.acls.manageSystemACLs=true' \
       --set 'global.tls.enabled=true' \
       --set 'global.tls.caCert.secretName=foo-ca-cert' \
       --set 'global.tls.caCert.secretKey=key' \
       --set 'global.tls.caKey.secretName=foo-ca-key' \
       --set 'global.tls.caKey.secretKey=key' \
       . | tee /dev/stderr |
       yq '.spec.template.spec.volumes[] | select(.name=="consul-ca-cert")' | tee /dev/stderr)

   # check that the provided ca cert secret is attached as a volume
   local actual
   actual=$(echo $ca_cert_volume | jq -r '.secret.secretName' | tee /dev/stderr)
   [ "${actual}" = "foo-ca-cert" ]

   # check that the volume uses the provided secret key
   actual=$(echo $ca_cert_volume | jq -r '.secret.items[0].key' | tee /dev/stderr)
   [ "${actual}" = "key" ]
 }
@@ -611,31 +626,32 @@ load _helpers
     yq -r '.spec.template' | tee /dev/stderr)

   # Check annotations
+  local actual
   actual=$(echo $object | jq -r '.metadata.annotations["vault.hashicorp.com/agent-pre-populate-only"]' | tee /dev/stderr)
   [ "${actual}" = "true" ]
-
-  local actual=$(echo $object | jq -r '.metadata.annotations["vault.hashicorp.com/agent-inject"]' | tee /dev/stderr)
+  local actual
+  actual=$(echo $object | jq -r '.metadata.annotations["vault.hashicorp.com/agent-inject"]' | tee /dev/stderr)
   [ "${actual}" = "true" ]
-
-  local actual=$(echo $object | jq -r '.metadata.annotations["vault.hashicorp.com/role"]' | tee /dev/stderr)
+  local actual
+  actual=$(echo $object | jq -r '.metadata.annotations["vault.hashicorp.com/role"]' | tee /dev/stderr)
   [ "${actual}" = "aclrole" ]

-  local actual=$(echo $object | jq -r '.metadata.annotations["vault.hashicorp.com/agent-inject-secret-bootstrap-token"]' | tee /dev/stderr)
+  local actual=$(echo $object | yq -r '.metadata.annotations."vault.hashicorp.com/agent-inject-secret-bootstrap-token"')
   [ "${actual}" = "foo" ]

-  local actual=$(echo $object | jq -r '.metadata.annotations["vault.hashicorp.com/agent-inject-template-bootstrap-token"]' | tee /dev/stderr)
+  local actual=$(echo $object | yq -r '.metadata.annotations."vault.hashicorp.com/agent-inject-template-bootstrap-token"')
   local expected=$'{{- with secret \"foo\" -}}\n{{- .Data.data.bar -}}\n{{- end -}}'
   [ "${actual}" = "${expected}" ]

   # Check that the bootstrap token flag is set to the path of the Vault secret.
-  local actual=$(echo $object | jq -r '.spec.containers[] | select(.name=="server-acl-init-job").command | any(contains("-bootstrap-token-file=/vault/secrets/bootstrap-token"))')
+  local actual=$(echo $object | jq -r '.spec.containers[] | select(.name=="post-install-job").command | any(contains("-bootstrap-token-file=/vault/secrets/bootstrap-token"))')
   [ "${actual}" = "true" ]

   # Check that no (secret) volumes are attached
   local actual=$(echo $object | jq -r '.spec.volumes')
   [ "${actual}" = "null" ]

-  local actual=$(echo $object | jq -r '.spec.containers[] | select(.name=="server-acl-init-job").volumeMounts')
+  local actual=$(echo $object | jq -r '.spec.containers[] | select(.name=="post-install-job").volumeMounts')
   [ "${actual}" = "null" ]
 }

@@ -679,7 +695,7 @@ load _helpers
   local actual=$(echo $object | jq -r '.spec.volumes')
   [ "${actual}" = "null" ]

-  local actual=$(echo $object | jq -r '.spec.containers[] | select(.name=="server-acl-init-job").volumeMounts')
+  local actual=$(echo $object | jq -r '.spec.containers[] | select(.name=="post-install-job").volumeMounts')
   [ "${actual}" = "null" ]
 }

@@ -821,11 +837,11 @@ load _helpers
   local actual=$(echo $object | jq -r '.spec.volumes')
   [ "${actual}" = "null" ]

-  local actual=$(echo $object | jq -r '.spec.containers[] | select(.name=="server-acl-init-job").volumeMounts')
+  local actual=$(echo $object | jq -r '.spec.containers[] | select(.name=="post-install-job").volumeMounts')
   [ "${actual}" = "null" ]

   # Check that the replication token flag is set to the path of the Vault secret.
-  local actual=$(echo $object | jq -r '.spec.containers[] | select(.name=="server-acl-init-job").command | any(contains("-acl-replication-token-file=/vault/secrets/replication-token"))')
+  local actual=$(echo $object | jq -r '.spec.containers[] | select(.name=="post-install-job").command | any(contains("-acl-replication-token-file=/vault/secrets/replication-token"))')
   [ "${actual}" = "true" ]
 }

@@ -869,14 +885,14 @@ load _helpers
   local actual=$(echo $object | jq -r '.spec.volumes')
   [ "${actual}" = "null" ]

-  local actual=$(echo $object | jq -r '.spec.containers[] | select(.name=="server-acl-init-job").volumeMounts')
+  local actual=$(echo $object | jq -r '.spec.containers[] | select(.name=="post-install-job").volumeMounts')
   [ "${actual}" = "null" ]

   # Check that the replication and bootstrap token flags are set to the path of the Vault secret.
-  local actual=$(echo $object | jq -r '.spec.containers[] | select(.name=="server-acl-init-job").command | any(contains("-acl-replication-token-file=/vault/secrets/replication-token"))')
+  local actual=$(echo $object | jq -r '.spec.containers[] | select(.name=="post-install-job").command | any(contains("-acl-replication-token-file=/vault/secrets/replication-token"))')
   [ "${actual}" = "true" ]

-  local actual=$(echo $object | jq -r '.spec.containers[] | select(.name=="server-acl-init-job").command | any(contains("-bootstrap-token-file=/vault/secrets/bootstrap-token"))')
+  local actual=$(echo $object | jq -r '.spec.containers[] | select(.name=="post-install-job").command | any(contains("-bootstrap-token-file=/vault/secrets/bootstrap-token"))')
   [ "${actual}" = "true" ]
 }

@@ -899,7 +915,6 @@ load _helpers
       --set 'global.acls.partitionToken.secretKey=token' \
       --set 'global.adminPartitions.enabled=true' \
       --set "global.adminPartitions.name=default" \
-      --set 'global.enableConsulNamespaces=true' \
       . | tee /dev/stderr |
       yq -r '.spec.template' | tee /dev/stderr)

@@ -919,11 +934,11 @@ load _helpers
   local actual=$(echo $object | jq -r '.spec.volumes')
   [ "${actual}" = "null" ]

-  local actual=$(echo $object | jq -r '.spec.containers[] | select(.name=="server-acl-init-job").volumeMounts')
+  local actual=$(echo $object | jq -r '.spec.containers[] | select(.name=="post-install-job").volumeMounts')
   [ "${actual}" = "null" ]

   # Check that the partition token flag is set to the path of the Vault secret.
- local actual=$(echo $object | jq -r '.spec.containers[] | select(.name=="server-acl-init-job").command | any(contains("-partition-token-file=/vault/secrets/partition-token"))') + local actual=$(echo $object | jq -r '.spec.containers[] | select(.name=="post-install-job").command | any(contains("-partition-token-file=/vault/secrets/partition-token"))') [ "${actual}" = "true" ] } @@ -998,7 +1013,7 @@ load _helpers local actual=$(echo $object | yq 'any(contains("connect-inject"))' | tee /dev/stderr) - [ "${actual}" = "true" ] + [ "${actual}" = "false" ] local actual=$(echo $object | yq 'any(contains("consul-inject-destination-namespace"))' | tee /dev/stderr) @@ -1045,15 +1060,15 @@ load _helpers local actual=$(echo $object | yq 'any(contains("connect-inject"))' | tee /dev/stderr) - [ "${actual}" = "true" ] + [ "${actual}" = "false" ] local actual=$(echo $object | yq 'any(contains("consul-inject-destination-namespace"))' | tee /dev/stderr) - [ "${actual}" = "true" ] + [ "${actual}" = "false" ] local actual=$(echo $object | yq 'any(contains("enable-inject-k8s-namespace-mirroring"))' | tee /dev/stderr) - [ "${actual}" = "true" ] + [ "${actual}" = "false" ] local actual=$(echo $object | yq 'any(contains("inject-k8s-namespace-mirroring-prefix"))' | tee /dev/stderr) @@ -1080,7 +1095,7 @@ load _helpers local actual=$(echo $object | yq 'any(contains("enable-sync-k8s-namespace-mirroring"))' | tee /dev/stderr) - [ "${actual}" = "true" ] + [ "${actual}" = "false" ] local actual=$(echo $object | yq 'any(contains("sync-k8s-namespace-mirroring-prefix"))' | tee /dev/stderr) @@ -1088,15 +1103,15 @@ load _helpers local actual=$(echo $object | yq 'any(contains("connect-inject"))' | tee /dev/stderr) - [ "${actual}" = "true" ] + [ "${actual}" = "false" ] local actual=$(echo $object | yq 'any(contains("consul-inject-destination-namespace"))' | tee /dev/stderr) - [ "${actual}" = "true" ] + [ "${actual}" = "false" ] local actual=$(echo $object | yq 'any(contains("enable-inject-k8s-namespace-mirroring"))' | tee /dev/stderr) - [ "${actual}" = "true" ] + [ "${actual}" = "false" ] local actual=$(echo $object | yq 'any(contains("inject-k8s-namespace-mirroring-prefix"))' | tee /dev/stderr) @@ -1132,15 +1147,15 @@ load _helpers local actual=$(echo $object | yq 'any(contains("connect-inject"))' | tee /dev/stderr) - [ "${actual}" = "true" ] + [ "${actual}" = "false" ] local actual=$(echo $object | yq 'any(contains("consul-inject-destination-namespace"))' | tee /dev/stderr) - [ "${actual}" = "true" ] + [ "${actual}" = "false" ] local actual=$(echo $object | yq 'any(contains("enable-inject-k8s-namespace-mirroring"))' | tee /dev/stderr) - [ "${actual}" = "true" ] + [ "${actual}" = "false" ] local actual=$(echo $object | yq 'any(contains("inject-k8s-namespace-mirroring-prefix"))' | tee /dev/stderr) @@ -1177,15 +1192,15 @@ load _helpers local actual=$(echo $object | yq 'any(contains("connect-inject"))' | tee /dev/stderr) - [ "${actual}" = "true" ] + [ "${actual}" = "false" ] local actual=$(echo $object | yq 'any(contains("consul-inject-destination-namespace"))' | tee /dev/stderr) - [ "${actual}" = "true" ] + [ "${actual}" = "false" ] local actual=$(echo $object | yq 'any(contains("enable-inject-k8s-namespace-mirroring"))' | tee /dev/stderr) - [ "${actual}" = "true" ] + [ "${actual}" = "false" ] local actual=$(echo $object | yq 'any(contains("inject-k8s-namespace-mirroring-prefix"))' | tee /dev/stderr) @@ -1224,19 +1239,19 @@ load _helpers local actual=$(echo $object | yq 'any(contains("connect-inject"))' | tee /dev/stderr) - [ 
"${actual}" = "true" ] + [ "${actual}" = "false" ] local actual=$(echo $object | yq 'any(contains("consul-inject-destination-namespace"))' | tee /dev/stderr) - [ "${actual}" = "true" ] + [ "${actual}" = "false" ] local actual=$(echo $object | yq 'any(contains("enable-inject-k8s-namespace-mirroring"))' | tee /dev/stderr) - [ "${actual}" = "true" ] + [ "${actual}" = "false" ] local actual=$(echo $object | yq 'any(contains("inject-k8s-namespace-mirroring-prefix"))' | tee /dev/stderr) - [ "${actual}" = "true" ] + [ "${actual}" = "false" ] } @test "serverACLInit/Job: inject namespace options set with .global.enableConsulNamespaces=true and inject enabled" { @@ -1275,7 +1290,7 @@ load _helpers local actual=$(echo $object | yq 'any(contains("enable-inject-k8s-namespace-mirroring"))' | tee /dev/stderr) - [ "${actual}" = "true" ] + [ "${actual}" = "false" ] local actual=$(echo $object | yq 'any(contains("inject-k8s-namespace-mirroring-prefix"))' | tee /dev/stderr) @@ -1392,8 +1407,6 @@ load _helpers local object=$(helm template \ -s templates/server-acl-init-job.yaml \ --set 'global.acls.manageSystemACLs=true' \ - --set 'global.tls.enabled=true' \ - --set 'meshGateway.enabled=true' \ --set 'global.peering.enabled=true' \ --set 'connectInject.enabled=true' \ . | tee /dev/stderr | @@ -1432,22 +1445,14 @@ load _helpers --set 'global.adminPartitions.enabled=true' \ --set 'global.enableConsulNamespaces=true' \ . | tee /dev/stderr | - yq '.spec.template.spec.containers[0]' | tee /dev/stderr) - - local actual=$(echo $object | - yq '[.env[7].name] | any(contains("CONSUL_PARTITION"))' | tee /dev/stderr) - [ "${actual}" = "true" ] - - local actual=$(echo $object | - yq '[.env[7].value] | any(contains("default"))' | tee /dev/stderr) - [ "${actual}" = "true" ] + yq '.spec.template.spec.containers[0].command' | tee /dev/stderr) local actual=$(echo $object | - yq '[.env[8].name] | any(contains("CONSUL_LOGIN_PARTITION"))' | tee /dev/stderr) + yq 'any(contains("enable-partitions"))' | tee /dev/stderr) [ "${actual}" = "true" ] local actual=$(echo $object | - yq '[.env[8].value] | any(contains("default"))' | tee /dev/stderr) + yq 'any(contains("partition"))' | tee /dev/stderr) [ "${actual}" = "true" ] } @@ -1607,47 +1612,46 @@ load _helpers @test "serverACLInit/Job: sets server address if externalServers.hosts are set" { cd `chart_dir` - local object=$(helm template \ + local actual=$(helm template \ -s templates/server-acl-init-job.yaml \ --set 'global.acls.manageSystemACLs=true' \ --set 'server.enabled=false' \ --set 'externalServers.enabled=true' \ --set 'externalServers.hosts[0]=foo.com' \ . | tee /dev/stderr | - yq -r '.spec.template.spec.containers[0]' | tee /dev/stderr) - - local actual=$(echo $object | - yq '[.env[2].name] | any(contains("CONSUL_ADDRESSES"))' | tee /dev/stderr) + yq '.spec.template.spec.containers[0].command | any(contains("-server-address=\"foo.com\""))' | tee /dev/stderr) [ "${actual}" = "true" ] +} - local actual=$(echo $object | - yq '[.env[2].value] | any(contains("foo.com"))' | tee /dev/stderr) +@test "serverACLInit/Job: can pass cloud auto-join string to server address via externalServers.hosts" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/server-acl-init-job.yaml \ + --set 'global.acls.manageSystemACLs=true' \ + --set 'server.enabled=false' \ + --set 'externalServers.enabled=true' \ + --set 'externalServers.hosts[0]=provider=my-cloud config=val' \ + . 
| tee /dev/stderr | + yq '.spec.template.spec.containers[0].command | any(contains("-server-address=\"provider=my-cloud config=val\""))' | tee /dev/stderr) [ "${actual}" = "true" ] } @test "serverACLInit/Job: port 8501 is used by default" { cd `chart_dir` - local object=$(helm template \ + local actual=$(helm template \ -s templates/server-acl-init-job.yaml \ --set 'global.acls.manageSystemACLs=true' \ --set 'server.enabled=false' \ --set 'externalServers.enabled=true' \ --set 'externalServers.hosts[0]=1.1.1.1' \ . | tee /dev/stderr | - yq '.spec.template.spec.containers[0]' | tee /dev/stderr) - - local actual=$(echo $object | - yq '[.env[4].name] | any(contains("CONSUL_HTTP_PORT"))' | tee /dev/stderr) - [ "${actual}" = "true" ] - - local actual=$(echo $object | - yq '[.env[4].value] | any(contains("8501"))' | tee /dev/stderr) + yq '.spec.template.spec.containers[0].command | any(contains("-server-port=8501"))' | tee /dev/stderr) [ "${actual}" = "true" ] } @test "serverACLInit/Job: can override externalServers.httpsPort" { cd `chart_dir` - local object=$(helm template \ + local actual=$(helm template \ -s templates/server-acl-init-job.yaml \ --set 'global.acls.manageSystemACLs=true' \ --set 'server.enabled=false' \ @@ -1655,14 +1659,7 @@ load _helpers --set 'externalServers.hosts[0]=1.1.1.1' \ --set 'externalServers.httpsPort=443' \ . | tee /dev/stderr | - yq '.spec.template.spec.containers[0]' | tee /dev/stderr) - - local actual=$(echo $object | - yq '[.env[4].name] | any(contains("CONSUL_HTTP_PORT"))' | tee /dev/stderr) - [ "${actual}" = "true" ] - - local actual=$(echo $object | - yq '[.env[4].value] | any(contains("443"))' | tee /dev/stderr) + yq '.spec.template.spec.containers[0].command | any(contains("-server-port=443"))' | tee /dev/stderr) [ "${actual}" = "true" ] } @@ -1698,7 +1695,7 @@ load _helpers @test "serverACLInit/Job: sets the CA cert if TLS is enabled and externalServers.enabled is true but externalServers.useSystemRoots is false" { cd `chart_dir` - local object=$(helm template \ + local actual=$(helm template \ -s templates/server-acl-init-job.yaml \ --set 'global.acls.manageSystemACLs=true' \ --set 'global.tls.enabled=true' \ @@ -1707,20 +1704,13 @@ load _helpers --set 'externalServers.hosts[0]=1.1.1.1' \ --set 'externalServers.useSystemRoots=false' \ . | tee /dev/stderr | - yq '.spec.template.spec.containers[0]' | tee /dev/stderr) - - local actual=$(echo $object | - yq '[.env[8].name] | any(contains("CONSUL_CACERT_FILE"))' | tee /dev/stderr) - [ "${actual}" = "true" ] - - local actual=$(echo $object | - yq '[.env[8].value] | any(contains("/consul/tls/ca/tls.crt"))' | tee /dev/stderr) + yq '.spec.template.spec.containers[0].command | any(contains("-consul-ca-cert=/consul/tls/ca/tls.crt"))' | tee /dev/stderr) [ "${actual}" = "true" ] } @test "serverACLInit/Job: sets the CA cert if TLS is enabled and externalServers.useSystemRoots is true but externalServers.enabled is false" { cd `chart_dir` - local object=$(helm template \ + local actual=$(helm template \ -s templates/server-acl-init-job.yaml \ --set 'global.acls.manageSystemACLs=true' \ --set 'global.tls.enabled=true' \ @@ -1728,36 +1718,19 @@ load _helpers --set 'externalServers.hosts[0]=1.1.1.1' \ --set 'externalServers.useSystemRoots=true' \ . 
| tee /dev/stderr | - yq '.spec.template.spec.containers[0]' | tee /dev/stderr) - - local actual=$(echo $object | - yq '[.env[8].name] | any(contains("CONSUL_CACERT_FILE"))' | tee /dev/stderr) - [ "${actual}" = "true" ] - - local actual=$(echo $object | - yq '[.env[8].value] | any(contains("/consul/tls/ca/tls.crt"))' | tee /dev/stderr) + yq '.spec.template.spec.containers[0].command | any(contains("-consul-ca-cert=/consul/tls/ca/tls.crt"))' | tee /dev/stderr) [ "${actual}" = "true" ] } @test "serverACLInit/Job: sets TLS server name if externalServers.tlsServerName is set" { cd `chart_dir` - local object=$(helm template \ + local actual=$(helm template \ -s templates/server-acl-init-job.yaml \ --set 'global.acls.manageSystemACLs=true' \ --set 'global.tls.enabled=true' \ - --set 'server.enabled=false' \ - --set 'externalServers.enabled=true' \ - --set 'externalServers.hosts[0]=1.1.1.1' \ --set 'externalServers.tlsServerName=foo' \ . | tee /dev/stderr | - yq '.spec.template.spec.containers[0]' | tee /dev/stderr) - - local actual=$(echo $object | - yq '[.env[9].name] | any(contains("CONSUL_TLS_SERVER_NAME"))' | tee /dev/stderr) - [ "${actual}" = "true" ] - - local actual=$(echo $object | - yq '[.env[9].value] | any(contains("foo"))' | tee /dev/stderr) + yq '.spec.template.spec.containers[0].command | any(contains("-consul-tls-server-name=foo"))' | tee /dev/stderr) [ "${actual}" = "true" ] } @@ -1917,14 +1890,14 @@ load _helpers #-------------------------------------------------------------------- # controller -@test "serverACLInit/Job: -controller set by default" { +@test "serverACLInit/Job: -controller not set by default" { cd `chart_dir` local actual=$(helm template \ -s templates/server-acl-init-job.yaml \ --set 'global.acls.manageSystemACLs=true' \ . | tee /dev/stderr | yq '.spec.template.spec.containers[0].command | any(contains("controller"))' | tee /dev/stderr) - [ "${actual}" = "true" ] + [ "${actual}" = "false" ] } @test "serverACLInit/Job: -controller set when controller.enabled=true" { @@ -1954,191 +1927,3 @@ load _helpers yq '.spec.template.spec.containers[0].command | any(contains("-federation"))' | tee /dev/stderr) [ "${actual}" = "true" ] } - -#-------------------------------------------------------------------- -# global.cloud - -@test "partitionInit/JobserverACLInit/Job: fails when global.cloud.enabled is true and global.cloud.clientId.secretName is not set but global.cloud.clientSecret.secretName and global.cloud.resourceId.secretName is set" { - cd `chart_dir` - run helm template \ - -s templates/server-acl-init-job.yaml \ - --set 'global.acls.manageSystemACLs=true' \ - --set 'global.tls.enabled=true' \ - --set 'global.cloud.enabled=true' \ - --set 'global.cloud.clientSecret.secretName=client-id-name' \ - --set 'global.cloud.clientSecret.secretKey=client-id-key' \ - --set 'global.cloud.resourceId.secretName=client-resource-id-name' \ - --set 'global.cloud.resourceId.secretKey=client-resource-id-key' \ - . - [ "$status" -eq 1 ] - [[ "$output" =~ "When global.cloud.enabled is true, global.cloud.resourceId.secretName, global.cloud.clientId.secretName, and global.cloud.clientSecret.secretName must also be set." 
]] -} - -@test "partitionInit/JobserverACLInit/Job: fails when global.cloud.enabled is true and global.cloud.clientSecret.secretName is not set but global.cloud.clientId.secretName and global.cloud.resourceId.secretName is set" { - cd `chart_dir` - run helm template \ - -s templates/server-acl-init-job.yaml \ - --set 'global.acls.manageSystemACLs=true' \ - --set 'global.tls.enabled=true' \ - --set 'global.cloud.enabled=true' \ - --set 'global.cloud.clientId.secretName=client-id-name' \ - --set 'global.cloud.clientId.secretKey=client-id-key' \ - --set 'global.cloud.resourceId.secretName=resource-id-name' \ - --set 'global.cloud.resourceId.secretKey=resource-id-key' \ - . - [ "$status" -eq 1 ] - [[ "$output" =~ "When global.cloud.enabled is true, global.cloud.resourceId.secretName, global.cloud.clientId.secretName, and global.cloud.clientSecret.secretName must also be set." ]] -} - -@test "partitionInit/JobserverACLInit/Job: fails when global.cloud.enabled is true and global.cloud.resourceId.secretName is not set but global.cloud.clientId.secretName and global.cloud.clientSecret.secretName is set" { - cd `chart_dir` - run helm template \ - -s templates/server-acl-init-job.yaml \ - --set 'global.acls.manageSystemACLs=true' \ - --set 'global.tls.enabled=true' \ - --set 'global.cloud.enabled=true' \ - --set 'global.cloud.clientId.secretName=client-id-name' \ - --set 'global.cloud.clientId.secretKey=client-id-key' \ - --set 'global.cloud.clientSecret.secretName=client-secret-id-name' \ - --set 'global.cloud.clientSecret.secretKey=client-secret-id-key' \ - . - [ "$status" -eq 1 ] - [[ "$output" =~ "When global.cloud.enabled is true, global.cloud.resourceId.secretName, global.cloud.clientId.secretName, and global.cloud.clientSecret.secretName must also be set." ]] -} - -@test "partitionInit/JobserverACLInit/Job: fails when global.cloud.resourceId.secretName is set but global.cloud.resourceId.secretKey is not set." { - cd `chart_dir` - run helm template \ - -s templates/server-acl-init-job.yaml \ - --set 'global.acls.manageSystemACLs=true' \ - --set 'global.tls.enabled=true' \ - --set 'global.cloud.enabled=true' \ - --set 'global.cloud.clientId.secretName=client-id-name' \ - --set 'global.cloud.clientId.secretKey=client-id-key' \ - --set 'global.cloud.clientSecret.secretName=client-secret-id-name' \ - --set 'global.cloud.clientSecret.secretKey=client-secret-id-key' \ - --set 'global.cloud.resourceId.secretName=resource-id-name' \ - . - [ "$status" -eq 1 ] - [[ "$output" =~ "When either global.cloud.resourceId.secretName or global.cloud.resourceId.secretKey is defined, both must be set." ]] -} - -@test "partitionInit/JobserverACLInit/Job: fails when global.cloud.authURL.secretName is set but global.cloud.authURL.secretKey is not set." { - cd `chart_dir` - run helm template \ - -s templates/server-acl-init-job.yaml \ - --set 'global.acls.manageSystemACLs=true' \ - --set 'global.tls.enabled=true' \ - --set 'global.cloud.enabled=true' \ - --set 'global.cloud.clientId.secretName=client-id-name' \ - --set 'global.cloud.clientId.secretKey=client-id-key' \ - --set 'global.cloud.clientSecret.secretName=client-secret-id-name' \ - --set 'global.cloud.clientSecret.secretKey=client-secret-id-key' \ - --set 'global.cloud.resourceId.secretName=resource-id-name' \ - --set 'global.cloud.resourceId.secretKey=resource-id-key' \ - --set 'global.cloud.authUrl.secretName=auth-url-name' \ - . 
- [ "$status" -eq 1 ] - - [[ "$output" =~ "When either global.cloud.authUrl.secretName or global.cloud.authUrl.secretKey is defined, both must be set." ]] -} - -@test "partitionInit/JobserverACLInit/Job: fails when global.cloud.authURL.secretKey is set but global.cloud.authURL.secretName is not set." { - cd `chart_dir` - run helm template \ - -s templates/server-acl-init-job.yaml \ - --set 'global.acls.manageSystemACLs=true' \ - --set 'global.tls.enabled=true' \ - --set 'global.cloud.enabled=true' \ - --set 'global.cloud.clientId.secretName=client-id-name' \ - --set 'global.cloud.clientId.secretKey=client-id-key' \ - --set 'global.cloud.clientSecret.secretName=client-secret-id-name' \ - --set 'global.cloud.clientSecret.secretKey=client-secret-id-key' \ - --set 'global.cloud.resourceId.secretName=resource-id-name' \ - --set 'global.cloud.resourceId.secretKey=resource-id-key' \ - --set 'global.cloud.authUrl.secretKey=auth-url-key' \ - . - [ "$status" -eq 1 ] - - [[ "$output" =~ "When either global.cloud.authUrl.secretName or global.cloud.authUrl.secretKey is defined, both must be set." ]] -} - -@test "partitionInit/JobserverACLInit/Job: fails when global.cloud.apiHost.secretName is set but global.cloud.apiHost.secretKey is not set." { - cd `chart_dir` - run helm template \ - -s templates/server-acl-init-job.yaml \ - --set 'global.acls.manageSystemACLs=true' \ - --set 'global.tls.enabled=true' \ - --set 'global.cloud.enabled=true' \ - --set 'global.cloud.clientId.secretName=client-id-name' \ - --set 'global.cloud.clientId.secretKey=client-id-key' \ - --set 'global.cloud.clientSecret.secretName=client-secret-id-name' \ - --set 'global.cloud.clientSecret.secretKey=client-secret-id-key' \ - --set 'global.cloud.resourceId.secretName=resource-id-name' \ - --set 'global.cloud.resourceId.secretKey=resource-id-key' \ - --set 'global.cloud.apiHost.secretName=auth-url-name' \ - . - [ "$status" -eq 1 ] - - [[ "$output" =~ "When either global.cloud.apiHost.secretName or global.cloud.apiHost.secretKey is defined, both must be set." ]] -} - -@test "partitionInit/JobserverACLInit/Job: fails when global.cloud.apiHost.secretKey is set but global.cloud.apiHost.secretName is not set." { - cd `chart_dir` - run helm template \ - -s templates/server-acl-init-job.yaml \ - --set 'global.acls.manageSystemACLs=true' \ - --set 'global.tls.enabled=true' \ - --set 'global.cloud.enabled=true' \ - --set 'global.cloud.clientId.secretName=client-id-name' \ - --set 'global.cloud.clientId.secretKey=client-id-key' \ - --set 'global.cloud.clientSecret.secretName=client-secret-id-name' \ - --set 'global.cloud.clientSecret.secretKey=client-secret-id-key' \ - --set 'global.cloud.resourceId.secretName=resource-id-name' \ - --set 'global.cloud.resourceId.secretKey=resource-id-key' \ - --set 'global.cloud.apiHost.secretKey=auth-url-key' \ - . - [ "$status" -eq 1 ] - - [[ "$output" =~ "When either global.cloud.apiHost.secretName or global.cloud.apiHost.secretKey is defined, both must be set." ]] -} - -@test "partitionInit/JobserverACLInit/Job: fails when global.cloud.scadaAddress.secretName is set but global.cloud.scadaAddress.secretKey is not set." 
{ - cd `chart_dir` - run helm template \ - -s templates/server-acl-init-job.yaml \ - --set 'global.acls.manageSystemACLs=true' \ - --set 'global.tls.enabled=true' \ - --set 'global.cloud.enabled=true' \ - --set 'global.cloud.clientId.secretName=client-id-name' \ - --set 'global.cloud.clientId.secretKey=client-id-key' \ - --set 'global.cloud.clientSecret.secretName=client-secret-id-name' \ - --set 'global.cloud.clientSecret.secretKey=client-secret-id-key' \ - --set 'global.cloud.resourceId.secretName=resource-id-name' \ - --set 'global.cloud.resourceId.secretKey=resource-id-key' \ - --set 'global.cloud.scadaAddress.secretName=scada-address-name' \ - . - [ "$status" -eq 1 ] - - [[ "$output" =~ "When either global.cloud.scadaAddress.secretName or global.cloud.scadaAddress.secretKey is defined, both must be set." ]] -} - -@test "partitionInit/JobserverACLInit/Job: fails when global.cloud.scadaAddress.secretKey is set but global.cloud.scadaAddress.secretName is not set." { - cd `chart_dir` - run helm template \ - -s templates/server-acl-init-job.yaml \ - --set 'global.acls.manageSystemACLs=true' \ - --set 'global.tls.enabled=true' \ - --set 'global.cloud.enabled=true' \ - --set 'global.cloud.clientId.secretName=client-id-name' \ - --set 'global.cloud.clientId.secretKey=client-id-key' \ - --set 'global.cloud.clientSecret.secretName=client-secret-id-name' \ - --set 'global.cloud.clientSecret.secretKey=client-secret-id-key' \ - --set 'global.cloud.resourceId.secretName=resource-id-name' \ - --set 'global.cloud.resourceId.secretKey=resource-id-key' \ - --set 'global.cloud.scadaAddress.secretKey=scada-address-key' \ - . - [ "$status" -eq 1 ] - - [[ "$output" =~ "When either global.cloud.scadaAddress.secretName or global.cloud.scadaAddress.secretKey is defined, both must be set." ]] -} diff --git a/charts/consul/test/unit/server-config-configmap.bats b/charts/consul/test/unit/server-config-configmap.bats index 985a0478fb..d31cbe774c 100755 --- a/charts/consul/test/unit/server-config-configmap.bats +++ b/charts/consul/test/unit/server-config-configmap.bats @@ -62,30 +62,6 @@ load _helpers [ "${actual}" = "release-name-consul-server.default.svc:8301" ] } -#-------------------------------------------------------------------- -# grpc - -@test "server/ConfigMap: if tls is disabled, grpc port is set" { - cd `chart_dir` - local actual=$(helm template \ - -s templates/server-config-configmap.yaml \ - . | tee /dev/stderr | - yq -r '.data["server.json"]' | jq -r .ports.grpc | tee /dev/stderr) - - [ "${actual}" = "8502" ] -} - -@test "server/ConfigMap: if tls is enabled, grpc_tls port is set" { - cd `chart_dir` - local actual=$(helm template \ - --set 'global.tls.enabled=true' \ - -s templates/server-config-configmap.yaml \ - . | tee /dev/stderr | - yq -r '.data["server.json"]' | jq -r .ports.grpc_tls | tee /dev/stderr) - - [ "${actual}" = "8502" ] -} - #-------------------------------------------------------------------- # serflan @@ -142,7 +118,7 @@ load _helpers -s templates/server-config-configmap.yaml \ . | tee /dev/stderr | yq -r '.data["server.json"]' | jq .bootstrap_expect | tee /dev/stderr) - [ "${actual}" = "1" ] + [ "${actual}" = "3" ] } @test "server/ConfigMap: bootstrap_expect can be set by server.bootstrapExpect" { @@ -729,7 +705,6 @@ load _helpers local config=$(helm template \ -s templates/server-config-configmap.yaml \ --set 'global.tls.enabled=true' \ - --set 'meshGateway.enabled=true' \ --set 'global.peering.enabled=true' \ --set 'connectInject.enabled=true' \ . 
| tee /dev/stderr | @@ -783,7 +758,6 @@ load _helpers local config=$(helm template \ -s templates/server-config-configmap.yaml \ --set 'global.tls.enabled=true' \ - --set 'meshGateway.enabled=true' \ --set 'global.peering.enabled=true' \ --set 'connectInject.enabled=true' \ --set 'global.tls.verify=false' \ @@ -857,7 +831,6 @@ load _helpers local object=$(helm template \ -s templates/server-config-configmap.yaml \ --set 'global.tls.enabled=true' \ - --set 'meshGateway.enabled=true' \ --set 'global.peering.enabled=true' \ --set 'connectInject.enabled=true' \ --set 'global.tls.enableAutoEncrypt=true' \ @@ -951,8 +924,6 @@ load _helpers local actual=$(helm template \ -s templates/server-config-configmap.yaml \ --set 'global.peering.enabled=true' \ - --set 'global.tls.enabled=true' \ - --set 'meshGateway.enabled=true' \ --set 'connectInject.enabled=true' \ . | tee /dev/stderr | yq -r '.data["server.json"]' | jq -r .peering.enabled | tee /dev/stderr) diff --git a/charts/consul/test/unit/server-podsecuritypolicy.bats b/charts/consul/test/unit/server-podsecuritypolicy.bats index e862cd90d1..99902d1971 100644 --- a/charts/consul/test/unit/server-podsecuritypolicy.bats +++ b/charts/consul/test/unit/server-podsecuritypolicy.bats @@ -39,7 +39,7 @@ load _helpers --set 'server.exposeGossipAndRPCPorts=true' \ . | tee /dev/stderr | yq -c '.spec.hostPorts' | tee /dev/stderr) - [ "${actual}" = '[{"min":8300,"max":8300},{"min":8301,"max":8301},{"min":8302,"max":8302},{"min":8502,"max":8502}]' ] + [ "${actual}" = '[{"min":8300,"max":8300},{"min":8301,"max":8301},{"min":8302,"max":8302},{"min":8503,"max":8503}]' ] } @test "server/PodSecurityPolicy: hostPort 8300, server.ports.serflan.port and 8302 allowed when exposeGossipAndRPCPorts=true" { @@ -51,5 +51,5 @@ load _helpers --set 'server.ports.serflan.port=8333' \ . | tee /dev/stderr | yq -c '.spec.hostPorts' | tee /dev/stderr) - [ "${actual}" = '[{"min":8300,"max":8300},{"min":8333,"max":8333},{"min":8302,"max":8302},{"min":8502,"max":8502}]' ] + [ "${actual}" = '[{"min":8300,"max":8300},{"min":8333,"max":8333},{"min":8302,"max":8302},{"min":8503,"max":8503}]' ] } diff --git a/charts/consul/test/unit/server-service.bats b/charts/consul/test/unit/server-service.bats index 1aafd08fd4..c639d38b51 100755 --- a/charts/consul/test/unit/server-service.bats +++ b/charts/consul/test/unit/server-service.bats @@ -42,6 +42,11 @@ load _helpers # this is such an important part of making everything work we verify it here. @test "server/Service: tolerates unready endpoints" { cd `chart_dir` + local actual=$(helm template \ + -s templates/server-service.yaml \ + . | tee /dev/stderr | + yq -r '.metadata.annotations["service.alpha.kubernetes.io/tolerate-unready-endpoints"]' | tee /dev/stderr) + [ "${actual}" = "true" ] local actual=$(helm template \ -s templates/server-service.yaml \ @@ -98,13 +103,13 @@ load _helpers #-------------------------------------------------------------------- # annotations -@test "server/Service: no annotation by default" { +@test "server/Service: one annotation by default" { cd `chart_dir` local actual=$(helm template \ -s templates/server-service.yaml \ . 
| tee /dev/stderr | yq -r '.metadata.annotations | length' | tee /dev/stderr) - [ "${actual}" = "0" ] + [ "${actual}" = "1" ] } @test "server/Service: can set annotations" { diff --git a/charts/consul/test/unit/server-statefulset.bats b/charts/consul/test/unit/server-statefulset.bats index 7669bad309..a725162e01 100755 --- a/charts/consul/test/unit/server-statefulset.bats +++ b/charts/consul/test/unit/server-statefulset.bats @@ -45,9 +45,7 @@ load _helpers cd `chart_dir` run helm template \ -s templates/server-statefulset.yaml \ - --set 'server.bootstrapExpect=1' \ - --set 'server.replicas=3' \ - . + --set 'server.bootstrapExpect=1' . [ "$status" -eq 1 ] [[ "$output" =~ "server.bootstrapExpect cannot be less than server.replicas" ]] } @@ -62,6 +60,7 @@ load _helpers --set 'global.adminPartitions.enabled=true' \ --set 'global.federation.enabled=true' \ . + [ "$status" -eq 1 ] [[ "$output" =~ "If global.federation.enabled is true, global.adminPartitions.enabled must be false because they are mutually exclusive" ]] } @@ -590,6 +589,28 @@ load _helpers [ "${actualBaz}" = "qux" ] } +#-------------------------------------------------------------------- +# DNS + +@test "server/StatefulSet: recursor flags unset by default" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/server-statefulset.yaml \ + . | tee /dev/stderr | + yq -c -r '.spec.template.spec.containers[0].command | join(" ") | contains("$recursor_flags")' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "server/StatefulSet: add recursor flags if dns.enableRedirection is true" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/server-statefulset.yaml \ + --set 'dns.enableRedirection=true' \ + . | tee /dev/stderr | + yq -c -r '.spec.template.spec.containers[0].command | join(" ") | contains("$recursor_flags")' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + #-------------------------------------------------------------------- # annotations @@ -657,7 +678,7 @@ load _helpers -s templates/server-statefulset.yaml \ . | tee /dev/stderr | yq -r '.spec.template.metadata.annotations."consul.hashicorp.com/config-checksum"' | tee /dev/stderr) - [ "${actual}" = f3b00edc16ec09e90b7a20e379be5ddc1b10121ce60f602809a44130d2dc7aca ] + [ "${actual}" = 04cc39bf3f56ff39a2f4ae188fc37fc54b7775a073e8f97111eb37a548d7e229 ] } @test "server/StatefulSet: adds config-checksum annotation when extraConfig is provided" { @@ -667,7 +688,7 @@ load _helpers --set 'server.extraConfig="{\"hello\": \"world\"}"' \ . | tee /dev/stderr | yq -r '.spec.template.metadata.annotations."consul.hashicorp.com/config-checksum"' | tee /dev/stderr) - [ "${actual}" = b51ac0ef8e40d138e197f4ea86e55f3ceebb91fa07d15998809d8904d5a69606 ] + [ "${actual}" = e8d2e9535eb6e69eedebef725a66a8b47fd8845a77772f0e19911d2273b9b804 ] } @test "server/StatefulSet: adds config-checksum annotation when config is updated" { @@ -677,7 +698,7 @@ load _helpers --set 'global.acls.manageSystemACLs=true' \ . 
| tee /dev/stderr | yq -r '.spec.template.metadata.annotations."consul.hashicorp.com/config-checksum"' | tee /dev/stderr) - [ "${actual}" = a37768452067ddd3a0ab44d1da18e167fe093946b14e43e915f094fa8c86c37f ] + [ "${actual}" = d5f4de988e9d51ff8ae91a24a1a990dc65ce046c0494836f6d0f0eae34108235 ] } #-------------------------------------------------------------------- @@ -1372,6 +1393,20 @@ load _helpers [[ "$output" =~ "global.secretsBackend.vault.consulServerRole must be provided if global.secretsBackend.vault.enabled=true" ]] } +@test "server/StatefulSet: fail when vault is enabled with tls but autoencrypt is disabled" { + cd `chart_dir` + run helm template \ + -s templates/server-statefulset.yaml \ + --set 'global.secretsBackend.vault.enabled=true' \ + --set 'global.secretsBackend.vault.consulClientRole=test' \ + --set 'global.secretsBackend.vault.consulServerRole=test' \ + --set 'global.server.serverCert.secretName=test' \ + --set 'global.tls.caCert.secretName=test' \ + --set 'global.tls.enabled=true' . + [ "$status" -eq 1 ] + [[ "$output" =~ "global.tls.enableAutoEncrypt must be true if global.secretsBackend.vault.enabled=true and global.tls.enabled=true" ]] +} + @test "server/StatefulSet: fail when vault, tls are enabled but no caCert provided" { cd `chart_dir` run helm template \ @@ -1385,6 +1420,36 @@ load _helpers [[ "$output" =~ "global.tls.caCert.secretName must be provided if global.tls.enabled=true and global.secretsBackend.vault.enabled=true." ]] } +@test "server/StatefulSet: fail when vault, tls are enabled with a serverCert but no autoencrypt" { + cd `chart_dir` + run helm template \ + -s templates/server-statefulset.yaml \ + --set 'global.secretsBackend.vault.enabled=true' \ + --set 'global.secretsBackend.vault.consulClientRole=test' \ + --set 'global.secretsBackend.vault.consulServerRole=foo' \ + --set 'global.tls.enabled=true' \ + --set 'server.serverCert.secretName=pki_int/issue/test' \ + --set 'global.tls.caCert.secretName=pki_int/cert/ca' \ + . + [ "$status" -eq 1 ] + [[ "$output" =~ "global.tls.enableAutoEncrypt must be true if global.secretsBackend.vault.enabled=true and global.tls.enabled=true" ]] +} + +@test "server/StatefulSet: fail when vault is enabled with tls but no consulCARole is provided" { + cd `chart_dir` + run helm template \ + -s templates/server-statefulset.yaml \ + --set 'global.secretsBackend.vault.enabled=true' \ + --set 'global.secretsBackend.vault.consulClientRole=test' \ + --set 'global.secretsBackend.vault.consulServerRole=test' \ + --set 'global.server.serverCert.secretName=test' \ + --set 'global.tls.caCert.secretName=test' \ + --set 'global.tls.enableAutoEncrypt=true' \ + --set 'global.tls.enabled=true' . + [ "$status" -eq 1 ] + [[ "$output" =~ "global.secretsBackend.vault.consulCARole must be provided if global.secretsBackend.vault.enabled=true and global.tls.enabled=true" ]] +} + @test "server/StatefulSet: vault annotations not set by default" { cd `chart_dir` local object=$(helm template \ @@ -1866,882 +1931,3 @@ load _helpers local actual="$(echo $object | yq -r '.spec.containers[] | select(.name=="consul").command | any(contains("-config-file=/vault/secrets/replication-token-config.hcl"))' | tee /dev/stderr)" [ "${actual}" = "true" ] } - -#-------------------------------------------------------------------- -# global.cloud - -@test "server/StatefulSet: cloud config is not set in command when global.cloud.enabled is not set" { - cd `chart_dir` - local object=$(helm template \ - -s templates/server-statefulset.yaml \ - . 
| tee /dev/stderr) - - # Test the flag is set. - local actual=$(echo "$object" | - yq '.spec.template.spec.containers[] | select(.name == "consul") | .command | any(contains("-hcl=\"cloud { resource_id = \\\"${HCP_RESOURCE_ID}\\\" }\""))' | tee /dev/stderr) - [ "${actual}" = "false" ] - - # Test the HCP_RESOURCE_ID environment variable is set. - local envvar=$(echo "$object" | - yq -r -c '.spec.template.spec.containers[] | select(.name == "consul") | .env | select(.name == "HCP_RESOURCE_ID")' | tee /dev/stderr) - [ "${envvar}" = "" ] -} - -@test "server/StatefulSet: does not create HCP_RESOURCE_ID, HCP_CLIENT_ID, HCP_CLIENT_SECRET, HCP_AUTH_URL, HCP_SCADA_ADDRESS, and HCP_API_HOSTNAME envvars in consul container when global.cloud.enabled is not set" { - cd `chart_dir` - local object=$(helm template \ - -s templates/server-statefulset.yaml \ - . | tee /dev/stderr ) - - local container=$(echo "$object" | - yq -r '.spec.template.spec.containers[] | select(.name == "consul")' | tee /dev/stderr) - - - local envvar=$(echo "$container" | - yq -r '.env[] | select(.name == "HCP_CLIENT_ID")' | tee /dev/stderr) - [ "${envvar}" = "" ] - - envvar=$(echo "$container" | - yq -r '.env[] | select(.name == "HCP_CLIENT_SECRET")' | tee /dev/stderr) - [ "${envvar}" = "" ] - - envvar=$(echo "$container" | - yq -r '.env[] | select(.name == "HCP_RESOURCE_ID")' | tee /dev/stderr) - [ "${envvar}" = "" ] - - envvar=$(echo "$container" | - yq -r '.env[] | select(.name == "HCP_AUTH_URL")' | tee /dev/stderr) - [ "${envvar}" = "" ] - - envvar=$(echo "$container" | - yq -r '.env[] | select(.name == "HCP_API_HOSTNAME")' | tee /dev/stderr) - [ "${envvar}" = "" ] - - envvar=$(echo "$container" | - yq -r '.env[] | select(.name == "HCP_SCADA_ADDRESS")' | tee /dev/stderr) - [ "${envvar}" = "" ] - -} - -@test "server/StatefulSet: cloud config is set in command when global.cloud.enabled and global.cloud.resourceId are set" { - cd `chart_dir` - local object=$(helm template \ - -s templates/server-statefulset.yaml \ - --set 'global.cloud.enabled=true' \ - --set 'global.cloud.clientId.secretName=client-id-name' \ - --set 'global.cloud.clientId.secretKey=client-id-key' \ - --set 'global.cloud.clientSecret.secretName=client-secret-id-name' \ - --set 'global.cloud.clientSecret.secretKey=client-secret-id-key' \ - --set 'global.cloud.resourceId.secretName=resource-id-name' \ - --set 'global.cloud.resourceId.secretKey=resource-id-key' \ - . | tee /dev/stderr) - - local actual=$(echo "$object" | - yq '.spec.template.spec.containers[] | select(.name == "consul") | .command | any(contains("-hcl=\"cloud { resource_id = \\\"${HCP_RESOURCE_ID}\\\" }\""))' | tee /dev/stderr) - [ "${actual}" = "true" ] -} - - -@test "server/StatefulSet: creates HCP_RESOURCE_ID, HCP_CLIENT_ID, HCP_CLIENT_SECRET envvars in consul container when global.cloud.enabled is true" { - cd `chart_dir` - local object=$(helm template \ - -s templates/server-statefulset.yaml \ - --set 'global.cloud.enabled=true' \ - --set 'global.cloud.clientId.secretName=client-id-name' \ - --set 'global.cloud.clientId.secretKey=client-id-key' \ - --set 'global.cloud.clientSecret.secretName=client-secret-name' \ - --set 'global.cloud.clientSecret.secretKey=client-secret-key' \ - --set 'global.cloud.resourceId.secretName=resource-id-name' \ - --set 'global.cloud.resourceId.secretKey=resource-id-key' \ - . 
| tee /dev/stderr ) - - local container=$(echo "$object" | - yq -r '.spec.template.spec.containers[] | select(.name == "consul")' | tee /dev/stderr) - - # HCP_CLIENT_ID - local envvar=$(echo "$container" | - yq -r '.env[] | select(.name == "HCP_CLIENT_ID")' | tee /dev/stderr) - - local actual=$(echo "$envvar" | - yq -r '.valueFrom.secretKeyRef.name' | tee /dev/stderr) - [ "${actual}" = "client-id-name" ] - - actual=$(echo "$envvar" | - yq -r '.valueFrom.secretKeyRef.key' | tee /dev/stderr) - [ "${actual}" = "client-id-key" ] - - # HCP_CLIENT_SECRET - envvar=$(echo "$container" | - yq -r '.env[] | select(.name == "HCP_CLIENT_SECRET")' | tee /dev/stderr) - - local actual=$(echo "$envvar" | - yq -r '.valueFrom.secretKeyRef.name' | tee /dev/stderr) - [ "${actual}" = "client-secret-name" ] - - actual=$(echo "$envvar" | - yq -r '.valueFrom.secretKeyRef.key' | tee /dev/stderr) - [ "${actual}" = "client-secret-key" ] - - # HCP_RESOURCE_ID - envvar=$(echo "$container" | - yq -r '.env[] | select(.name == "HCP_RESOURCE_ID")' | tee /dev/stderr) - - local actual=$(echo "$envvar" | - yq -r '.valueFrom.secretKeyRef.name' | tee /dev/stderr) - [ "${actual}" = "resource-id-name" ] - - actual=$(echo "$envvar" | - yq -r '.valueFrom.secretKeyRef.key' | tee /dev/stderr) - [ "${actual}" = "resource-id-key" ] -} - -@test "server/StatefulSet: creates HCP_AUTH_URL, HCP_SCADA_ADDRESS, and HCP_API_HOSTNAME envvars in consul container when global.cloud.enabled is true and those cloud values are specified" { - cd `chart_dir` - local object=$(helm template \ - -s templates/server-statefulset.yaml \ - --set 'global.cloud.enabled=true' \ - --set 'global.cloud.secretName=foo' \ - --set 'global.cloud.clientId.secretName=client-id-name' \ - --set 'global.cloud.clientId.secretKey=client-id-key' \ - --set 'global.cloud.clientSecret.secretName=client-secret-id-name' \ - --set 'global.cloud.clientSecret.secretKey=client-secret-id-key' \ - --set 'global.cloud.resourceId.secretName=resource-id-name' \ - --set 'global.cloud.resourceId.secretKey=resource-id-key' \ - --set 'global.cloud.authUrl.secretName=auth-url-name' \ - --set 'global.cloud.authUrl.secretKey=auth-url-key' \ - --set 'global.cloud.apiHost.secretName=api-host-name' \ - --set 'global.cloud.apiHost.secretKey=api-host-key' \ - --set 'global.cloud.scadaAddress.secretName=scada-address-name' \ - --set 'global.cloud.scadaAddress.secretKey=scada-address-key' \ - . 
| tee /dev/stderr ) - - local container=$(echo "$object" | - yq -r '.spec.template.spec.containers[] | select(.name == "consul")' | tee /dev/stderr) - - # HCP_AUTH_URL - envvar=$(echo "$container" | - yq -r '.env[] | select(.name == "HCP_AUTH_URL")' | tee /dev/stderr) - - local actual=$(echo "$envvar" | - yq -r '.valueFrom.secretKeyRef.name' | tee /dev/stderr) - echo "actual: $actual" - - [ "${actual}" = "auth-url-name" ] - - actual=$(echo "$envvar" | - yq -r '.valueFrom.secretKeyRef.key' | tee /dev/stderr) - [ "${actual}" = "auth-url-key" ] - - # HCP_API_HOST - envvar=$(echo "$container" | - yq -r '.env[] | select(.name == "HCP_API_HOST")' | tee /dev/stderr) - - local actual=$(echo "$envvar" | - yq -r '.valueFrom.secretKeyRef.name' | tee /dev/stderr) - [ "${actual}" = "api-host-name" ] - - actual=$(echo "$envvar" | - yq -r '.valueFrom.secretKeyRef.key' | tee /dev/stderr) - [ "${actual}" = "api-host-key" ] - - # HCP_SCADA_ADDRESS - envvar=$(echo "$container" | - yq -r '.env[] | select(.name == "HCP_SCADA_ADDRESS")' | tee /dev/stderr) - - local actual=$(echo "$envvar" | - yq -r '.valueFrom.secretKeyRef.name' | tee /dev/stderr) - [ "${actual}" = "scada-address-name" ] - - actual=$(echo "$envvar" | - yq -r '.valueFrom.secretKeyRef.key' | tee /dev/stderr) - [ "${actual}" = "scada-address-key" ] -} - -@test "server/StatefulSet: cloud config is set in command global.cloud.enabled is not set" { - cd `chart_dir` - local object=$(helm template \ - -s templates/server-statefulset.yaml \ - --set 'global.acls.enabled=true' \ - --set 'global.acls.bootstrapToken.secretName=name' \ - --set 'global.acls.bootstrapToken.secretKey=key' \ - . | tee /dev/stderr) - - # Test the flag is set. - local actual=$(echo "$object" | - yq '.spec.template.spec.containers[0].command | any(contains("-hcl=\"acl { tokens { initial_management = \\\"${ACL_BOOTSTRAP_TOKEN}\\\" } }\""))' | tee /dev/stderr) - [ "${actual}" = "true" ] - - # Test the ACL_BOOTSTRAP_TOKEN environment variable is set. - local actual=$(echo "$object" | - yq -r -c '.spec.template.spec.containers[0].env | map(select(.name == "ACL_BOOTSTRAP_TOKEN"))' | tee /dev/stderr) - [ "${actual}" = '[{"name":"ACL_BOOTSTRAP_TOKEN","valueFrom":{"secretKeyRef":{"name":"name","key":"key"}}}]' ] -} - -@test "server/StatefulSet: fails when global.cloud.enabled is true and global.cloud.clientId.secretName is not set but global.cloud.clientSecret.secretName and global.cloud.resourceId.secretName is set" { - cd `chart_dir` - run helm template \ - -s templates/server-statefulset.yaml \ - --set 'global.cloud.enabled=true' \ - --set 'global.cloud.clientSecret.secretName=client-id-name' \ - --set 'global.cloud.clientSecret.secretKey=client-id-key' \ - --set 'global.cloud.resourceId.secretName=client-resource-id-name' \ - --set 'global.cloud.resourceId.secretKey=client-resource-id-key' \ - . - [ "$status" -eq 1 ] - [[ "$output" =~ "When global.cloud.enabled is true, global.cloud.resourceId.secretName, global.cloud.clientId.secretName, and global.cloud.clientSecret.secretName must also be set." 
]] -} - -@test "server/StatefulSet: fails when global.cloud.enabled is true and global.cloud.clientSecret.secretName is not set but global.cloud.clientId.secretName and global.cloud.resourceId.secretName is set" { - cd `chart_dir` - run helm template \ - -s templates/server-statefulset.yaml \ - --set 'connectInject.enabled=true' \ - --set 'meshGateway.enabled=true' \ - --set 'global.tls.enabled=true' \ - --set 'global.tls.enableAutoEncrypt=true' \ - --set 'global.cloud.enabled=true' \ - --set 'global.cloud.clientId.secretName=client-id-name' \ - --set 'global.cloud.clientId.secretKey=client-id-key' \ - --set 'global.cloud.resourceId.secretName=resource-id-name' \ - --set 'global.cloud.resourceId.secretKey=resource-id-key' \ - . - [ "$status" -eq 1 ] - [[ "$output" =~ "When global.cloud.enabled is true, global.cloud.resourceId.secretName, global.cloud.clientId.secretName, and global.cloud.clientSecret.secretName must also be set." ]] -} - -@test "server/StatefulSet: fails when global.cloud.enabled is true and global.cloud.resourceId.secretName is not set but global.cloud.clientId.secretName and global.cloud.clientSecret.secretName is set" { - cd `chart_dir` - run helm template \ - -s templates/server-statefulset.yaml \ - --set 'connectInject.enabled=true' \ - --set 'meshGateway.enabled=true' \ - --set 'global.tls.enabled=true' \ - --set 'global.tls.enableAutoEncrypt=true' \ - --set 'global.cloud.enabled=true' \ - --set 'global.cloud.clientId.secretName=client-id-name' \ - --set 'global.cloud.clientId.secretKey=client-id-key' \ - --set 'global.cloud.clientSecret.secretName=client-secret-id-name' \ - --set 'global.cloud.clientSecret.secretKey=client-secret-id-key' \ - . - [ "$status" -eq 1 ] - [[ "$output" =~ "When global.cloud.enabled is true, global.cloud.resourceId.secretName, global.cloud.clientId.secretName, and global.cloud.clientSecret.secretName must also be set." ]] -} - -@test "server/StatefulSet: fails when global.cloud.resourceId.secretName is set but global.cloud.resourceId.secretKey is not set." { - cd `chart_dir` - run helm template \ - -s templates/server-statefulset.yaml \ - --set 'connectInject.enabled=true' \ - --set 'meshGateway.enabled=true' \ - --set 'global.tls.enabled=true' \ - --set 'global.tls.enableAutoEncrypt=true' \ - --set 'global.cloud.enabled=true' \ - --set 'global.cloud.clientId.secretName=client-id-name' \ - --set 'global.cloud.clientId.secretKey=client-id-key' \ - --set 'global.cloud.clientSecret.secretName=client-secret-id-name' \ - --set 'global.cloud.clientSecret.secretKey=client-secret-id-key' \ - --set 'global.cloud.resourceId.secretName=resource-id-name' \ - . - [ "$status" -eq 1 ] - [[ "$output" =~ "When either global.cloud.resourceId.secretName or global.cloud.resourceId.secretKey is defined, both must be set." ]] -} - -@test "server/StatefulSet: fails when global.cloud.authURL.secretName is set but global.cloud.authURL.secretKey is not set." 
{ - cd `chart_dir` - run helm template \ - -s templates/server-statefulset.yaml \ - --set 'connectInject.enabled=true' \ - --set 'meshGateway.enabled=true' \ - --set 'global.tls.enabled=true' \ - --set 'global.tls.enableAutoEncrypt=true' \ - --set 'global.cloud.enabled=true' \ - --set 'global.cloud.clientId.secretName=client-id-name' \ - --set 'global.cloud.clientId.secretKey=client-id-key' \ - --set 'global.cloud.clientSecret.secretName=client-secret-id-name' \ - --set 'global.cloud.clientSecret.secretKey=client-secret-id-key' \ - --set 'global.cloud.resourceId.secretName=resource-id-name' \ - --set 'global.cloud.resourceId.secretKey=resource-id-key' \ - --set 'global.cloud.authUrl.secretName=auth-url-name' \ - . - [ "$status" -eq 1 ] - - [[ "$output" =~ "When either global.cloud.authUrl.secretName or global.cloud.authUrl.secretKey is defined, both must be set." ]] -} - -@test "server/StatefulSet: fails when global.cloud.authURL.secretKey is set but global.cloud.authURL.secretName is not set." { - cd `chart_dir` - run helm template \ - -s templates/server-statefulset.yaml \ - --set 'connectInject.enabled=true' \ - --set 'meshGateway.enabled=true' \ - --set 'global.tls.enabled=true' \ - --set 'global.tls.enableAutoEncrypt=true' \ - --set 'global.cloud.enabled=true' \ - --set 'global.cloud.clientId.secretName=client-id-name' \ - --set 'global.cloud.clientId.secretKey=client-id-key' \ - --set 'global.cloud.clientSecret.secretName=client-secret-id-name' \ - --set 'global.cloud.clientSecret.secretKey=client-secret-id-key' \ - --set 'global.cloud.resourceId.secretName=resource-id-name' \ - --set 'global.cloud.resourceId.secretKey=resource-id-key' \ - --set 'global.cloud.authUrl.secretKey=auth-url-key' \ - . - [ "$status" -eq 1 ] - - [[ "$output" =~ "When either global.cloud.authUrl.secretName or global.cloud.authUrl.secretKey is defined, both must be set." ]] -} - -@test "server/StatefulSet: fails when global.cloud.apiHost.secretName is set but global.cloud.apiHost.secretKey is not set." { - cd `chart_dir` - run helm template \ - -s templates/server-statefulset.yaml \ - --set 'connectInject.enabled=true' \ - --set 'meshGateway.enabled=true' \ - --set 'global.tls.enabled=true' \ - --set 'global.tls.enableAutoEncrypt=true' \ - --set 'global.cloud.enabled=true' \ - --set 'global.cloud.clientId.secretName=client-id-name' \ - --set 'global.cloud.clientId.secretKey=client-id-key' \ - --set 'global.cloud.clientSecret.secretName=client-secret-id-name' \ - --set 'global.cloud.clientSecret.secretKey=client-secret-id-key' \ - --set 'global.cloud.resourceId.secretName=resource-id-name' \ - --set 'global.cloud.resourceId.secretKey=resource-id-key' \ - --set 'global.cloud.apiHost.secretName=auth-url-name' \ - . - [ "$status" -eq 1 ] - - [[ "$output" =~ "When either global.cloud.apiHost.secretName or global.cloud.apiHost.secretKey is defined, both must be set." ]] -} - -@test "server/StatefulSet: fails when global.cloud.apiHost.secretKey is set but global.cloud.apiHost.secretName is not set." 
{ - cd `chart_dir` - run helm template \ - -s templates/server-statefulset.yaml \ - --set 'connectInject.enabled=true' \ - --set 'meshGateway.enabled=true' \ - --set 'global.tls.enabled=true' \ - --set 'global.tls.enableAutoEncrypt=true' \ - --set 'global.cloud.enabled=true' \ - --set 'global.cloud.clientId.secretName=client-id-name' \ - --set 'global.cloud.clientId.secretKey=client-id-key' \ - --set 'global.cloud.clientSecret.secretName=client-secret-id-name' \ - --set 'global.cloud.clientSecret.secretKey=client-secret-id-key' \ - --set 'global.cloud.resourceId.secretName=resource-id-name' \ - --set 'global.cloud.resourceId.secretKey=resource-id-key' \ - --set 'global.cloud.apiHost.secretKey=auth-url-key' \ - . - [ "$status" -eq 1 ] - - [[ "$output" =~ "When either global.cloud.apiHost.secretName or global.cloud.apiHost.secretKey is defined, both must be set." ]] -} - -@test "server/StatefulSet: fails when global.cloud.scadaAddress.secretName is set but global.cloud.scadaAddress.secretKey is not set." { - cd `chart_dir` - run helm template \ - -s templates/server-statefulset.yaml \ - --set 'connectInject.enabled=true' \ - --set 'meshGateway.enabled=true' \ - --set 'global.tls.enabled=true' \ - --set 'global.tls.enableAutoEncrypt=true' \ - --set 'global.cloud.enabled=true' \ - --set 'global.cloud.clientId.secretName=client-id-name' \ - --set 'global.cloud.clientId.secretKey=client-id-key' \ - --set 'global.cloud.clientSecret.secretName=client-secret-id-name' \ - --set 'global.cloud.clientSecret.secretKey=client-secret-id-key' \ - --set 'global.cloud.resourceId.secretName=resource-id-name' \ - --set 'global.cloud.resourceId.secretKey=resource-id-key' \ - --set 'global.cloud.scadaAddress.secretName=scada-address-name' \ - . - [ "$status" -eq 1 ] - - [[ "$output" =~ "When either global.cloud.scadaAddress.secretName or global.cloud.scadaAddress.secretKey is defined, both must be set." ]] -} - -@test "server/StatefulSet: fails when global.cloud.scadaAddress.secretKey is set but global.cloud.scadaAddress.secretName is not set." { - cd `chart_dir` - run helm template \ - -s templates/server-statefulset.yaml \ - --set 'connectInject.enabled=true' \ - --set 'meshGateway.enabled=true' \ - --set 'global.tls.enabled=true' \ - --set 'global.tls.enableAutoEncrypt=true' \ - --set 'global.cloud.enabled=true' \ - --set 'global.cloud.clientId.secretName=client-id-name' \ - --set 'global.cloud.clientId.secretKey=client-id-key' \ - --set 'global.cloud.clientSecret.secretName=client-secret-id-name' \ - --set 'global.cloud.clientSecret.secretKey=client-secret-id-key' \ - --set 'global.cloud.resourceId.secretName=resource-id-name' \ - --set 'global.cloud.resourceId.secretKey=resource-id-key' \ - --set 'global.cloud.scadaAddress.secretKey=scada-address-key' \ - . - [ "$status" -eq 1 ] - - [[ "$output" =~ "When either global.cloud.scadaAddress.secretName or global.cloud.scadaAddress.secretKey is defined, both must be set." ]] -} - -#-------------------------------------------------------------------- -# server.snapshotAgent - -@test "server/StatefulSet: snapshot-agent: snapshot agent container not added by default" { - cd `chart_dir` - local actual=$(helm template \ - -s templates/server-statefulset.yaml \ - . 
-      . | tee /dev/stderr |
-      yq '.spec.template.spec.containers[] | select(.name == "consul-snapshot-agent")' | tee /dev/stderr)
-  [ "${actual}" = "" ]
-}
-
-
-@test "server/StatefulSet: snapshot-agent: snapshot agent container added with server.snapshotAGent.enabled=true" {
-  cd `chart_dir`
-  local actual=$(helm template \
-      -s templates/server-statefulset.yaml \
-      --set 'server.snapshotAgent.enabled=true' \
-      . | tee /dev/stderr |
-      yq -r '.spec.template.spec.containers[] | select(.name == "consul-snapshot-agent") | .name' | tee /dev/stderr)
-  [ "${actual}" = "consul-snapshot-agent" ]
-}
-
-@test "server/StatefulSet: snapshot-agent: when server.snapshotAgent.configSecret.secretKey!=null and server.snapshotAgent.configSecret.secretName=null, fail" {
-  cd `chart_dir`
-  run helm template \
-      -s templates/server-statefulset.yaml \
-      --set 'server.snapshotAgent.enabled=true' \
-      --set 'server.snapshotAgent.configSecret.secretName=' \
-      --set 'server.snapshotAgent.configSecret.secretKey=bar' \
-      .
-  [ "$status" -eq 1 ]
-  [[ "$output" =~ "server.snapshotAgent.configSecret.secretKey and server.snapshotAgent.configSecret.secretName must both be specified." ]]
-}
-
-@test "server/StatefulSet: snapshot-agent: when server.snapshotAgent.configSecret.secretName!=null and server.snapshotAgent.configSecret.secretKey=null, fail" {
-  cd `chart_dir`
-  run helm template \
-      -s templates/server-statefulset.yaml \
-      --set 'server.snapshotAgent.enabled=true' \
-      --set 'server.snapshotAgent.configSecret.secretName=foo' \
-      --set 'server.snapshotAgent.configSecret.secretKey=' \
-      .
-  [ "$status" -eq 1 ]
-  [[ "$output" =~ "server.snapshotAgent.configSecret.secretKey and server.snapshotAgent.configSecret.secretName must both be specified." ]]
-}
-
-@test "server/StatefulSet: snapshot-agent: adds volume for snapshot agent config secret when secret is configured" {
-  cd `chart_dir`
-  local vol=$(helm template \
-      -s templates/server-statefulset.yaml \
-      --set 'server.snapshotAgent.enabled=true' \
-      --set 'server.snapshotAgent.configSecret.secretName=a/b/c/d' \
-      --set 'server.snapshotAgent.configSecret.secretKey=snapshot-agent-config' \
-      . | tee /dev/stderr |
-      yq -r -c '.spec.template.spec.volumes[] | select(.name == "snapshot-agent-user-config")' | tee /dev/stderr)
-  local actual
-  actual=$(echo $vol | jq -r '. .name' | tee /dev/stderr)
-  [ "${actual}" = 'snapshot-agent-user-config' ]
-
-  actual=$(echo $vol | jq -r '. .secret.secretName' | tee /dev/stderr)
-  [ "${actual}" = 'a/b/c/d' ]
-
-  actual=$(echo $vol | jq -r '. .secret.items[0].key' | tee /dev/stderr)
-  [ "${actual}" = 'snapshot-agent-config' ]
-
-  actual=$(echo $vol | jq -r '. .secret.items[0].path' | tee /dev/stderr)
-  [ "${actual}" = 'snapshot-config.json' ]
-}
-
-@test "server/StatefulSet: snapshot-agent: adds volume mount to snapshot container for snapshot agent config secret when secret is configured" {
-  cd `chart_dir`
-  local vol=$(helm template \
-      -s templates/server-statefulset.yaml \
-      --set 'server.snapshotAgent.enabled=true' \
-      --set 'server.snapshotAgent.configSecret.secretName=a/b/c/d' \
-      --set 'server.snapshotAgent.configSecret.secretKey=snapshot-agent-config' \
-      . | tee /dev/stderr |
-      yq -r -c '.spec.template.spec.containers[1].volumeMounts[] | select(.name == "snapshot-agent-user-config")' | tee /dev/stderr)
-  local actual
-  actual=$(echo $vol | jq -r '. .name' | tee /dev/stderr)
-  [ "${actual}" = 'snapshot-agent-user-config' ]
-
-  actual=$(echo $vol | jq -r '. .readOnly' | tee /dev/stderr)
-  [ "${actual}" = 'true' ]
-
-  actual=$(echo $vol | jq -r '. .mountPath' | tee /dev/stderr)
-  [ "${actual}" = '/consul/user-config' ]
-}
-
-@test "server/StatefulSet: snapshot-agent: set config-dir argument on snapshot agent command to volume mount" {
-  cd `chart_dir`
-  local actual=$(helm template \
-      -s templates/server-statefulset.yaml \
-      --set 'server.snapshotAgent.enabled=true' \
-      --set 'server.snapshotAgent.configSecret.secretName=a/b/c/d' \
-      --set 'server.snapshotAgent.configSecret.secretKey=snapshot-agent-config' \
-      . | tee /dev/stderr |
-      yq -r '.spec.template.spec.containers[1].command[2] | contains("-config-dir=/consul/user-config")' | tee /dev/stderr)
-  [ "${actual}" = 'true' ]
-}
-
-@test "server/StatefulSet: snapshot-agent: does not configure snapshot agent login config secret when acls are disabled" {
-  cd `chart_dir`
-  local spec=$(helm template \
-      -s templates/server-statefulset.yaml \
-      --set 'server.snapshotAgent.enabled=true' \
-      --set 'global.acls.manageSystemACLs=false' \
-      . | tee /dev/stderr |
-      yq -r -c '.spec.template.spec' | tee /dev/stderr)
-  actual=$(echo $spec | yq -r '.volumes[] | select(.name == "snapshot-agent-config")')
-  [ "${actual}" = "" ]
-
-  actual=$(echo $spec | yq -r '.containers[1].volumeMounts')
-  [ "${actual}" = "null" ]
-
-  actual=$(echo $spec | yq -r '.containers[1].command[2] | contains("-config-file=/consul/config/snapshot-login.json")')
-  [ "${actual}" = "false" ]
-}
-
-@test "server/StatefulSet: snapshot-agent: adds volume for snapshot agent login config secret when acls are enabled" {
-  cd `chart_dir`
-  local vol=$(helm template \
-      -s templates/server-statefulset.yaml \
-      --set 'server.snapshotAgent.enabled=true' \
-      --set 'global.acls.manageSystemACLs=true' \
-      . | tee /dev/stderr |
-      yq -r -c '.spec.template.spec.volumes[] | select(.name == "snapshot-agent-config")' | tee /dev/stderr)
-  local actual
-  actual=$(echo $vol | jq -r '. .name' | tee /dev/stderr)
-  [ "${actual}" = 'snapshot-agent-config' ]
-
-  actual=$(echo $vol | jq -r '. .configMap.name' | tee /dev/stderr)
-  [ "${actual}" = 'release-name-consul-snapshot-agent-config' ]
-}
-
-@test "server/StatefulSet: snapshot-agent: adds volume mount to snapshot container for snapshot agent login config secret when acls are enabled" {
-  cd `chart_dir`
-  local vol=$(helm template \
-      -s templates/server-statefulset.yaml \
-      --set 'server.snapshotAgent.enabled=true' \
-      --set 'global.acls.manageSystemACLs=true' \
-      . | tee /dev/stderr |
-      yq -r -c '.spec.template.spec.containers[1].volumeMounts[] | select(.name == "snapshot-agent-config")' | tee /dev/stderr)
-  local actual
-  actual=$(echo $vol | jq -r '. .name' | tee /dev/stderr)
-  [ "${actual}" = 'snapshot-agent-config' ]
-
-  actual=$(echo $vol | jq -r '. .readOnly' | tee /dev/stderr)
-  [ "${actual}" = 'true' ]
-
-  actual=$(echo $vol | jq -r '. .mountPath' | tee /dev/stderr)
-  [ "${actual}" = '/consul/config' ]
-}
-
-@test "server/StatefulSet: snapshot-agent: set config-file argument on snapshot agent command to login config when acls are enabled" {
-  cd `chart_dir`
-  local actual=$(helm template \
-      -s templates/server-statefulset.yaml \
-      --set 'server.snapshotAgent.enabled=true' \
-      --set 'global.acls.manageSystemACLs=true' \
-      . | tee /dev/stderr |
-      yq -r '.spec.template.spec.containers[1].command[2] | contains("-config-file=/consul/config/snapshot-login.json")' | tee /dev/stderr)
-  [ "${actual}" = 'true' ]
-}
-
-@test "server/StatefulSet: snapshot-agent: uses default consul addr when TLS is disabled" {
-  cd `chart_dir`
-  local env=$(helm template \
-      -s templates/server-statefulset.yaml \
-      --set 'server.snapshotAgent.enabled=true' \
-      . | tee /dev/stderr |
-      yq -r '.spec.template.spec.containers[1].env[]' | tee /dev/stderr)
-
-  local actual
-  actual=$(echo $env | jq -r '. | select(.name == "CONSUL_HTTP_ADDR") | .value' | tee /dev/stderr)
-  [ "${actual}" = 'http://127.0.0.1:8500' ]
-}
-
-@test "server/StatefulSet: snapshot-agent: sets TLS env vars when global.tls.enabled" {
-  cd `chart_dir`
-  local env=$(helm template \
-      -s templates/server-statefulset.yaml \
-      --set 'server.snapshotAgent.enabled=true' \
-      --set 'global.tls.enabled=true' \
-      . | tee /dev/stderr |
-      yq -r '.spec.template.spec.containers[1].env[]' | tee /dev/stderr)
-
-  local actual
-  actual=$(echo $env | jq -r '. | select(.name == "CONSUL_HTTP_ADDR") | .value' | tee /dev/stderr)
-  [ "${actual}" = 'https://127.0.0.1:8501' ]
-
-  actual=$(echo $env | jq -r '. | select(.name == "CONSUL_CACERT") | .value' | tee /dev/stderr)
-  [ "${actual}" = "/consul/tls/ca/tls.crt" ]
-}
-
-@test "server/StatefulSet: snapshot-agent: populates container volumeMounts when global.tls.enabled is true" {
-  cd `chart_dir`
-  local actual=$(helm template \
-      -s templates/server-statefulset.yaml \
-      --set 'server.enabled=true' \
-      --set 'server.snapshotAgent.enabled=true' \
-      --set 'global.tls.enabled=true' \
-      . | tee /dev/stderr |
-      yq -r '.spec.template.spec.containers[1].volumeMounts[] | select(.name == "consul-ca-cert") | .name' | tee /dev/stderr)
-  [ "${actual}" = "consul-ca-cert" ]
-}
-
-#--------------------------------------------------------------------
-# server.snapshotAgent.resources
-
-@test "server/StatefulSet: snapshot-agent: default resources" {
-  cd `chart_dir`
-  local actual=$(helm template \
-      -s templates/server-statefulset.yaml \
-      --set 'server.snapshotAgent.enabled=true' \
-      . | tee /dev/stderr |
-      yq -rc '.spec.template.spec.containers[1].resources' | tee /dev/stderr)
-  [ "${actual}" = '{"limits":{"cpu":"50m","memory":"50Mi"},"requests":{"cpu":"50m","memory":"50Mi"}}' ]
-}
-
-@test "server/StatefulSet: snapshot-agent: can set resources" {
-  cd `chart_dir`
-  local actual=$(helm template \
-      -s templates/server-statefulset.yaml \
-      --set 'server.snapshotAgent.enabled=true' \
-      --set 'server.snapshotAgent.resources.requests.memory=100Mi' \
-      --set 'server.snapshotAgent.resources.requests.cpu=100m' \
-      --set 'server.snapshotAgent.resources.limits.memory=200Mi' \
-      --set 'server.snapshotAgent.resources.limits.cpu=200m' \
-      . | tee /dev/stderr |
-      yq -rc '.spec.template.spec.containers[1].resources' | tee /dev/stderr)
-  [ "${actual}" = '{"limits":{"cpu":"200m","memory":"200Mi"},"requests":{"cpu":"100m","memory":"100Mi"}}' ]
-}
-
-#--------------------------------------------------------------------
-# server.snapshotAgent.caCert
-
-@test "server/StatefulSet: snapshot-agent: if caCert is set command is modified correctly" {
-  cd `chart_dir`
-  local actual=$(helm template \
-      -s templates/server-statefulset.yaml \
-      --set 'server.snapshotAgent.enabled=true' \
-      --set 'server.snapshotAgent.caCert=-----BEGIN CERTIFICATE-----
-MIICFjCCAZsCCQCdwLtdjbzlYzAKBggqhkjOPQQDAjB0MQswCQYDVQQGEwJDQTEL' \
-      . | tee /dev/stderr |
-      yq -r '.spec.template.spec.containers[1].command[2] | contains("cat < /extra-ssl-certs/custom-ca.pem")' | tee /dev/stderr)
-  [ "${actual}" = "true" ]
-}
-
-@test "server/StatefulSet: snapshot-agent: if caCert is set extra-ssl-certs volumeMount is added" {
-  cd `chart_dir`
-  local object=$(helm template \
-      -s templates/server-statefulset.yaml \
-      --set 'server.snapshotAgent.enabled=true' \
-      --set 'server.snapshotAgent.caCert=-----BEGIN CERTIFICATE-----
-MIICFjCCAZsCCQCdwLtdjbzlYzAKBggqhkjOPQQDAjB0MQswCQYDVQQGEwJDQTEL' \
-      . | tee /dev/stderr | yq -r '.spec.template.spec' | tee /dev/stderr)
-
-  local actual=$(echo $object | jq -r '.volumes[] | select(.name == "extra-ssl-certs") | .name' | tee /dev/stderr)
-  [ "${actual}" = "extra-ssl-certs" ]
-}
-
-@test "server/StatefulSet: snapshot-agent: if caCert is set SSL_CERT_DIR env var is set" {
-  cd `chart_dir`
-  local object=$(helm template \
-      -s templates/server-statefulset.yaml \
-      --set 'server.snapshotAgent.enabled=true' \
-      --set 'server.snapshotAgent.caCert=-----BEGIN CERTIFICATE-----
-MIICFjCCAZsCCQCdwLtdjbzlYzAKBggqhkjOPQQDAjB0MQswCQYDVQQGEwJDQTEL' \
-      . | tee /dev/stderr | yq -r '.spec.template.spec.containers[1].env[] | select(.name == "SSL_CERT_DIR")' | tee /dev/stderr)
-
-  local actual=$(echo $object | jq -r '.name' | tee /dev/stderr)
-  [ "${actual}" = "SSL_CERT_DIR" ]
-  local actual=$(echo $object | jq -r '.value' | tee /dev/stderr)
-  [ "${actual}" = "/etc/ssl/certs:/extra-ssl-certs" ]
-}
-
-
-#--------------------------------------------------------------------
-# snapshotAgent license-autoload
-
-@test "server/StatefulSet: snapshot-agent: adds volume mount for license secret when enterprise license secret name and key are provided" {
-  cd `chart_dir`
-  local actual=$(helm template \
-      -s templates/server-statefulset.yaml \
-      --set 'server.snapshotAgent.enabled=true' \
-      --set 'global.enterpriseLicense.secretName=foo' \
-      --set 'global.enterpriseLicense.secretKey=bar' \
-      . | tee /dev/stderr |
-      yq -r -c '.spec.template.spec.containers[1].volumeMounts[] | select(.name == "consul-license")' | tee /dev/stderr)
-  [ "${actual}" = '{"name":"consul-license","mountPath":"/consul/license","readOnly":true}' ]
-}
-
-@test "server/StatefulSet: snapshot-agent: adds env var for license path when enterprise license secret name and key are provided" {
-  cd `chart_dir`
-  local actual=$(helm template \
-      -s templates/server-statefulset.yaml \
-      --set 'server.snapshotAgent.enabled=true' \
-      --set 'global.enterpriseLicense.secretName=foo' \
-      --set 'global.enterpriseLicense.secretKey=bar' \
-      . | tee /dev/stderr |
-      yq -r -c '.spec.template.spec.containers[1].env[] | select(.name == "CONSUL_LICENSE_PATH")' | tee /dev/stderr)
-  [ "${actual}" = '{"name":"CONSUL_LICENSE_PATH","value":"/consul/license/bar"}' ]
-}
-
-@test "server/StatefulSet: snapshot-agent: does not add license secret volume mount if manageSystemACLs are enabled" {
-  cd `chart_dir`
-  local actual=$(helm template \
-      -s templates/server-statefulset.yaml \
-      --set 'server.enabled=true' \
-      --set 'server.snapshotAgent.enabled=true' \
-      --set 'global.enterpriseLicense.secretName=foo' \
-      --set 'global.enterpriseLicense.secretKey=bar' \
-      --set 'global.acls.manageSystemACLs=true' \
-      . | tee /dev/stderr |
-      yq -r -c '.spec.template.spec.containers[1].volumeMounts[] | select(.name == "consul-license")' | tee /dev/stderr)
-  [ "${actual}" = "" ]
-}
-
-@test "server/StatefulSet: snapshot-agent: does not add license env if manageSystemACLs are enabled" {
-  cd `chart_dir`
-  local actual=$(helm template \
-      -s templates/server-statefulset.yaml \
-      --set 'server.enabled=true' \
-      --set 'server.snapshotAgent.enabled=true' \
-      --set 'global.enterpriseLicense.secretName=foo' \
-      --set 'global.enterpriseLicense.secretKey=bar' \
-      --set 'global.acls.manageSystemACLs=true' \
-      . | tee /dev/stderr |
-      yq -r -c '.spec.template.spec.containers[1].env[] | select(.name == "CONSUL_LICENSE_PATH")' | tee /dev/stderr)
-  [ "${actual}" = "" ]
-}
-
-#--------------------------------------------------------------------
-# snapshotAgent Vault
-
-@test "server/StatefulSet: snapshot-agent: vault CONSUL_LICENSE_PATH is set to /vault/secrets/enterpriselicense.txt" {
-  cd `chart_dir`
-  local env=$(helm template \
-      -s templates/server-statefulset.yaml \
-      --set 'server.enabled=true' \
-      --set 'server.snapshotAgent.enabled=true' \
-      --set 'global.secretsBackend.vault.enabled=true' \
-      --set 'global.secretsBackend.vault.consulServerRole=test' \
-      --set 'global.enterpriseLicense.secretName=a/b/c/d' \
-      --set 'global.enterpriseLicense.secretKey=enterpriselicense' \
-      . | tee /dev/stderr |
-      yq -r '.spec.template.spec.containers[1].env[]' | tee /dev/stderr)
-
-  local actual
-
-  local actual=$(echo $env | jq -r '. | select(.name == "CONSUL_LICENSE_PATH") | .value' | tee /dev/stderr)
-  [ "${actual}" = "/vault/secrets/enterpriselicense.txt" ]
-}
-
-@test "server/StatefulSet: snapshot-agent: vault does not add volume mount for license secret" {
-  cd `chart_dir`
-  local actual=$(helm template \
-      -s templates/server-statefulset.yaml \
-      --set 'server.enabled=true' \
-      --set 'server.snapshotAgent.enabled=true' \
-      --set 'global.secretsBackend.vault.enabled=true' \
-      --set 'global.secretsBackend.vault.consulServerRole=test' \
-      --set 'global.enterpriseLicense.secretName=a/b/c/d' \
-      --set 'global.enterpriseLicense.secretKey=enterpriselicense' \
-      . | tee /dev/stderr |
-      yq -r -c '.spec.template.spec.containers[1].volumeMounts[] | select(.name == "consul-license")' | tee /dev/stderr)
-  [ "${actual}" = "" ]
-}
-
-@test "server/StatefulSet: snapshot-agent: vault snapshot agent config annotations are correct when enabled" {
-  cd `chart_dir`
-  local object=$(helm template \
-      -s templates/server-statefulset.yaml \
-      --set 'server.enabled=true' \
-      --set 'global.secretsBackend.vault.enabled=true' \
-      --set 'global.secretsBackend.vault.consulServerRole=test' \
-      --set 'server.snapshotAgent.enabled=true' \
-      --set 'server.snapshotAgent.configSecret.secretName=path/to/secret' \
-      --set 'server.snapshotAgent.configSecret.secretKey=config' \
-      . | tee /dev/stderr |
-      yq -r '.spec.template.metadata' | tee /dev/stderr)
-
-  local actual=$(echo $object |
-      yq -r '.annotations["vault.hashicorp.com/agent-inject-secret-snapshot-agent-config.json"]' | tee /dev/stderr)
-  [ "${actual}" = "path/to/secret" ]
-
-  actual=$(echo $object |
-      yq -r '.annotations["vault.hashicorp.com/agent-inject-template-snapshot-agent-config.json"]' | tee /dev/stderr)
-  local expected=$'{{- with secret \"path/to/secret\" -}}\n{{- .Data.data.config -}}\n{{- end -}}'
-  [ "${actual}" = "${expected}" ]
-
-  actual=$(echo $object | jq -r '.annotations["vault.hashicorp.com/role"]' | tee /dev/stderr)
-  [ "${actual}" = "test" ]
-}
-
-@test "server/StatefulSet: snapshot-agent: vault does not add volume for snapshot agent config secret" {
-  cd `chart_dir`
-  local actual=$(helm template \
-      -s templates/server-statefulset.yaml \
-      --set 'server.enabled=true' \
-      --set 'global.secretsBackend.vault.enabled=true' \
-      --set 'global.secretsBackend.vault.consulServerRole=test' \
-      --set 'server.snapshotAgent.enabled=true' \
-      --set 'server.snapshotAgent.configSecret.secretName=a/b/c/d' \
-      --set 'server.snapshotAgent.configSecret.secretKey=snapshot-agent-config' \
-      . | tee /dev/stderr |
-      yq -r -c '.spec.template.spec.volumes[] | select(.name == "snapshot-agent-user-config")' | tee /dev/stderr)
-  [ "${actual}" = "" ]
-}
-
-@test "server/StatefulSet: snapshot-agent: vault does not add volume mount for snapshot agent config secret" {
-  cd `chart_dir`
-  local actual=$(helm template \
-      -s templates/server-statefulset.yaml \
-      --set 'server.enabled=true' \
-      --set 'global.secretsBackend.vault.enabled=true' \
-      --set 'global.secretsBackend.vault.consulServerRole=test' \
-      --set 'server.snapshotAgent.enabled=true' \
-      --set 'server.snapshotAgent.configSecret.secretName=a/b/c/d' \
-      --set 'server.snapshotAgent.configSecret.secretKey=snapshot-agent-config' \
-      . | tee /dev/stderr |
-      yq -r -c '.spec.template.spec.containers[0].volumeMounts[] | select(.name == "snapshot-agent-user-config")' | tee /dev/stderr)
-  [ "${actual}" = "" ]
-}
-
-@test "server/StatefulSet: snapshot-agent: vault sets config-file argument on snapshot agent command to config downloaded by vault agent injector" {
-  cd `chart_dir`
-  local actual=$(helm template \
-      -s templates/server-statefulset.yaml \
-      --set 'server.enabled=true' \
-      --set 'global.secretsBackend.vault.enabled=true' \
-      --set 'global.secretsBackend.vault.consulServerRole=test' \
-      --set 'server.snapshotAgent.enabled=true' \
-      --set 'server.snapshotAgent.configSecret.secretName=a/b/c/d' \
-      --set 'server.snapshotAgent.configSecret.secretKey=snapshot-agent-config' \
-      . | tee /dev/stderr |
-      yq -r '.spec.template.spec.containers[1].command[2] | contains("-config-file=/vault/secrets/snapshot-agent-config.json")' | tee /dev/stderr)
-  [ "${actual}" = 'true' ]
-}
-
-#--------------------------------------------------------------------
-# snapshotAgent Interval
-
-@test "server/StatefulSet: snapshot-agent: interval defaults to 1h" {
-  cd `chart_dir`
-  local actual=$(helm template \
-      -s templates/server-statefulset.yaml \
-      --set 'server.enabled=true' \
-      --set 'server.snapshotAgent.enabled=true' \
-      . | tee /dev/stderr |
-      yq -r '.spec.template.spec.containers[1].command[2] | contains("-interval=1h")' | tee /dev/stderr)
-  [ "${actual}" = "true" ]
-}
-
-@test "server/StatefulSet: snapshot-agent: interval can be set" {
-  cd `chart_dir`
-  local actual=$(helm template \
-      -s templates/server-statefulset.yaml \
-      --set 'server.enabled=true' \
-      --set 'server.snapshotAgent.enabled=true' \
-      --set 'server.snapshotAgent.interval=10h34m5s' \
-      . | tee /dev/stderr |
-      yq -r '.spec.template.spec.containers[1].command[2] | contains("-interval=10h34m5s")' | tee /dev/stderr)
-  [ "${actual}" = "true" ]
-}
\ No newline at end of file
diff --git a/charts/consul/test/unit/sync-catalog-deployment.bats b/charts/consul/test/unit/sync-catalog-deployment.bats
index bd12ae78e6..0e0ac33a88 100755
--- a/charts/consul/test/unit/sync-catalog-deployment.bats
+++ b/charts/consul/test/unit/sync-catalog-deployment.bats
@@ -62,33 +62,21 @@ load _helpers
   [ "${actual}" = "bar" ]
 }
 
-@test "syncCatalog/Deployment: consul env defaults" {
+@test "syncCatalog/Deployment: command defaults" {
   cd `chart_dir`
-  local env=$(helm template \
-      -s templates/sync-catalog-deployment.yaml \
+  local object=$(helm template \
+      -s templates/sync-catalog-deployment.yaml \
       --set 'syncCatalog.enabled=true' \
       . | tee /dev/stderr |
-      yq '.spec.template.spec.containers[0].env[]' | tee /dev/stderr)
-
-  local actual=$(echo "$env" |
-      jq -r '. | select( .name == "CONSUL_ADDRESSES").value' | tee /dev/stderr)
-  [ "${actual}" = "release-name-consul-server.default.svc" ]
-
-  local actual=$(echo "$env" |
-      jq -r '. | select( .name == "CONSUL_GRPC_PORT").value' | tee /dev/stderr)
-  [ "${actual}" = "8502" ]
-
-  local actual=$(echo "$env" |
-      jq -r '. | select( .name == "CONSUL_HTTP_PORT").value' | tee /dev/stderr)
-  [ "${actual}" = "8500" ]
-
-  local actual=$(echo "$env" |
-      jq -r '. | select( .name == "CONSUL_DATACENTER").value' | tee /dev/stderr)
-  [ "${actual}" = "dc1" ]
+      yq -r '.spec.template.spec.containers[0].command' | tee /dev/stderr)
+
+  local actual=$(echo $object |
+      yq -r ' any(contains("consul-k8s-control-plane sync-catalog"))' | tee /dev/stderr)
+  [ "${actual}" = "true" ]
 
-  local actual=$(echo "$env" |
-      jq -r '. | select( .name == "CONSUL_API_TIMEOUT").value' | tee /dev/stderr)
-  [ "${actual}" = "5s" ]
+  local actual=$(echo $object |
+      yq -r ' any(contains("consul-api-timeout=5"))' | tee /dev/stderr)
+  [ "${actual}" = "true" ]
 }
 
 #--------------------------------------------------------------------
@@ -450,82 +438,285 @@ load _helpers
 #--------------------------------------------------------------------
 # global.acls.manageSystemACLs
 
-@test "syncCatalog/Deployment: ACL auth method env vars are set when acls are enabled" {
+@test "syncCatalog/Deployment: consul-logout preStop hook is added when ACLs are enabled" {
   cd `chart_dir`
-  local env=$(helm template \
+  local actual=$(helm template \
       -s templates/sync-catalog-deployment.yaml \
       --set 'syncCatalog.enabled=true' \
      --set 'global.acls.manageSystemACLs=true' \
      . | tee /dev/stderr |
-      yq '.spec.template.spec.containers[0].env[]' | tee /dev/stderr)
+      yq '[.spec.template.spec.containers[0].lifecycle.preStop.exec.command[2]] | any(contains("consul-k8s-control-plane consul-logout -consul-api-timeout=5s"))' | tee /dev/stderr)
 
-  local actual=$(echo "$env" |
-      jq -r '. | select( .name == "CONSUL_LOGIN_AUTH_METHOD").value' | tee /dev/stderr)
-  [ "${actual}" = "release-name-consul-k8s-component-auth-method" ]
+  [ "${actual}" = "true" ]
+}
 
-  local actual=$(echo "$env" |
-      jq -r '. | select( .name == "CONSUL_LOGIN_DATACENTER").value' | tee /dev/stderr)
-  [ "${actual}" = "dc1" ]
-  local actual=$(echo "$env" |
-      jq -r '. | select( .name == "CONSUL_LOGIN_META").value' | tee /dev/stderr)
-  [ "${actual}" = 'component=sync-catalog,pod=$(NAMESPACE)/$(POD_NAME)' ]
+
+@test "syncCatalog/Deployment: CONSUL_HTTP_TOKEN_FILE is not set when acls are disabled" {
+  cd `chart_dir`
+  local actual=$(helm template \
+      -s templates/sync-catalog-deployment.yaml \
+      --set 'syncCatalog.enabled=true' \
+      . | tee /dev/stderr |
+      yq '[.spec.template.spec.containers[0].env[0].name] | any(contains("CONSUL_HTTP_TOKEN_FILE"))' | tee /dev/stderr)
+  [ "${actual}" = "false" ]
 }
 
-@test "syncCatalog/Deployment: sets global auth method and primary datacenter when federation and acls and namespaces are enabled" {
+@test "syncCatalog/Deployment: CONSUL_HTTP_TOKEN_FILE is set when acls are enabled" {
   cd `chart_dir`
-  local env=$(helm template \
+  local actual=$(helm template \
      -s templates/sync-catalog-deployment.yaml \
      --set 'syncCatalog.enabled=true' \
      --set 'global.acls.manageSystemACLs=true' \
-      --set 'global.federation.enabled=true' \
-      --set 'global.federation.primaryDatacenter=dc1' \
-      --set 'global.datacenter=dc2' \
-      --set 'global.enableConsulNamespaces=true' \
-      --set 'global.tls.enabled=true' \
-      --set 'meshGateway.enabled=true' \
      . | tee /dev/stderr |
-      yq '.spec.template.spec.containers[0].env[]' | tee /dev/stderr)
+      yq '[.spec.template.spec.containers[0].env[0].name] | any(contains("CONSUL_HTTP_TOKEN_FILE"))' | tee /dev/stderr)
+  [ "${actual}" = "true" ]
+}
 
-  local actual=$(echo "$env" |
-      jq -r '. | select( .name == "CONSUL_LOGIN_AUTH_METHOD").value' | tee /dev/stderr)
-  [ "${actual}" = "release-name-consul-k8s-component-auth-method-dc2" ]
+@test "syncCatalog/Deployment: init container is created when global.acls.manageSystemACLs=true and has correct command and environment with tls disabled" {
+  cd `chart_dir`
+  local object=$(helm template \
+      -s templates/sync-catalog-deployment.yaml \
+      --set 'syncCatalog.enabled=true' \
+      --set 'global.acls.manageSystemACLs=true' \
+      . | tee /dev/stderr |
+      yq '.spec.template.spec.initContainers[0]' | tee /dev/stderr)
 
-  local actual=$(echo "$env" |
-      jq -r '. | select( .name == "CONSUL_LOGIN_DATACENTER").value' | tee /dev/stderr)
-  [ "${actual}" = "dc1" ]
+  local actual=$(echo $object |
+      yq -r '.name' | tee /dev/stderr)
+  [ "${actual}" = "sync-catalog-acl-init" ]
+
+  local actual=$(echo $object |
+      yq -r '.command | any(contains("consul-k8s-control-plane acl-init"))' | tee /dev/stderr)
+  [ "${actual}" = "true" ]
+
+  local actual=$(echo $object |
+      yq '[.env[1].name] | any(contains("CONSUL_HTTP_ADDR"))' | tee /dev/stderr)
+  [ "${actual}" = "true" ]
+
+  local actual=$(echo $object |
+      yq '[.env[1].value] | any(contains("http://$(HOST_IP):8500"))' | tee /dev/stderr)
+  echo $actual
+  [ "${actual}" = "true" ]
+
+  local actual=$(echo $object |
+      yq -r '.command | any(contains("-consul-api-timeout=5s"))' | tee /dev/stderr)
+  [ "${actual}" = "true" ]
+
+  local actual=$(echo $object |
+      yq -r '.command | any(contains("-consul-api-timeout=5s"))' | tee /dev/stderr)
+  [ "${actual}" = "true" ]
 }
 
-@test "syncCatalog/Deployment: sets default login partition and acls and partitions are enabled" {
+@test "syncCatalog/Deployment: init container is created when global.acls.manageSystemACLs=true and has correct command and environment with tls enabled" {
   cd `chart_dir`
-  local env=$(helm template \
+  local object=$(helm template \
      -s templates/sync-catalog-deployment.yaml \
      --set 'syncCatalog.enabled=true' \
+      --set 'global.tls.enabled=true' \
      --set 'global.acls.manageSystemACLs=true' \
+      . | tee /dev/stderr |
+      yq '.spec.template.spec.initContainers[] | select(.name == "sync-catalog-acl-init")' | tee /dev/stderr)
+
+  local actual=$(echo $object |
+      yq -r '.command | any(contains("consul-k8s-control-plane acl-init"))' | tee /dev/stderr)
+  [ "${actual}" = "true" ]
+
+  local actual=$(echo $object |
+      yq '[.env[1].name] | any(contains("CONSUL_CACERT"))' | tee /dev/stderr)
+  [ "${actual}" = "true" ]
+
+  local actual=$(echo $object |
+      yq '[.env[2].name] | any(contains("CONSUL_HTTP_ADDR"))' | tee /dev/stderr)
+  [ "${actual}" = "true" ]
+
+  local actual=$(echo $object |
+      yq '[.env[2].value] | any(contains("https://$(HOST_IP):8501"))' | tee /dev/stderr)
+  echo $actual
+  [ "${actual}" = "true" ]
+
+  local actual=$(echo $object |
+      yq '.volumeMounts[1] | any(contains("consul-ca-cert"))' | tee /dev/stderr)
+  [ "${actual}" = "true" ]
+
+  local actual=$(echo $object |
+      yq -r '.command | any(contains("-consul-api-timeout=5s"))' | tee /dev/stderr)
+  [ "${actual}" = "true" ]
+
+  local actual=$(echo $object |
+      yq -r '.command | any(contains("-consul-api-timeout=5s"))' | tee /dev/stderr)
+  [ "${actual}" = "true" ]
+}
+
+@test "syncCatalog/Deployment: init container is created when global.acls.manageSystemACLs=true and has correct command with Partitions enabled" {
+  cd `chart_dir`
+  local object=$(helm template \
+      -s templates/sync-catalog-deployment.yaml \
+      --set 'syncCatalog.enabled=true' \
+      --set 'global.tls.enabled=true' \
+      --set 'global.enableConsulNamespaces=true' \
+      --set 'global.adminPartitions.enabled=true' \
+      --set 'global.adminPartitions.name=default' \
+      --set 'global.acls.manageSystemACLs=true' \
+      . | tee /dev/stderr |
+      yq '.spec.template.spec.initContainers[] | select(.name == "sync-catalog-acl-init")' | tee /dev/stderr)
+
+  local actual=$(echo $object |
+      yq -r '.command | any(contains("consul-k8s-control-plane acl-init"))' | tee /dev/stderr)
+  [ "${actual}" = "true" ]
+
+  local actual=$(echo $object |
+      yq -r '.command | any(contains("-acl-auth-method=release-name-consul-k8s-component-auth-method"))' | tee /dev/stderr)
+  [ "${actual}" = "true" ]
+
+  local actual=$(echo $object |
+      yq -r '.command | any(contains("-partition=default"))' | tee /dev/stderr)
+  [ "${actual}" = "true" ]
+
+  local actual=$(echo $object |
+      yq '[.env[1].name] | any(contains("CONSUL_CACERT"))' | tee /dev/stderr)
+  [ "${actual}" = "true" ]
+
+  local actual=$(echo $object |
+      yq '[.env[2].name] | any(contains("CONSUL_HTTP_ADDR"))' | tee /dev/stderr)
+  [ "${actual}" = "true" ]
+
+  local actual=$(echo $object |
+      yq '[.env[2].value] | any(contains("https://$(HOST_IP):8501"))' | tee /dev/stderr)
+  echo $actual
+  [ "${actual}" = "true" ]
+
+  local actual=$(echo $object |
+      yq '.volumeMounts[1] | any(contains("consul-ca-cert"))' | tee /dev/stderr)
+  [ "${actual}" = "true" ]
+
+  local actual=$(echo $object |
+      yq -r '.command | any(contains("-consul-api-timeout=5s"))' | tee /dev/stderr)
+  [ "${actual}" = "true" ]
+}
+
+@test "syncCatalog/Deployment: container is created when global.acls.manageSystemACLs=true and has correct command with Partitions enabled" {
+  cd `chart_dir`
+  local object=$(helm template \
+      -s templates/sync-catalog-deployment.yaml \
+      --set 'syncCatalog.enabled=true' \
+      --set 'global.tls.enabled=true' \
+      --set 'global.enableConsulNamespaces=true' \
+      --set 'global.adminPartitions.enabled=true' \
+      --set 'global.adminPartitions.name=default' \
+      --set 'global.acls.manageSystemACLs=true' \
+      . | tee /dev/stderr |
+      yq '.spec.template.spec.containers[] | select(.name == "sync-catalog")' | tee /dev/stderr)
 
-  local actual=$(echo "$env" |
-      jq -r '. | select( .name == "CONSUL_LOGIN_PARTITION").value' | tee /dev/stderr)
-  [ "${actual}" = "default" ]
+  local actual=$(echo $object |
+      yq -r '.command | any(contains("-partition=default"))' | tee /dev/stderr)
+  [ "${actual}" = "true" ]
 }
 
-@test "syncCatalog/Deployment: sets non-default login partition and acls and partitions are enabled" {
+@test "syncCatalog/Deployment: init container is created when global.acls.manageSystemACLs=true and has correct command and environment with tls enabled and autoencrypt enabled" {
   cd `chart_dir`
-  local env=$(helm template \
+  local object=$(helm template \
      -s templates/sync-catalog-deployment.yaml \
      --set 'syncCatalog.enabled=true' \
+      --set 'global.tls.enabled=true' \
+      --set 'global.tls.enableAutoEncrypt=true' \
      --set 'global.acls.manageSystemACLs=true' \
+      . | tee /dev/stderr |
+      yq '.spec.template.spec.initContainers[] | select(.name == "sync-catalog-acl-init")' | tee /dev/stderr)
+
+  local actual=$(echo $object |
+      yq -r '.command | any(contains("consul-k8s-control-plane acl-init"))' | tee /dev/stderr)
+  [ "${actual}" = "true" ]
+
+  local actual=$(echo $object |
+      yq '[.env[1].name] | any(contains("CONSUL_CACERT"))' | tee /dev/stderr)
+  [ "${actual}" = "true" ]
+
+  local actual=$(echo $object |
+      yq '[.env[2].name] | any(contains("CONSUL_HTTP_ADDR"))' | tee /dev/stderr)
+  [ "${actual}" = "true" ]
+
+  local actual=$(echo $object |
+      yq '[.env[2].value] | any(contains("https://$(HOST_IP):8501"))' | tee /dev/stderr)
+  echo $actual
+  [ "${actual}" = "true" ]
+
+  local actual=$(echo $object |
+      yq '.volumeMounts[1] | any(contains("consul-auto-encrypt-ca-cert"))' | tee /dev/stderr)
+  [ "${actual}" = "true" ]
+
+  local actual=$(echo $object |
+      yq -r '.command | any(contains("-consul-api-timeout=5s"))' | tee /dev/stderr)
+  [ "${actual}" = "true" ]
+}
+
+@test "syncCatalog/Deployment: auto-encrypt init container is created and is the first init-container when global.acls.manageSystemACLs=true and has correct command and environment with tls enabled and autoencrypt enabled" {
+  cd `chart_dir`
+  local object=$(helm template \
+      -s templates/sync-catalog-deployment.yaml \
+      --set 'syncCatalog.enabled=true' \
+      --set 'global.tls.enabled=true' \
+      --set 'global.tls.enableAutoEncrypt=true' \
+      --set 'global.acls.manageSystemACLs=true' \
+      . | tee /dev/stderr |
+      yq '.spec.template.spec.initContainers[0]' | tee /dev/stderr)
+
+  local actual=$(echo $object |
+      yq -r '.name' | tee /dev/stderr)
+  [ "${actual}" = "get-auto-encrypt-client-ca" ]
+}
+
+@test "syncCatalog/Deployment: init container is created when global.acls.manageSystemACLs=true and has correct command when in non-primary datacenter with Consul Namespaces disabled" {
+  cd `chart_dir`
+  local object=$(helm template \
+      -s templates/sync-catalog-deployment.yaml \
+      --set 'syncCatalog.enabled=true' \
+      --set 'global.datacenter=dc2' \
+      --set 'global.federation.enabled=true' \
+      --set 'global.federation.primaryDatacenter=dc1' \
+      --set 'meshGateway.enabled=true' \
+      --set 'connectInject.enabled=true' \
+      --set 'global.tls.enabled=true' \
+      --set 'global.tls.enableAutoEncrypt=true' \
+      --set 'global.acls.manageSystemACLs=true' \
+      . | tee /dev/stderr |
+      yq '.spec.template.spec.initContainers[] | select(.name == "sync-catalog-acl-init")' | tee /dev/stderr)
+
+  local actual=$(echo $object |
+      yq -r '.command | any(contains("consul-k8s-control-plane acl-init"))' | tee /dev/stderr)
+  [ "${actual}" = "true" ]
+
+  local actual=$(echo $object |
+      yq -r '.command | any(contains("-acl-auth-method=release-name-consul-k8s-component-auth-method"))' | tee /dev/stderr)
+  [ "${actual}" = "true" ]
+}
+
+@test "syncCatalog/Deployment: init container is created when global.acls.manageSystemACLs=true and has correct command when in non-primary datacenter with Consul Namespaces enabled" {
+  cd `chart_dir`
+  local object=$(helm template \
+      -s templates/sync-catalog-deployment.yaml \
+      --set 'syncCatalog.enabled=true' \
+      --set 'global.datacenter=dc2' \
+      --set 'global.enableConsulNamespaces=true' \
+      --set 'global.federation.enabled=true' \
+      --set 'global.federation.primaryDatacenter=dc1' \
+      --set 'meshGateway.enabled=true' \
+      --set 'connectInject.enabled=true' \
+      --set 'global.tls.enabled=true' \
+      --set 'global.tls.enableAutoEncrypt=true' \
+      --set 'global.acls.manageSystemACLs=true' \
+      . | tee /dev/stderr |
-      yq '.spec.template.spec.containers[0].env[]' | tee /dev/stderr)
+      yq '.spec.template.spec.initContainers[] | select(.name == "sync-catalog-acl-init")' | tee /dev/stderr)
 
-  local actual=$(echo "$env" |
-      jq -r '. | select( .name == "CONSUL_LOGIN_PARTITION").value' | tee /dev/stderr)
-  [ "${actual}" = "foo" ]
+  local actual=$(echo $object |
+      yq -r '.command | any(contains("consul-k8s-control-plane acl-init"))' | tee /dev/stderr)
+  [ "${actual}" = "true" ]
+
+  local actual=$(echo $object |
+      yq -r '.command | any(contains("-acl-auth-method=release-name-consul-k8s-component-auth-method-dc2"))' | tee /dev/stderr)
+  [ "${actual}" = "true" ]
+
+  local actual=$(echo $object |
+      yq -r '.command | any(contains("-primary-datacenter=dc1"))' | tee /dev/stderr)
+  [ "${actual}" = "true" ]
 }
 
 #--------------------------------------------------------------------
@@ -559,23 +750,17 @@ load _helpers
   cd `chart_dir`
   local env=$(helm template \
      -s templates/sync-catalog-deployment.yaml \
-      --set 'client.enabled=true' \
      --set 'syncCatalog.enabled=true' \
      --set 'global.tls.enabled=true' \
      . | tee /dev/stderr |
      yq -r '.spec.template.spec.containers[0].env[]' | tee /dev/stderr)
 
-  local actual=$(echo "$env" |
-      jq -r '. | select( .name == "CONSUL_HTTP_PORT").value' | tee /dev/stderr)
-  [ "${actual}" = "8501" ]
-
-  local actual=$(echo "$env" |
-      jq -r '. | select( .name == "CONSUL_USE_TLS").value' | tee /dev/stderr)
-  [ "${actual}" = "true" ]
+  local actual
+  actual=$(echo $env | jq -r '. | select(.name == "CONSUL_HTTP_ADDR") | .value' | tee /dev/stderr)
+  [ "${actual}" = 'https://$(HOST_IP):8501' ]
 
-  local actual=$(echo "$env" |
-      jq -r '. | select( .name == "CONSUL_CACERT_FILE").value' | tee /dev/stderr)
-  [ "${actual}" = "/consul/tls/ca/tls.crt" ]
+  actual=$(echo $env | jq -r '. | select(.name == "CONSUL_CACERT") | .value' | tee /dev/stderr)
+  [ "${actual}" = "/consul/tls/ca/tls.crt" ]
 }
 
 @test "syncCatalog/Deployment: can overwrite CA secret with the provided one" {
@@ -601,17 +786,81 @@ load _helpers
   [ "${actual}" = "key" ]
 }
 
-@test "syncCatalog/Deployment: consul-ca-cert volumeMount is added when TLS is enabled" {
+@test "syncCatalog/Deployment: consul-auto-encrypt-ca-cert volume is not added with auto-encrypt and client.enabled=false" {
+  cd `chart_dir`
+  local actual=$(helm template \
+      -s templates/sync-catalog-deployment.yaml \
+      --set 'syncCatalog.enabled=true' \
+      --set 'global.tls.enabled=true' \
+      --set 'global.tls.enableAutoEncrypt=true' \
+      --set 'client.enabled=false' \
+      . | tee /dev/stderr |
+      yq '.spec.template.spec.volumes[] | select(.name == "consul-auto-encrypt-ca-cert")' | tee /dev/stderr)
+  [ "${actual}" = "" ]
+}
+
+@test "syncCatalog/Deployment: consul-auto-encrypt-ca-cert volume is added when TLS with auto-encrypt is enabled" {
+  cd `chart_dir`
+  local actual=$(helm template \
+      -s templates/sync-catalog-deployment.yaml \
+      --set 'syncCatalog.enabled=true' \
+      --set 'global.tls.enabled=true' \
+      --set 'global.tls.enableAutoEncrypt=true' \
+      . | tee /dev/stderr |
+      yq '.spec.template.spec.volumes[] | select(.name == "consul-auto-encrypt-ca-cert") | length > 0' | tee /dev/stderr)
+  [ "${actual}" = "true" ]
+}
+
+@test "syncCatalog/Deployment: consul-auto-encrypt-ca-cert volumeMount is added when TLS with auto-encrypt is enabled" {
   cd `chart_dir`
   local actual=$(helm template \
      -s templates/sync-catalog-deployment.yaml \
      --set 'syncCatalog.enabled=true' \
      --set 'global.tls.enabled=true' \
+      --set 'global.tls.enableAutoEncrypt=true' \
+      . | tee /dev/stderr |
+      yq '.spec.template.spec.containers[0].volumeMounts[] | select(.name == "consul-auto-encrypt-ca-cert") | length > 0' | tee /dev/stderr)
+  [ "${actual}" = "true" ]
+}
+
+@test "syncCatalog/Deployment: consul-ca-cert volumeMount is added when TLS with auto-encrypt is enabled and client disabled" {
+  cd `chart_dir`
+  local actual=$(helm template \
+      -s templates/sync-catalog-deployment.yaml \
+      --set 'syncCatalog.enabled=true' \
+      --set 'global.tls.enabled=true' \
+      --set 'global.tls.enableAutoEncrypt=true' \
+      --set 'client.enabled=false' \
      . | tee /dev/stderr |
      yq '.spec.template.spec.containers[0].volumeMounts[] | select(.name == "consul-ca-cert") | length > 0' | tee /dev/stderr)
   [ "${actual}" = "true" ]
 }
 
+@test "syncCatalog/Deployment: get-auto-encrypt-client-ca init container is created when TLS with auto-encrypt is enabled" {
+  cd `chart_dir`
+  local actual=$(helm template \
+      -s templates/sync-catalog-deployment.yaml \
+      --set 'syncCatalog.enabled=true' \
+      --set 'global.tls.enabled=true' \
+      --set 'global.tls.enableAutoEncrypt=true' \
+      . | tee /dev/stderr |
+      yq '.spec.template.spec.initContainers[] | select(.name == "get-auto-encrypt-client-ca") | length > 0' | tee /dev/stderr)
+  [ "${actual}" = "true" ]
+}
+
+@test "syncCatalog/Deployment: adds both init containers when TLS with auto-encrypt and ACLs are enabled" {
+  cd `chart_dir`
+  local actual=$(helm template \
+      -s templates/sync-catalog-deployment.yaml \
+      --set 'syncCatalog.enabled=true' \
+      --set 'global.acls.manageSystemACLs=true' \
+      --set 'global.tls.enabled=true' \
+      --set 'global.tls.enableAutoEncrypt=true' \
+      . | tee /dev/stderr |
+      yq '.spec.template.spec.initContainers | length == 2' | tee /dev/stderr)
+  [ "${actual}" = "true" ]
+}
+
 @test "syncCatalog/Deployment: consul-ca-cert volume is not added if externalServers.enabled=true and externalServers.useSystemRoots=true" {
   cd `chart_dir`
   local actual=$(helm template \
@@ -729,7 +978,7 @@ load _helpers
 
   local actual=$(echo $object |
     yq 'any(contains("enable-k8s-namespace-mirroring"))' | tee /dev/stderr)
-  [ "${actual}" = "true" ]
+  [ "${actual}" = "false" ]
 
   local actual=$(echo $object |
     yq 'any(contains("k8s-namespace-mirroring-prefix"))' | tee /dev/stderr)
@@ -844,6 +1093,79 @@ load _helpers
   [ "${actual}" = '{"limits":{"cpu":"200m","memory":"200Mi"},"requests":{"cpu":"100m","memory":"100Mi"}}' ]
 }
 
+
+#--------------------------------------------------------------------
+# clients.enabled
+
+@test "syncCatalog/Deployment: HOST_IP is used when client.enabled=true" {
+  cd `chart_dir`
+  local env=$(helm template \
+      -s templates/sync-catalog-deployment.yaml \
+      --set 'syncCatalog.enabled=true' \
+      --set 'client.enabled=true' \
+      . | tee /dev/stderr |
+      yq -r '.spec.template.spec.containers[0].env[]' | tee /dev/stderr)
+
+  local actual
+  actual=$(echo $env | jq -r '. | select(.name == "CONSUL_HTTP_ADDR") | .value' | tee /dev/stderr)
+  [ "${actual}" = 'http://$(HOST_IP):8500' ]
+}
+
+@test "syncCatalog/Deployment: HOST_IP is used when client.enabled=true and global.tls.enabled=true" {
+  cd `chart_dir`
+  local env=$(helm template \
+      -s templates/sync-catalog-deployment.yaml \
+      --set 'syncCatalog.enabled=true' \
+      --set 'global.tls.enabled=true' \
+      --set 'client.enabled=true' \
+      . | tee /dev/stderr |
+      yq -r '.spec.template.spec.containers[0].env[]' | tee /dev/stderr)
+
+  local actual
+  actual=$(echo $env | jq -r '. | select(.name == "CONSUL_HTTP_ADDR") | .value' | tee /dev/stderr)
+  [ "${actual}" = 'https://$(HOST_IP):8501' ]
+
+  actual=$(echo $env | jq -r '. | select(.name == "CONSUL_CACERT") | .value' | tee /dev/stderr)
+  [ "${actual}" = "/consul/tls/ca/tls.crt" ]
+}
+
+@test "syncCatalog/Deployment: consul service is used when client.enabled=false and global.tls.enabled=true and autoencrypt on" {
+  cd `chart_dir`
+  local env=$(helm template \
+      -s templates/sync-catalog-deployment.yaml \
+      --set 'syncCatalog.enabled=true' \
+      --set 'global.tls.enabled=true' \
+      --set 'global.tls.enableAutoEncrypt=true' \
+      --set 'client.enabled=false' \
+      . | tee /dev/stderr |
+      yq -r '.spec.template.spec.containers[0].env[]' | tee /dev/stderr)
+
+  local actual
+  actual=$(echo $env | jq -r '. | select(.name == "CONSUL_HTTP_ADDR") | .value' | tee /dev/stderr)
+  [ "${actual}" = 'https://release-name-consul-server:8501' ]
+
+  actual=$(echo $env | jq -r '. | select(.name == "CONSUL_CACERT") | .value' | tee /dev/stderr)
+  [ "${actual}" = "/consul/tls/ca/tls.crt" ]
+}
+
+@test "syncCatalog/Deployment: consul service is used when client.enabled=false and global.tls.enabled=true" {
+  cd `chart_dir`
+  local env=$(helm template \
+      -s templates/sync-catalog-deployment.yaml \
+      --set 'syncCatalog.enabled=true' \
+      --set 'global.tls.enabled=true' \
+      --set 'client.enabled=false' \
+      . | tee /dev/stderr |
+      yq -r '.spec.template.spec.containers[0].env[]' | tee /dev/stderr)
+
+  local actual
+  actual=$(echo $env | jq -r '. | select(.name == "CONSUL_HTTP_ADDR") | .value' | tee /dev/stderr)
+  [ "${actual}" = 'https://release-name-consul-server:8501' ]
+
+  actual=$(echo $env | jq -r '. | select(.name == "CONSUL_CACERT") | .value' | tee /dev/stderr)
+  [ "${actual}" = "/consul/tls/ca/tls.crt" ]
+}
+
 #--------------------------------------------------------------------
 # priorityClassName
 
@@ -949,6 +1271,36 @@ load _helpers
   [ "${actual}" = "true" ]
 }
 
+#--------------------------------------------------------------------
+# get-auto-encrypt-client-ca
+
+@test "syncCatalog/Deployment: get-auto-encrypt-client-ca uses server's stateful set address by default and passes ca cert" {
+  cd `chart_dir`
+  local command=$(helm template \
+      -s templates/sync-catalog-deployment.yaml \
+      --set 'syncCatalog.enabled=true' \
+      --set 'global.tls.enabled=true' \
+      --set 'global.tls.enableAutoEncrypt=true' \
+      . | tee /dev/stderr |
+      yq '.spec.template.spec.initContainers[] | select(.name == "get-auto-encrypt-client-ca").command | join(" ")' | tee /dev/stderr)
+
+  # check server address
+  actual=$(echo $command | jq ' . | contains("-server-addr=release-name-consul-server")')
+  [ "${actual}" = "true" ]
+
+  # check server port
+  actual=$(echo $command | jq ' . | contains("-server-port=8501")')
+  [ "${actual}" = "true" ]
+
+  # check server's CA cert
+  actual=$(echo $command | jq ' . | contains("-ca-file=/consul/tls/ca/tls.crt")')
+  [ "${actual}" = "true" ]
+
+  # check consul-api-timeout
+  actual=$(echo $command | jq ' . | contains("-consul-api-timeout=5s")')
+  [ "${actual}" = "true" ]
+}
+
 #--------------------------------------------------------------------
 # Vault
 
@@ -958,6 +1310,7 @@ load _helpers
      -s templates/sync-catalog-deployment.yaml \
      --set 'syncCatalog.enabled=true' \
      --set 'global.tls.enabled=true' \
+      --set 'global.tls.enableAutoEncrypt=true' \
      --set 'global.tls.caCert.secretName=foo' \
      --set 'global.secretsBackend.vault.enabled=true' \
      --set 'global.secretsBackend.vault.consulClientRole=test' \
@@ -990,6 +1343,7 @@ load _helpers
      -s templates/sync-catalog-deployment.yaml \
      --set 'syncCatalog.enabled=true' \
      --set 'global.tls.enabled=true' \
+      --set 'global.tls.enableAutoEncrypt=true' \
      --set 'global.tls.caCert.secretName=foo' \
      --set 'global.secretsBackend.vault.enabled=true' \
      --set 'global.secretsBackend.vault.consulClientRole=foo' \
@@ -1010,6 +1364,7 @@ load _helpers
      -s templates/sync-catalog-deployment.yaml \
      --set 'syncCatalog.enabled=true' \
      --set 'global.tls.enabled=true' \
+      --set 'global.tls.enableAutoEncrypt=true' \
      --set 'global.tls.caCert.secretName=foo' \
      --set 'global.secretsBackend.vault.enabled=true' \
      --set 'global.secretsBackend.vault.consulClientRole=foo' \
@@ -1031,6 +1386,7 @@ load _helpers
      -s templates/sync-catalog-deployment.yaml \
      --set 'syncCatalog.enabled=true' \
      --set 'global.tls.enabled=true' \
+      --set 'global.tls.enableAutoEncrypt=true' \
      --set 'global.tls.caCert.secretName=foo' \
      --set 'global.secretsBackend.vault.enabled=true' \
      --set 'global.secretsBackend.vault.consulClientRole=foo' \
@@ -1052,6 +1408,7 @@ load _helpers
      -s templates/sync-catalog-deployment.yaml \
      --set 'syncCatalog.enabled=true' \
      --set 'global.tls.enabled=true' \
+      --set 'global.tls.enableAutoEncrypt=true' \
      --set 'global.tls.caCert.secretName=foo' \
      --set 'global.secretsBackend.vault.enabled=true' \
      --set 'global.secretsBackend.vault.consulClientRole=foo' \
@@ -1092,6 +1449,7 @@ load _helpers
      -s templates/sync-catalog-deployment.yaml \
      --set 'syncCatalog.enabled=true' \
      --set 'global.tls.enabled=true' \
+      --set 'global.tls.enableAutoEncrypt=true' \
      --set 'global.secretsBackend.vault.enabled=true' \
      --set 'global.secretsBackend.vault.consulClientRole=test' \
      --set 'global.secretsBackend.vault.consulServerRole=foo' \
@@ -1134,181 +1492,3 @@ reservedNameTest() {
   [ "$status" -eq 1 ]
   [[ "$output" =~ "The name $name set for key syncCatalog.consulNamespaces.consulDestinationNamespace is reserved by Consul for future use" ]]
 }
-
-#--------------------------------------------------------------------
-# global.cloud
-
-@test "syncCatalog/Deployment: fails when global.cloud.enabled is true and global.cloud.clientId.secretName is not set but global.cloud.clientSecret.secretName and global.cloud.resourceId.secretName is set" {
-  cd `chart_dir`
-  run helm template \
-      -s templates/sync-catalog-deployment.yaml \
-      --set 'syncCatalog.enabled=true' \
-      --set 'global.cloud.enabled=true' \
-      --set 'global.cloud.clientSecret.secretName=client-id-name' \
-      --set 'global.cloud.clientSecret.secretKey=client-id-key' \
-      --set 'global.cloud.resourceId.secretName=client-resource-id-name' \
-      --set 'global.cloud.resourceId.secretKey=client-resource-id-key' \
-      .
-  [ "$status" -eq 1 ]
-  [[ "$output" =~ "When global.cloud.enabled is true, global.cloud.resourceId.secretName, global.cloud.clientId.secretName, and global.cloud.clientSecret.secretName must also be set." ]]
-}
-
-@test "syncCatalog/Deployment: fails when global.cloud.enabled is true and global.cloud.clientSecret.secretName is not set but global.cloud.clientId.secretName and global.cloud.resourceId.secretName is set" {
-  cd `chart_dir`
-  run helm template \
-      -s templates/sync-catalog-deployment.yaml \
-      --set 'syncCatalog.enabled=true' \
-      --set 'global.cloud.enabled=true' \
-      --set 'global.cloud.clientId.secretName=client-id-name' \
-      --set 'global.cloud.clientId.secretKey=client-id-key' \
-      --set 'global.cloud.resourceId.secretName=resource-id-name' \
-      --set 'global.cloud.resourceId.secretKey=resource-id-key' \
-      .
-  [ "$status" -eq 1 ]
-  [[ "$output" =~ "When global.cloud.enabled is true, global.cloud.resourceId.secretName, global.cloud.clientId.secretName, and global.cloud.clientSecret.secretName must also be set." ]]
-}
-
-@test "syncCatalog/Deployment: fails when global.cloud.enabled is true and global.cloud.resourceId.secretName is not set but global.cloud.clientId.secretName and global.cloud.clientSecret.secretName is set" {
-  cd `chart_dir`
-  run helm template \
-      -s templates/sync-catalog-deployment.yaml \
-      --set 'syncCatalog.enabled=true' \
-      --set 'global.cloud.enabled=true' \
-      --set 'global.cloud.clientId.secretName=client-id-name' \
-      --set 'global.cloud.clientId.secretKey=client-id-key' \
-      --set 'global.cloud.clientSecret.secretName=client-secret-id-name' \
-      --set 'global.cloud.clientSecret.secretKey=client-secret-id-key' \
-      .
-  [ "$status" -eq 1 ]
-  [[ "$output" =~ "When global.cloud.enabled is true, global.cloud.resourceId.secretName, global.cloud.clientId.secretName, and global.cloud.clientSecret.secretName must also be set." ]]
-}
-
-@test "syncCatalog/Deployment: fails when global.cloud.resourceId.secretName is set but global.cloud.resourceId.secretKey is not set." {
-  cd `chart_dir`
-  run helm template \
-      -s templates/sync-catalog-deployment.yaml \
-      --set 'syncCatalog.enabled=true' \
-      --set 'global.cloud.enabled=true' \
-      --set 'global.cloud.clientId.secretName=client-id-name' \
-      --set 'global.cloud.clientId.secretKey=client-id-key' \
-      --set 'global.cloud.clientSecret.secretName=client-secret-id-name' \
-      --set 'global.cloud.clientSecret.secretKey=client-secret-id-key' \
-      --set 'global.cloud.resourceId.secretName=resource-id-name' \
-      .
-  [ "$status" -eq 1 ]
-  [[ "$output" =~ "When either global.cloud.resourceId.secretName or global.cloud.resourceId.secretKey is defined, both must be set." ]]
-}
-
-@test "syncCatalog/Deployment: fails when global.cloud.authURL.secretName is set but global.cloud.authURL.secretKey is not set." {
-  cd `chart_dir`
-  run helm template \
-      -s templates/sync-catalog-deployment.yaml \
-      --set 'syncCatalog.enabled=true' \
-      --set 'global.cloud.enabled=true' \
-      --set 'global.cloud.clientId.secretName=client-id-name' \
-      --set 'global.cloud.clientId.secretKey=client-id-key' \
-      --set 'global.cloud.clientSecret.secretName=client-secret-id-name' \
-      --set 'global.cloud.clientSecret.secretKey=client-secret-id-key' \
-      --set 'global.cloud.resourceId.secretName=resource-id-name' \
-      --set 'global.cloud.resourceId.secretKey=resource-id-key' \
-      --set 'global.cloud.authUrl.secretName=auth-url-name' \
-      .
-  [ "$status" -eq 1 ]
-
-  [[ "$output" =~ "When either global.cloud.authUrl.secretName or global.cloud.authUrl.secretKey is defined, both must be set." ]]
-}
-
-@test "syncCatalog/Deployment: fails when global.cloud.authURL.secretKey is set but global.cloud.authURL.secretName is not set." {
-  cd `chart_dir`
-  run helm template \
-      -s templates/sync-catalog-deployment.yaml \
-      --set 'syncCatalog.enabled=true' \
-      --set 'global.cloud.enabled=true' \
-      --set 'global.cloud.clientId.secretName=client-id-name' \
-      --set 'global.cloud.clientId.secretKey=client-id-key' \
-      --set 'global.cloud.clientSecret.secretName=client-secret-id-name' \
-      --set 'global.cloud.clientSecret.secretKey=client-secret-id-key' \
-      --set 'global.cloud.resourceId.secretName=resource-id-name' \
-      --set 'global.cloud.resourceId.secretKey=resource-id-key' \
-      --set 'global.cloud.authUrl.secretKey=auth-url-key' \
-      .
-  [ "$status" -eq 1 ]
-
-  [[ "$output" =~ "When either global.cloud.authUrl.secretName or global.cloud.authUrl.secretKey is defined, both must be set." ]]
-}
-
-@test "syncCatalog/Deployment: fails when global.cloud.apiHost.secretName is set but global.cloud.apiHost.secretKey is not set." {
-  cd `chart_dir`
-  run helm template \
-      -s templates/sync-catalog-deployment.yaml \
-      --set 'syncCatalog.enabled=true' \
-      --set 'global.cloud.enabled=true' \
-      --set 'global.cloud.clientId.secretName=client-id-name' \
-      --set 'global.cloud.clientId.secretKey=client-id-key' \
-      --set 'global.cloud.clientSecret.secretName=client-secret-id-name' \
-      --set 'global.cloud.clientSecret.secretKey=client-secret-id-key' \
-      --set 'global.cloud.resourceId.secretName=resource-id-name' \
-      --set 'global.cloud.resourceId.secretKey=resource-id-key' \
-      --set 'global.cloud.apiHost.secretName=auth-url-name' \
-      .
-  [ "$status" -eq 1 ]
-
-  [[ "$output" =~ "When either global.cloud.apiHost.secretName or global.cloud.apiHost.secretKey is defined, both must be set." ]]
-}
-
-@test "syncCatalog/Deployment: fails when global.cloud.apiHost.secretKey is set but global.cloud.apiHost.secretName is not set." {
-  cd `chart_dir`
-  run helm template \
-      -s templates/sync-catalog-deployment.yaml \
-      --set 'syncCatalog.enabled=true' \
-      --set 'global.cloud.enabled=true' \
-      --set 'global.cloud.clientId.secretName=client-id-name' \
-      --set 'global.cloud.clientId.secretKey=client-id-key' \
-      --set 'global.cloud.clientSecret.secretName=client-secret-id-name' \
-      --set 'global.cloud.clientSecret.secretKey=client-secret-id-key' \
-      --set 'global.cloud.resourceId.secretName=resource-id-name' \
-      --set 'global.cloud.resourceId.secretKey=resource-id-key' \
-      --set 'global.cloud.apiHost.secretKey=auth-url-key' \
-      .
-  [ "$status" -eq 1 ]
-
-  [[ "$output" =~ "When either global.cloud.apiHost.secretName or global.cloud.apiHost.secretKey is defined, both must be set." ]]
-}
-
-@test "syncCatalog/Deployment: fails when global.cloud.scadaAddress.secretName is set but global.cloud.scadaAddress.secretKey is not set." {
-  cd `chart_dir`
-  run helm template \
-      -s templates/sync-catalog-deployment.yaml \
-      --set 'syncCatalog.enabled=true' \
-      --set 'global.cloud.enabled=true' \
-      --set 'global.cloud.clientId.secretName=client-id-name' \
-      --set 'global.cloud.clientId.secretKey=client-id-key' \
-      --set 'global.cloud.clientSecret.secretName=client-secret-id-name' \
-      --set 'global.cloud.clientSecret.secretKey=client-secret-id-key' \
-      --set 'global.cloud.resourceId.secretName=resource-id-name' \
-      --set 'global.cloud.resourceId.secretKey=resource-id-key' \
-      --set 'global.cloud.scadaAddress.secretName=scada-address-name' \
-      .
-  [ "$status" -eq 1 ]
-
-  [[ "$output" =~ "When either global.cloud.scadaAddress.secretName or global.cloud.scadaAddress.secretKey is defined, both must be set." ]]
-}
-
-@test "syncCatalog/Deployment: fails when global.cloud.scadaAddress.secretKey is set but global.cloud.scadaAddress.secretName is not set." {
-  cd `chart_dir`
-  run helm template \
-      -s templates/sync-catalog-deployment.yaml \
-      --set 'syncCatalog.enabled=true' \
-      --set 'global.cloud.enabled=true' \
-      --set 'global.cloud.clientId.secretName=client-id-name' \
-      --set 'global.cloud.clientId.secretKey=client-id-key' \
-      --set 'global.cloud.clientSecret.secretName=client-secret-id-name' \
-      --set 'global.cloud.clientSecret.secretKey=client-secret-id-key' \
-      --set 'global.cloud.resourceId.secretName=resource-id-name' \
-      --set 'global.cloud.resourceId.secretKey=resource-id-key' \
-      --set 'global.cloud.scadaAddress.secretKey=scada-address-key' \
-      .
-  [ "$status" -eq 1 ]
-
-  [[ "$output" =~ "When either global.cloud.scadaAddress.secretName or global.cloud.scadaAddress.secretKey is defined, both must be set." ]]
-}
diff --git a/charts/consul/test/unit/terminating-gateways-deployment.bats b/charts/consul/test/unit/terminating-gateways-deployment.bats
index 3f312cf760..61fa50bb84 100644
--- a/charts/consul/test/unit/terminating-gateways-deployment.bats
+++ b/charts/consul/test/unit/terminating-gateways-deployment.bats
@@ -25,6 +25,41 @@ load _helpers
   [ "${actual}" = "release-name-consul-terminating-gateway" ]
 }
 
+@test "terminatingGateways/Deployment: Adds consul service volumeMount to gateway container" {
+  cd `chart_dir`
+  local object=$(helm template \
+      -s templates/terminating-gateways-deployment.yaml \
+      --set 'terminatingGateways.enabled=true' \
+      --set 'connectInject.enabled=true' \
+      --set 'terminatingGateways.enabled=true' \
+      --set 'global.acls.manageSystemACLs=true' \
+      . | yq '.spec.template.spec.containers[0].volumeMounts[1]' | tee /dev/stderr)
+
+  local actual=$(echo $object |
+      yq -r '.name' | tee /dev/stderr)
+  [ "${actual}" = "consul-service" ]
+
+  local actual=$(echo $object |
+      yq -r '.mountPath' | tee /dev/stderr)
+  [ "${actual}" = "/consul/service" ]
+
+  local actual=$(echo $object |
+      yq -r '.readOnly' | tee /dev/stderr)
+  [ "${actual}" = "true" ]
+}
+
+@test "terminatingGateways/Deployment: consul-sidecar uses -consul-api-timeout" {
+  cd `chart_dir`
+  local actual=$(helm template \
+      -s templates/terminating-gateways-deployment.yaml \
+      --set 'terminatingGateways.enabled=true' \
+      --set 'connectInject.enabled=true' \
+      --set 'global.acls.manageSystemACLs=true' \
+      . | tee /dev/stderr |
+      yq -s '.[0].spec.template.spec.containers[1].command | any(contains("-consul-api-timeout=5s"))' | tee /dev/stderr)
+  [ "${actual}" = "true" ]
+}
+
 #--------------------------------------------------------------------
 # prerequisites
 
@@ -38,6 +73,40 @@ load _helpers
   [[ "$output" =~ "connectInject.enabled must be true" ]]
 }
 
+@test "terminatingGateways/Deployment: fails if client.grpc=false" {
+  cd `chart_dir`
+  run helm template \
+      -s templates/terminating-gateways-deployment.yaml \
+      --set 'terminatingGateways.enabled=true' \
+      --set 'client.grpc=false' \
+      --set 'connectInject.enabled=true' .
+  [ "$status" -eq 1 ]
+  [[ "$output" =~ "client.grpc must be true" ]]
+}
+
+@test "terminatingGateways/Deployment: fails if global.enabled is false and clients are not explicitly enabled" {
+  cd `chart_dir`
+  run helm template \
+      -s templates/terminating-gateways-deployment.yaml \
+      --set 'terminatingGateways.enabled=true' \
+      --set 'connectInject.enabled=true' \
+      --set 'global.enabled=false' .
+ [ "$status" -eq 1 ] + [[ "$output" =~ "clients must be enabled" ]] +} + +@test "terminatingGateways/Deployment: fails if global.enabled is true but clients are explicitly disabled" { + cd `chart_dir` + run helm template \ + -s templates/terminating-gateways-deployment.yaml \ + --set 'terminatingGateways.enabled=true' \ + --set 'connectInject.enabled=true' \ + --set 'global.enabled=true' \ + --set 'client.enabled=false' . + [ "$status" -eq 1 ] + [[ "$output" =~ "clients must be enabled" ]] +} + @test "terminatingGateways/Deployment: fails if there are duplicate gateway names" { cd `chart_dir` run helm template \ @@ -70,92 +139,69 @@ load _helpers } #-------------------------------------------------------------------- -# dataplaneImage +# envoyImage -@test "terminatingGateways/Deployment: dataplane image can be set using the global value" { +@test "terminatingGateways/Deployment: envoy image has default global value" { cd `chart_dir` local actual=$(helm template \ -s templates/terminating-gateways-deployment.yaml \ --set 'terminatingGateways.enabled=true' \ --set 'connectInject.enabled=true' \ - --set 'global.imageConsulDataplane=new/image' \ . | tee /dev/stderr | yq -s -r '.[0].spec.template.spec.containers[0].image' | tee /dev/stderr) - [ "${actual}" = "new/image" ] + [[ "${actual}" =~ "envoyproxy/envoy:v" ]] } -#-------------------------------------------------------------------- -# global.tls.enabled - -@test "terminatingGateways/Deployment: sets TLS env variables for terminating-gateway-init when global.tls.enabled" { +@test "terminatingGateways/Deployment: envoy image can be set using the global value" { cd `chart_dir` - local env=$(helm template \ + local actual=$(helm template \ -s templates/terminating-gateways-deployment.yaml \ --set 'terminatingGateways.enabled=true' \ --set 'connectInject.enabled=true' \ - --set 'global.tls.enabled=true' \ + --set 'global.imageEnvoy=new/image' \ . | tee /dev/stderr | - yq -s -r '.[0].spec.template.spec.initContainers[0].env[]' | tee /dev/stderr) - - local actual=$(echo $env | jq -r '. | select(.name == "CONSUL_HTTP_PORT") | .value' | tee /dev/stderr) - [ "${actual}" = '8501' ] - - local actual=$(echo $env | jq -r '. | select(.name == "CONSUL_USE_TLS") | .value' | tee /dev/stderr) - [ "${actual}" = 'true' ] - - local actual=$(echo $env | jq -r '. | select(.name == "CONSUL_CACERT_FILE") | .value' | tee /dev/stderr) - [ "${actual}" = "/consul/tls/ca/tls.crt" ] + yq -s -r '.[0].spec.template.spec.containers[0].image' | tee /dev/stderr) + [ "${actual}" = "new/image" ] } -@test "terminatingGateways/Deployment: sets TLS env variables for terminating-gateway-init when global.tls.enabled=false" { +#-------------------------------------------------------------------- +# global.tls.enabled + +@test "terminatingGateways/Deployment: sets TLS env variables when global.tls.enabled" { cd `chart_dir` local env=$(helm template \ -s templates/terminating-gateways-deployment.yaml \ --set 'terminatingGateways.enabled=true' \ --set 'connectInject.enabled=true' \ - --set 'global.tls.enabled=false' \ + --set 'global.tls.enabled=true' \ . | tee /dev/stderr | - yq -s -r '.[0].spec.template.spec.initContainers[0].env[]' | tee /dev/stderr) - - local actual=$(echo $env | jq -r '. | select(.name == "CONSUL_HTTP_PORT") | .value' | tee /dev/stderr) - [ "${actual}" = '8500' ] - - local actual=$(echo $env | jq -r '. | select(.name == "CONSUL_USE_TLS")' | tee /dev/stderr) - [ "${actual}" = '' ] - - local actual=$(echo $env | jq -r '. 
| select(.name == "CONSUL_TLS_SERVER_NAME")' | tee /dev/stderr) - [ "${actual}" = "" ] + yq -s -r '.[0].spec.template.spec.containers[0].env[]' | tee /dev/stderr) - local actual=$(echo $env | jq -r '. | select(.name == "CONSUL_CACERT_FILE")' | tee /dev/stderr) - [ "${actual}" = "" ] -} + local actual=$(echo $env | jq -r '. | select(.name == "CONSUL_HTTP_ADDR") | .value' | tee /dev/stderr) + [ "${actual}" = 'https://$(HOST_IP):8501' ] -@test "terminatingGateways/Deployment: sets TLS flags for terminating-gateway when global.tls.enabled is false" { - cd `chart_dir` - local object=$(helm template \ - -s templates/terminating-gateways-deployment.yaml \ - --set 'terminatingGateways.enabled=true' \ - --set 'connectInject.enabled=true' \ - --set 'global.tls.enabled=false' \ - . | tee /dev/stderr | - yq -s -r '.[0].spec.template.spec.containers[0].command[2]' | tee /dev/stderr) + local actual=$(echo $env | jq -r '. | select(.name == "CONSUL_GRPC_ADDR") | .value' | tee /dev/stderr) + [ "${actual}" = 'https://$(HOST_IP):8502' ] - local actual=$(echo $object | yq -r '. | contains("-tls-disabled")' | tee /dev/stderr) - [ "${actual}" = "true" ] + local actual=$(echo $env | jq -r '. | select(.name == "CONSUL_CACERT") | .value' | tee /dev/stderr) + [ "${actual}" = "/consul/tls/ca/tls.crt" ] } -@test "terminatingGateways/Deployment: sets TLS flags for terminating-gateway when global.tls.enabled" { +@test "terminatingGateways/Deployment: sets TLS env variables in consul sidecar when global.tls.enabled" { cd `chart_dir` - local object=$(helm template \ + local env=$(helm template \ -s templates/terminating-gateways-deployment.yaml \ --set 'terminatingGateways.enabled=true' \ --set 'connectInject.enabled=true' \ --set 'global.tls.enabled=true' \ . | tee /dev/stderr | - yq -s -r '.[0].spec.template.spec.containers[0].command[2]' | tee /dev/stderr) + yq -s -r '.[0].spec.template.spec.containers[1].env[]' | tee /dev/stderr) - local actual=$(echo $object | yq -r '. | contains("-ca-certs=/consul/tls/ca/tls.crt")' | tee /dev/stderr) - [ "${actual}" = "true" ] + local actual=$(echo $env | jq -r '. | select(.name == "CONSUL_HTTP_ADDR") | .value' | tee /dev/stderr) + [ "${actual}" = 'https://$(HOST_IP):8501' ] + + local actual=$(echo $env | jq -r '. | select(.name == "CONSUL_CACERT") | .value' | tee /dev/stderr) + [ "${actual}" = "/consul/tls/ca/tls.crt" ] } @test "terminatingGateways/Deployment: can overwrite CA secret with the provided one" { @@ -188,21 +234,67 @@ load _helpers --set 'terminatingGateways.enabled=true' \ --set 'connectInject.enabled=true' \ --set 'global.tls.enabled=true' \ - . | yq '.spec.template.spec.volumes[] | select(.name == "consul-ca-cert")' | tee /dev/stderr ) + . | tee /dev/stderr | + yq '.spec.template.spec.volumes[] | select(.name == "consul-ca-cert")' | tee /dev/stderr) [ "${actual}" != "" ] } -@test "terminatingGateways/Deployment: CA cert volume omitted when TLS is enabled with external servers and use system roots" { +#-------------------------------------------------------------------- +# global.tls.enableAutoEncrypt + +@test "terminatingGateways/Deployment: consul-auto-encrypt-ca-cert volume is added when TLS with auto-encrypt is enabled" { cd `chart_dir` local actual=$(helm template \ - -s templates/terminating-gateways-deployment.yaml \ + -s templates/terminating-gateways-deployment.yaml \ --set 'terminatingGateways.enabled=true' \ --set 'connectInject.enabled=true' \ --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ + . 
| tee /dev/stderr | + yq -s '.[0].spec.template.spec.volumes[] | select(.name == "consul-auto-encrypt-ca-cert") | length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "terminatingGateways/Deployment: consul-auto-encrypt-ca-cert volumeMount is added when TLS with auto-encrypt is enabled" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/terminating-gateways-deployment.yaml \ + --set 'terminatingGateways.enabled=true' \ + --set 'connectInject.enabled=true' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ + . | tee /dev/stderr | + yq -s '.[0].spec.template.spec.containers[0].volumeMounts[] | select(.name == "consul-auto-encrypt-ca-cert") | length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "terminatingGateways/Deployment: get-auto-encrypt-client-ca init container is created when TLS with auto-encrypt is enabled" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/terminating-gateways-deployment.yaml \ + --set 'terminatingGateways.enabled=true' \ + --set 'connectInject.enabled=true' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ + . | tee /dev/stderr | + yq -s '.[0].spec.template.spec.initContainers[] | select(.name == "get-auto-encrypt-client-ca") | length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "terminatingGateways/Deployment: consul-ca-cert volume is not added if externalServers.enabled=true and externalServers.useSystemRoots=true" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/terminating-gateways-deployment.yaml \ + --set 'terminatingGateways.enabled=true' \ + --set 'connectInject.enabled=true' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ --set 'externalServers.enabled=true' \ + --set 'externalServers.hosts[0]=foo.com' \ --set 'externalServers.useSystemRoots=true' \ - . | yq '.[0]spec.template.spec.volumes[] | select(.name == "consul-ca-cert")' | tee /dev/stderr ) - [ "${actual}" == "" ] + . | tee /dev/stderr | + yq -s '.[0].spec.template.spec.volumes[] | select(.name == "consul-ca-cert")' | tee /dev/stderr) + [ "${actual}" = "" ] } @test "terminatingGateways/Deployment: serviceAccountName is set properly" { @@ -222,73 +314,115 @@ load _helpers #-------------------------------------------------------------------- # global.acls.manageSystemACLs -@test "terminatingGateways/Deployment: Adds consul envvars on terminating-gateway-init init container when ACLs are enabled and tls is enabled" { +@test "terminatingGateways/Deployment: consul-sidecar uses -token-file flag when global.acls.manageSystemACLs=true" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/terminating-gateways-deployment.yaml \ + --set 'terminatingGateways.enabled=true' \ + --set 'connectInject.enabled=true' \ + --set 'global.acls.manageSystemACLs=true' \ + . | tee /dev/stderr | + yq -s '.[0].spec.template.spec.containers[1].command | any(contains("-token-file=/consul/service/acl-token"))' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "terminatingGateways/Deployment: Adds consul envvars CONSUL_HTTP_ADDR on terminating-gateway-init init container when ACLs are enabled and tls is enabled" { cd `chart_dir` local env=$(helm template \ -s templates/terminating-gateways-deployment.yaml \ --set 'terminatingGateways.enabled=true' \ --set 'connectInject.enabled=true' \ --set 'global.acls.manageSystemACLs=true' \ + --set 'global.tls.enabled=true' \ . 
| tee /dev/stderr | - yq -r '.spec.template.spec.initContainers[0].env[]' | tee /dev/stderr) + yq -r '.spec.template.spec.initContainers[1].env[]' | tee /dev/stderr) - local actual=$(echo $env | jq -r '. | select(.name == "CONSUL_LOGIN_AUTH_METHOD") | .value' | tee /dev/stderr) - [ "${actual}" = "release-name-consul-k8s-component-auth-method" ] + local actual + actual=$(echo $env | jq -r '. | select(.name == "CONSUL_HTTP_ADDR") | .value' | tee /dev/stderr) + [ "${actual}" = "https://\$(HOST_IP):8501" ] +} - local actual=$(echo $env | jq -r '. | select(.name == "CONSUL_LOGIN_DATACENTER") | .value' | tee /dev/stderr) - [ "${actual}" = "dc1" ] +@test "terminatingGateways/Deployment: Adds consul envvars CONSUL_HTTP_ADDR on terminating-gateway-init init container when ACLs are enabled and tls is not enabled" { + cd `chart_dir` + local env=$(helm template \ + -s templates/terminating-gateways-deployment.yaml \ + --set 'connectInject.enabled=true' \ + --set 'terminatingGateways.enabled=true' \ + --set 'global.acls.manageSystemACLs=true' \ + . | tee /dev/stderr | + yq -r '.spec.template.spec.initContainers[1].env[]' | tee /dev/stderr) - local actual=$(echo $env | jq -r '. | select(.name == "CONSUL_LOGIN_META") | .value' | tee /dev/stderr) - [ "${actual}" = 'component=terminating-gateway,pod=$(NAMESPACE)/$(POD_NAME)' ] + local actual + actual=$(echo $env | jq -r '. | select(.name == "CONSUL_HTTP_ADDR") | .value' | tee /dev/stderr) + [ "${actual}" = "http://\$(HOST_IP):8500" ] } -@test "terminatingGateways/Deployment: ACL flags are not set when acls are disabled" { +@test "terminatingGateways/Deployment: Does not add consul envvars CONSUL_CACERT on terminating-gateway-init init container when ACLs are enabled and tls is not enabled" { cd `chart_dir` - local object=$(helm template \ - -s templates/terminating-gateways-deployment.yaml \ + local actual=$(helm template \ + -s templates/terminating-gateways-deployment.yaml \ --set 'connectInject.enabled=true' \ --set 'terminatingGateways.enabled=true' \ - --set 'global.acls.manageSystemACLs=false' \ + --set 'global.acls.manageSystemACLs=true' \ . | tee /dev/stderr | - yq -s -r '.[0].spec.template.spec.containers[0].command[2]' | tee /dev/stderr) + yq '.spec.template.spec.initContainers[1].env[] | select(.name == "CONSUL_CACERT")' | tee /dev/stderr) - local actual=$(echo $object | yq -r '. | contains("-login-bearer-path")' | tee /dev/stderr) - [ "${actual}" = "false" ] + [ "${actual}" = "" ] +} - local actual=$(echo $object | yq -r '. | contains("-login-meta")' | tee /dev/stderr) - [ "${actual}" = "false" ] +@test "terminatingGateways/Deployment: Adds consul envvars CONSUL_CACERT on terminating-gateway-init init container when ACLs are enabled and tls is enabled" { + cd `chart_dir` + local env=$(helm template \ + -s templates/terminating-gateways-deployment.yaml \ + --set 'connectInject.enabled=true' \ + --set 'terminatingGateways.enabled=true' \ + --set 'global.acls.manageSystemACLs=true' \ + --set 'global.tls.enabled=true' \ + . | tee /dev/stderr | + yq -r '.spec.template.spec.initContainers[1].env[]' | tee /dev/stderr) - local actual=$(echo $object | yq -r '. | contains("-login-method")' | tee /dev/stderr) - [ "${actual}" = "false" ] + local actual=$(echo $env | jq -r '. | select(.name == "CONSUL_CACERT") | .value' | tee /dev/stderr) + [ "${actual}" = "/consul/tls/ca/tls.crt" ] +} - local actual=$(echo $object | yq -r '. 
| contains("-credential-type=login")' | tee /dev/stderr) +@test "terminatingGateways/Deployment: CONSUL_HTTP_TOKEN_FILE is not set when acls are disabled" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/terminating-gateways-deployment.yaml \ + --set 'connectInject.enabled=true' \ + --set 'terminatingGateways.enabled=true' \ + --set 'global.acls.manageSystemACLs=false' \ + . | tee /dev/stderr | + yq '[.spec.template.spec.containers[0].env[0].name] | any(contains("CONSUL_HTTP_TOKEN_FILE"))' | tee /dev/stderr) [ "${actual}" = "false" ] } -@test "terminatingGateways/Deployment: command flags are set when acls are enabled" { +@test "terminatingGateways/Deployment: CONSUL_HTTP_TOKEN_FILE is set when acls are enabled" { cd `chart_dir` - local object=$(helm template \ + local actual=$(helm template \ -s templates/terminating-gateways-deployment.yaml \ --set 'terminatingGateways.enabled=true' \ --set 'connectInject.enabled=true' \ --set 'global.acls.manageSystemACLs=true' \ . | tee /dev/stderr | - yq -s -r '.[0].spec.template.spec.containers[0].command[2]' | tee /dev/stderr) - - local actual=$(echo $object | yq -r '. | contains("-login-bearer-token-path=/var/run/secrets/kubernetes.io/serviceaccount/token")' | tee /dev/stderr) - [ "${actual}" = "true" ] - - local actual=$(echo $object | yq -r '. | contains("-login-meta=component=terminating-gateway")' | tee /dev/stderr) - [ "${actual}" = "true" ] - - local actual=$(echo $object | yq -r '. | contains("-login-auth-method=release-name-consul-k8s-component-auth-method")' | tee /dev/stderr) + yq -s '[.[0].spec.template.spec.containers[0].env[].name] | any(contains("CONSUL_HTTP_TOKEN_FILE"))' | tee /dev/stderr) [ "${actual}" = "true" ] +} - local actual=$(echo $object | yq -r '. | contains("-credential-type=login")' | tee /dev/stderr) +@test "terminatingGateways/Deployment: consul-logout preStop hook is added when ACLs are enabled" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/terminating-gateways-deployment.yaml \ + --set 'terminatingGateways.enabled=true' \ + --set 'connectInject.enabled=true' \ + --set 'terminatingGateways.enabled=true' \ + --set 'global.acls.manageSystemACLs=true' \ + . | tee /dev/stderr | + yq '[.spec.template.spec.containers[0].lifecycle.preStop.exec.command[3]] | any(contains("/consul-bin/consul logout"))' | tee /dev/stderr) [ "${actual}" = "true" ] } - #-------------------------------------------------------------------- # metrics @@ -328,7 +462,20 @@ load _helpers [ "${actual}" = "/metrics" ] } -@test "terminatingGateways/Deployment: when global.metrics.enableGatewayMetrics=false, does not set prometheus annotations" { +@test "terminatingGateways/Deployment: when global.metrics.enabled=true, sets proxy setting" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/terminating-gateways-deployment.yaml \ + --set 'terminatingGateways.enabled=true' \ + --set 'connectInject.enabled=true' \ + --set 'global.metrics.enabled=true' \ + . | tee /dev/stderr | + yq '.spec.template.spec.initContainers[1].command | join(" ") | contains("envoy_prometheus_bind_addr = \"${POD_IP}:20200\"")' | tee /dev/stderr) + + [ "${actual}" = "true" ] +} + +@test "terminatingGateways/Deployment: when global.metrics.enableGatewayMetrics=false, does not set proxy setting" { cd `chart_dir` local object=$(helm template \ -s templates/terminating-gateways-deployment.yaml \ @@ -339,6 +486,9 @@ load _helpers . 
| tee /dev/stderr | yq '.spec.template' | tee /dev/stderr) + local actual=$(echo $object | yq -r '.spec.initContainers[1].command | join(" ") | contains("envoy_prometheus_bind_addr = \"${POD_IP}:20200\"")' | tee /dev/stderr) + [ "${actual}" = "false" ] + local actual=$(echo $object | yq -s -r '.[0].metadata.annotations."prometheus.io/path"' | tee /dev/stderr) [ "${actual}" = "null" ] @@ -349,7 +499,7 @@ load _helpers [ "${actual}" = "null" ] } -@test "terminatingGateways/Deployment: when global.metrics.enabled=false, does not set prometheus annotations" { +@test "terminatingGateways/Deployment: when global.metrics.enabled=false, does not set proxy setting" { cd `chart_dir` local object=$(helm template \ -s templates/terminating-gateways-deployment.yaml \ @@ -359,6 +509,9 @@ load _helpers . | tee /dev/stderr | yq '.spec.template' | tee /dev/stderr) + local actual=$(echo $object | yq -r '.spec.initContainers[1].command | join(" ") | contains("envoy_prometheus_bind_addr = \"${POD_IP}:20200\"")' | tee /dev/stderr) + [ "${actual}" = "false" ] + local actual=$(echo $object | yq -s -r '.[0].metadata.annotations."prometheus.io/path"' | tee /dev/stderr) [ "${actual}" = "null" ] @@ -629,6 +782,150 @@ load _helpers [ "${actual}" = "gwcpu2" ] } +#-------------------------------------------------------------------- +# init container resources + +@test "terminatingGateways/Deployment: init container has default resources" { + cd `chart_dir` + local object=$(helm template \ + -s templates/terminating-gateways-deployment.yaml \ + --set 'terminatingGateways.enabled=true' \ + --set 'connectInject.enabled=true' \ + . | tee /dev/stderr | + yq -s -r '.[0].spec.template.spec.initContainers[0].resources' | tee /dev/stderr) + + local actual=$(echo $object | yq -r '.requests.memory' | tee /dev/stderr) + [ "${actual}" = "25Mi" ] + + local actual=$(echo $object | yq -r '.requests.cpu' | tee /dev/stderr) + [ "${actual}" = "50m" ] + + local actual=$(echo $object | yq -r '.limits.memory' | tee /dev/stderr) + [ "${actual}" = "150Mi" ] + + local actual=$(echo $object | yq -r '.limits.cpu' | tee /dev/stderr) + [ "${actual}" = "50m" ] +} + +@test "terminatingGateways/Deployment: init container resources can be set through defaults" { + cd `chart_dir` + local object=$(helm template \ + -s templates/terminating-gateways-deployment.yaml \ + --set 'terminatingGateways.enabled=true' \ + --set 'connectInject.enabled=true' \ + --set 'terminatingGateways.defaults.initCopyConsulContainer.resources.requests.memory=memory' \ + --set 'terminatingGateways.defaults.initCopyConsulContainer.resources.requests.cpu=cpu' \ + --set 'terminatingGateways.defaults.initCopyConsulContainer.resources.limits.memory=memory2' \ + --set 'terminatingGateways.defaults.initCopyConsulContainer.resources.limits.cpu=cpu2' \ + . 
| tee /dev/stderr | + yq -s -r '.[0].spec.template.spec.initContainers[0].resources' | tee /dev/stderr) + + local actual=$(echo $object | yq -r '.requests.memory' | tee /dev/stderr) + [ "${actual}" = "memory" ] + + local actual=$(echo $object | yq -r '.requests.cpu' | tee /dev/stderr) + [ "${actual}" = "cpu" ] + + local actual=$(echo $object | yq -r '.limits.memory' | tee /dev/stderr) + [ "${actual}" = "memory2" ] + + local actual=$(echo $object | yq -r '.limits.cpu' | tee /dev/stderr) + [ "${actual}" = "cpu2" ] +} + +@test "terminatingGateways/Deployment: init container resources can be set through specific gateway, overriding defaults" { + cd `chart_dir` + local object=$(helm template \ + -s templates/terminating-gateways-deployment.yaml \ + --set 'terminatingGateways.enabled=true' \ + --set 'connectInject.enabled=true' \ + --set 'terminatingGateways.defaults.initCopyConsulContainer.resources.requests.memory=memory' \ + --set 'terminatingGateways.defaults.initCopyConsulContainer.resources.requests.cpu=cpu' \ + --set 'terminatingGateways.defaults.initCopyConsulContainer.resources.limits.memory=memory2' \ + --set 'terminatingGateways.defaults.initCopyConsulContainer.resources.limits.cpu=cpu2' \ + --set 'terminatingGateways.gateways[0].name=gateway1' \ + --set 'terminatingGateways.gateways[0].initCopyConsulContainer.resources.requests.memory=gwmemory' \ + --set 'terminatingGateways.gateways[0].initCopyConsulContainer.resources.requests.cpu=gwcpu' \ + --set 'terminatingGateways.gateways[0].initCopyConsulContainer.resources.limits.memory=gwmemory2' \ + --set 'terminatingGateways.gateways[0].initCopyConsulContainer.resources.limits.cpu=gwcpu2' \ + . | tee /dev/stderr | + yq -s '.[0].spec.template.spec.initContainers[0].resources' | tee /dev/stderr) + + local actual=$(echo $object | yq -r '.requests.memory' | tee /dev/stderr) + [ "${actual}" = "gwmemory" ] + + local actual=$(echo $object | yq -r '.requests.cpu' | tee /dev/stderr) + [ "${actual}" = "gwcpu" ] + + local actual=$(echo $object | yq -r '.limits.memory' | tee /dev/stderr) + [ "${actual}" = "gwmemory2" ] + + local actual=$(echo $object | yq -r '.limits.cpu' | tee /dev/stderr) + [ "${actual}" = "gwcpu2" ] +} + +#-------------------------------------------------------------------- +# consul sidecar resources + +@test "terminatingGateways/Deployment: consul sidecar has default resources" { + cd `chart_dir` + local object=$(helm template \ + -s templates/terminating-gateways-deployment.yaml \ + --set 'terminatingGateways.enabled=true' \ + --set 'connectInject.enabled=true' \ + . 
| tee /dev/stderr | + yq -s -r '.[0].spec.template.spec.containers[1].resources' | tee /dev/stderr) + + local actual=$(echo $object | yq -r '.requests.memory' | tee /dev/stderr) + [ "${actual}" = "25Mi" ] + + local actual=$(echo $object | yq -r '.requests.cpu' | tee /dev/stderr) + [ "${actual}" = "20m" ] + + local actual=$(echo $object | yq -r '.limits.memory' | tee /dev/stderr) + [ "${actual}" = "50Mi" ] + + local actual=$(echo $object | yq -r '.limits.cpu' | tee /dev/stderr) + [ "${actual}" = "20m" ] +} + +@test "terminatingGateways/Deployment: consul sidecar resources can be set" { + cd `chart_dir` + local object=$(helm template \ + -s templates/terminating-gateways-deployment.yaml \ + --set 'terminatingGateways.enabled=true' \ + --set 'connectInject.enabled=true' \ + --set 'global.consulSidecarContainer.resources.requests.memory=memory' \ + --set 'global.consulSidecarContainer.resources.requests.cpu=cpu' \ + --set 'global.consulSidecarContainer.resources.limits.memory=memory2' \ + --set 'global.consulSidecarContainer.resources.limits.cpu=cpu2' \ + . | tee /dev/stderr | + yq -s -r '.[0].spec.template.spec.containers[1].resources' | tee /dev/stderr) + + local actual=$(echo $object | yq -r '.requests.memory' | tee /dev/stderr) + [ "${actual}" = "memory" ] + + local actual=$(echo $object | yq -r '.requests.cpu' | tee /dev/stderr) + [ "${actual}" = "cpu" ] + + local actual=$(echo $object | yq -r '.limits.memory' | tee /dev/stderr) + [ "${actual}" = "memory2" ] + + local actual=$(echo $object | yq -r '.limits.cpu' | tee /dev/stderr) + [ "${actual}" = "cpu2" ] +} + +@test "terminatingGateways/Deployment: fails if global.lifecycleSidecarContainer is set" { + cd `chart_dir` + run helm template \ + -s templates/terminating-gateways-deployment.yaml \ + --set 'terminatingGateways.enabled=true' \ + --set 'connectInject.enabled=true' \ + --set 'global.lifecycleSidecarContainer.resources.requests.memory=100Mi' . + [ "$status" -eq 1 ] + [[ "$output" =~ "global.lifecycleSidecarContainer has been renamed to global.consulSidecarContainer. Please set values using global.consulSidecarContainer." ]] +} + #-------------------------------------------------------------------- # affinity @@ -840,7 +1137,7 @@ load _helpers --set 'connectInject.enabled=true' \ . | tee /dev/stderr | yq -s -r '.[0].spec.template.metadata.annotations | length' | tee /dev/stderr) - [ "${actual}" = "3" ] + [ "${actual}" = "1" ] } @test "terminatingGateways/Deployment: extra annotations can be set through defaults" { @@ -855,7 +1152,7 @@ key2: value2' \ yq -s -r '.[0].spec.template.metadata.annotations' | tee /dev/stderr) local actual=$(echo $object | yq '. | length' | tee /dev/stderr) - [ "${actual}" = "5" ] + [ "${actual}" = "3" ] local actual=$(echo $object | yq -r '.key1' | tee /dev/stderr) [ "${actual}" = "value1" ] @@ -877,7 +1174,7 @@ key2: value2' \ yq -s -r '.[0].spec.template.metadata.annotations' | tee /dev/stderr) local actual=$(echo $object | yq '. | length' | tee /dev/stderr) - [ "${actual}" = "5" ] + [ "${actual}" = "3" ] local actual=$(echo $object | yq -r '.key1' | tee /dev/stderr) [ "${actual}" = "value1" ] @@ -900,7 +1197,7 @@ key2: value2' \ yq -s -r '.[0].spec.template.metadata.annotations' | tee /dev/stderr) local actual=$(echo $object | yq '. 
| length' | tee /dev/stderr) - [ "${actual}" = "6" ] + [ "${actual}" = "4" ] local actual=$(echo $object | yq -r '.defaultkey' | tee /dev/stderr) [ "${actual}" = "defaultvalue" ] @@ -913,23 +1210,87 @@ key2: value2' \ } #-------------------------------------------------------------------- -# consul namespaces +# terminating-gateway-init init container command -@test "terminatingGateways/Deployment: namespace annotation is not present by default" { +@test "terminatingGateways/Deployment: terminating-gateway-init init container defaults" { cd `chart_dir` - local object=$(helm template \ + local actual=$(helm template \ -s templates/terminating-gateways-deployment.yaml \ --set 'terminatingGateways.enabled=true' \ --set 'connectInject.enabled=true' \ . | tee /dev/stderr | - yq -s -r '.[0].spec.template.metadata.annotations' | tee /dev/stderr) + yq -s -r '.[0].spec.template.spec.initContainers | map(select(.name == "terminating-gateway-init"))[0] | .command[2]' | tee /dev/stderr) - local actual=$(echo $object | yq -r 'any(contains("consul.hashicorp.com/gateway-namespace"))' | tee /dev/stderr) - [ "${actual}" = "false" ] + exp=' +cat > /consul/service/service.hcl << EOF +service { + kind = "terminating-gateway" + name = "terminating-gateway" + id = "${POD_NAME}" + address = "${POD_IP}" + port = 8443 + checks = [ + { + name = "Terminating Gateway Listening" + interval = "10s" + tcp = "${POD_IP}:8443" + deregister_critical_service_after = "6h" + } + ] } +EOF +/consul-bin/consul services register \ + /consul/service/service.hcl' + + [ "${actual}" = "${exp}" ] +} -@test "terminatingGateways/Deployment: consulNamespace is set as an annotation" { +@test "terminatingGateways/Deployment: terminating-gateway-init init container with acls.manageSystemACLs=true" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/terminating-gateways-deployment.yaml \ + --set 'terminatingGateways.enabled=true' \ + --set 'connectInject.enabled=true' \ + --set 'global.acls.manageSystemACLs=true' \ + --set 'terminatingGateways.gateways[0].name=terminating' \ + . | tee /dev/stderr | + yq -s -r '.[0].spec.template.spec.initContainers | map(select(.name == "terminating-gateway-init"))[0] | .command[2]' | tee /dev/stderr) + + exp='consul-k8s-control-plane acl-init \ + -component-name=terminating-gateway/release-name-consul-terminating \ + -acl-auth-method=release-name-consul-k8s-component-auth-method \ + -token-sink-file=/consul/service/acl-token \ + -consul-api-timeout=5s \ + -log-level=info \ + -log-json=false + +cat > /consul/service/service.hcl << EOF +service { + kind = "terminating-gateway" + name = "terminating" + id = "${POD_NAME}" + address = "${POD_IP}" + port = 8443 + checks = [ + { + name = "Terminating Gateway Listening" + interval = "10s" + tcp = "${POD_IP}:8443" + deregister_critical_service_after = "6h" + } + ] +} +EOF + +/consul-bin/consul services register \ + -token-file=/consul/service/acl-token \ + /consul/service/service.hcl' + + [ "${actual}" = "${exp}" ] +} + +@test "terminatingGateways/Deployment: terminating-gateway-init init container gateway namespace can be specified through defaults" { cd `chart_dir` local actual=$(helm template \ -s templates/terminating-gateways-deployment.yaml \ @@ -938,12 +1299,35 @@ key2: value2' \ --set 'global.enableConsulNamespaces=true' \ --set 'terminatingGateways.defaults.consulNamespace=namespace' \ . 
| tee /dev/stderr | - yq -s -r '.[0].spec.template.metadata.annotations."consul.hashicorp.com/gateway-namespace"' | tee /dev/stderr) + yq -s -r '.[0].spec.template.spec.initContainers | map(select(.name == "terminating-gateway-init"))[0] | .command[2]' | tee /dev/stderr) + + exp=' +cat > /consul/service/service.hcl << EOF +service { + kind = "terminating-gateway" + name = "terminating-gateway" + id = "${POD_NAME}" + namespace = "namespace" + address = "${POD_IP}" + port = 8443 + checks = [ + { + name = "Terminating Gateway Listening" + interval = "10s" + tcp = "${POD_IP}:8443" + deregister_critical_service_after = "6h" + } + ] +} +EOF + +/consul-bin/consul services register \ + /consul/service/service.hcl' - [ "${actual}" = "namespace" ] + [ "${actual}" = "${exp}" ] } -@test "terminatingGateways/Deployment: consulNamespace is set as an annotation when set on the individual gateway" { +@test "terminatingGateways/Deployment: terminating-gateway-init init container gateway namespace can be specified through specific gateway overriding defaults" { cd `chart_dir` local actual=$(helm template \ -s templates/terminating-gateways-deployment.yaml \ @@ -954,9 +1338,90 @@ key2: value2' \ --set 'terminatingGateways.gateways[0].name=terminating-gateway' \ --set 'terminatingGateways.gateways[0].consulNamespace=new-namespace' \ . | tee /dev/stderr | - yq -s -r '.[0].spec.template.metadata.annotations."consul.hashicorp.com/gateway-namespace"' | tee /dev/stderr) + yq -s -r '.[0].spec.template.spec.initContainers | map(select(.name == "terminating-gateway-init"))[0] | .command[2]' | tee /dev/stderr) + + exp=' +cat > /consul/service/service.hcl << EOF +service { + kind = "terminating-gateway" + name = "terminating-gateway" + id = "${POD_NAME}" + namespace = "new-namespace" + address = "${POD_IP}" + port = 8443 + checks = [ + { + name = "Terminating Gateway Listening" + interval = "10s" + tcp = "${POD_IP}:8443" + deregister_critical_service_after = "6h" + } + ] +} +EOF + +/consul-bin/consul services register \ + /consul/service/service.hcl' + + [ "${actual}" = "${exp}" ] +} + +#-------------------------------------------------------------------- +# namespaces + +@test "terminatingGateways/Deployment: namespace command flag is not present by default" { + cd `chart_dir` + local object=$(helm template \ + -s templates/terminating-gateways-deployment.yaml \ + --set 'terminatingGateways.enabled=true' \ + --set 'connectInject.enabled=true' \ + . | tee /dev/stderr | + yq -s -r '.[0].spec.template.spec.containers[0]' | tee /dev/stderr) + + local actual=$(echo $object | yq -r '.command | any(contains("-namespace"))' | tee /dev/stderr) + [ "${actual}" = "false" ] - [ "${actual}" = "new-namespace" ] + local actual=$(echo $object | yq -r '.lifecycle.preStop.exec.command | any(contains("-namespace"))' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "terminatingGateways/Deployment: namespace command flag is specified through defaults" { + cd `chart_dir` + local object=$(helm template \ + -s templates/terminating-gateways-deployment.yaml \ + --set 'terminatingGateways.enabled=true' \ + --set 'connectInject.enabled=true' \ + --set 'global.enableConsulNamespaces=true' \ + --set 'terminatingGateways.defaults.consulNamespace=namespace' \ + . 
| tee /dev/stderr | + yq -s -r '.[0].spec.template.spec.containers[0]' | tee /dev/stderr) + + local actual=$(echo $object | yq -r '.command | any(contains("-namespace=namespace"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | yq -r '.lifecycle.preStop.exec.command | any(contains("-namespace=namespace"))' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "terminatingGateways/Deployment: namespace command flag is specified through specific gateway overriding defaults" { + cd `chart_dir` + local object=$(helm template \ + -s templates/terminating-gateways-deployment.yaml \ + --set 'terminatingGateways.enabled=true' \ + --set 'connectInject.enabled=true' \ + --set 'global.enableConsulNamespaces=true' \ + --set 'terminatingGateways.defaults.consulNamespace=namespace' \ + --set 'terminatingGateways.gateways[0].name=terminating-gateway' \ + --set 'terminatingGateways.gateways[0].consulNamespace=new-namespace' \ + . | tee /dev/stderr | + yq -s -r '.[0].spec.template.spec.containers[0]' | tee /dev/stderr) + + local actual=$(echo $object | yq -r '.command | any(contains("-namespace=new-namespace"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + + local actual=$(echo $object | yq -r '.lifecycle.preStop.exec.command | any(contains("-namespace=new-namespace"))' | tee /dev/stderr) + [ "${actual}" = "true" ] } #-------------------------------------------------------------------- @@ -973,24 +1438,29 @@ key2: value2' \ local actual=$(echo $object | yq -r '.command | any(contains("-partition"))' | tee /dev/stderr) [ "${actual}" = "false" ] + + local actual=$(echo $object | yq -r '.lifecycle.preStop.exec.command | any(contains("-partition"))' | tee /dev/stderr) + [ "${actual}" = "false" ] } -# TODO re-enable this when integrating dataplane -# @test "terminatingGateways/Deployment: partition command flag is specified through partition name" { -# cd `chart_dir` -# local object=$(helm template \ -# -s templates/terminating-gateways-deployment.yaml \ -# --set 'terminatingGateways.enabled=true' \ -# --set 'connectInject.enabled=true' \ -# --set 'global.enableConsulNamespaces=true' \ -# --set 'global.adminPartitions.enabled=true' \ -# --set 'global.adminPartitions.name=default' \ -# . | tee /dev/stderr | -# yq -s -r '.[0].spec.template.spec.containers[0]' | tee /dev/stderr) +@test "terminatingGateways/Deployment: partition command flag is specified through partition name" { + cd `chart_dir` + local object=$(helm template \ + -s templates/terminating-gateways-deployment.yaml \ + --set 'terminatingGateways.enabled=true' \ + --set 'connectInject.enabled=true' \ + --set 'global.enableConsulNamespaces=true' \ + --set 'global.adminPartitions.enabled=true' \ + --set 'global.adminPartitions.name=default' \ + . 
| tee /dev/stderr | + yq -s -r '.[0].spec.template.spec.containers[0]' | tee /dev/stderr) -# local actual=$(echo $object | yq -r '.command | any(contains("-partition=default"))' | tee /dev/stderr) -# [ "${actual}" = "true" ] -# } + local actual=$(echo $object | yq -r '.command | any(contains("-partition=default"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | yq -r '.lifecycle.preStop.exec.command | any(contains("-partition=default"))' | tee /dev/stderr) + [ "${actual}" = "true" ] +} @test "terminatingGateways/Deployment: fails if admin partitions are enabled but namespaces aren't" { cd `chart_dir` @@ -1035,16 +1505,49 @@ key2: value2' \ [ "${actual}" = "false" ] } +#-------------------------------------------------------------------- +# get-auto-encrypt-client-ca + +@test "terminatingGateways/Deployment: get-auto-encrypt-client-ca uses server's stateful set address by default and passes ca cert" { + cd `chart_dir` + local command=$(helm template \ + -s templates/terminating-gateways-deployment.yaml \ + --set 'terminatingGateways.enabled=true' \ + --set 'connectInject.enabled=true' \ + --set 'terminatingGateways.gateways[0].name=gateway1' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ + . | tee /dev/stderr | + yq '.spec.template.spec.initContainers[] | select(.name == "get-auto-encrypt-client-ca").command | join(" ")' | tee /dev/stderr) + + # check server address + actual=$(echo $command | jq ' . | contains("-server-addr=release-name-consul-server")') + [ "${actual}" = "true" ] + + # check server port + actual=$(echo $command | jq ' . | contains("-server-port=8501")') + [ "${actual}" = "true" ] + + # check server's CA cert + actual=$(echo $command | jq ' . | contains("-ca-file=/consul/tls/ca/tls.crt")') + [ "${actual}" = "true" ] + + # check consul-api-timeout + actual=$(echo $command | jq ' . 
| contains("-consul-api-timeout=5s")') + [ "${actual}" = "true" ] +} + #-------------------------------------------------------------------- # Vault -@test "terminatingGateways/Deployment: configures server CA to come from vault when vault is enabled" { +@test "terminatingGateway/Deployment: configures server CA to come from vault when vault is enabled" { cd `chart_dir` local object=$(helm template \ -s templates/terminating-gateways-deployment.yaml \ --set 'terminatingGateways.enabled=true' \ --set 'connectInject.enabled=true' \ --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ --set 'global.tls.caCert.secretName=foo' \ --set 'global.secretsBackend.vault.enabled=true' \ --set 'global.secretsBackend.vault.consulClientRole=test' \ @@ -1070,13 +1573,14 @@ key2: value2' \ [ "${actual}" = $'{{- with secret \"foo\" -}}\n{{- .Data.certificate -}}\n{{- end -}}' ] } -@test "terminatingGateways/Deployment: vault CA is not configured by default" { +@test "terminatingGateway/Deployment: vault CA is not configured by default" { cd `chart_dir` local object=$(helm template \ -s templates/terminating-gateways-deployment.yaml \ --set 'terminatingGateways.enabled=true' \ --set 'connectInject.enabled=true' \ --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ --set 'global.tls.caCert.secretName=foo' \ --set 'global.secretsBackend.vault.enabled=true' \ --set 'global.secretsBackend.vault.consulClientRole=foo' \ @@ -1090,13 +1594,14 @@ key2: value2' \ [ "${actual}" = "false" ] } -@test "terminatingGateways/Deployment: vault CA is not configured when secretName is set but secretKey is not" { +@test "terminatingGateway/Deployment: vault CA is not configured when secretName is set but secretKey is not" { cd `chart_dir` local object=$(helm template \ -s templates/terminating-gateways-deployment.yaml \ --set 'terminatingGateways.enabled=true' \ --set 'connectInject.enabled=true' \ --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ --set 'global.tls.caCert.secretName=foo' \ --set 'global.secretsBackend.vault.enabled=true' \ --set 'global.secretsBackend.vault.consulClientRole=foo' \ @@ -1111,13 +1616,14 @@ key2: value2' \ [ "${actual}" = "false" ] } -@test "terminatingGateways/Deployment: vault CA is not configured when secretKey is set but secretName is not" { +@test "terminatingGateway/Deployment: vault CA is not configured when secretKey is set but secretName is not" { cd `chart_dir` local object=$(helm template \ -s templates/terminating-gateways-deployment.yaml \ --set 'terminatingGateways.enabled=true' \ --set 'connectInject.enabled=true' \ --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ --set 'global.tls.caCert.secretName=foo' \ --set 'global.secretsBackend.vault.enabled=true' \ --set 'global.secretsBackend.vault.consulClientRole=foo' \ @@ -1132,13 +1638,14 @@ key2: value2' \ [ "${actual}" = "false" ] } -@test "terminatingGateways/Deployment: vault CA is configured when both secretName and secretKey are set" { +@test "terminatingGateway/Deployment: vault CA is configured when both secretName and secretKey are set" { cd `chart_dir` local object=$(helm template \ -s templates/terminating-gateways-deployment.yaml \ --set 'terminatingGateways.enabled=true' \ --set 'connectInject.enabled=true' \ --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ --set 'global.tls.caCert.secretName=foo' \ --set 'global.secretsBackend.vault.enabled=true' \ --set 
'global.secretsBackend.vault.consulClientRole=foo' \ @@ -1157,7 +1664,7 @@ key2: value2' \ #-------------------------------------------------------------------- # Vault agent annotations -@test "terminatingGateways/Deployment: no vault agent annotations defined by default" { +@test "terminatingGateway/Deployment: no vault agent annotations defined by default" { cd `chart_dir` local actual=$(helm template \ -s templates/terminating-gateways-deployment.yaml \ @@ -1169,17 +1676,18 @@ key2: value2' \ --set 'global.tls.caCert.secretName=foo' \ --set 'global.secretsBackend.vault.consulCARole=carole' \ . | tee /dev/stderr | - yq -r '.spec.template.metadata.annotations | del(."consul.hashicorp.com/connect-inject") | del(."vault.hashicorp.com/agent-inject") | del(."vault.hashicorp.com/role") | del(."consul.hashicorp.com/gateway-consul-service-name") | del(."consul.hashicorp.com/gateway-kind")' | tee /dev/stderr) + yq -r '.spec.template.metadata.annotations | del(."consul.hashicorp.com/connect-inject") | del(."vault.hashicorp.com/agent-inject") | del(."vault.hashicorp.com/role")' | tee /dev/stderr) [ "${actual}" = "{}" ] } -@test "terminatingGateways/Deployment: vault agent annotations can be set" { +@test "terminatingGateway/Deployment: vault agent annotations can be set" { cd `chart_dir` local actual=$(helm template \ -s templates/terminating-gateways-deployment.yaml \ --set 'terminatingGateways.enabled=true' \ --set 'connectInject.enabled=true' \ --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ --set 'global.secretsBackend.vault.enabled=true' \ --set 'global.secretsBackend.vault.consulClientRole=test' \ --set 'global.secretsBackend.vault.consulServerRole=foo' \ @@ -1190,203 +1698,3 @@ key2: value2' \ yq -r '.spec.template.metadata.annotations.foo' | tee /dev/stderr) [ "${actual}" = "bar" ] } - -#-------------------------------------------------------------------- -# global.cloud - -@test "terminatingGateways/Deployment: fails when global.cloud.enabled is true and global.cloud.clientId.secretName is not set but global.cloud.clientSecret.secretName and global.cloud.resourceId.secretName is set" { - cd `chart_dir` - run helm template \ - -s templates/terminating-gateways-deployment.yaml \ - --set 'terminatingGateways.enabled=true' \ - --set 'global.cloud.enabled=true' \ - --set 'global.cloud.clientSecret.secretName=client-id-name' \ - --set 'global.cloud.clientSecret.secretKey=client-id-key' \ - --set 'global.cloud.resourceId.secretName=client-resource-id-name' \ - --set 'global.cloud.resourceId.secretKey=client-resource-id-key' \ - . - [ "$status" -eq 1 ] - [[ "$output" =~ "When global.cloud.enabled is true, global.cloud.resourceId.secretName, global.cloud.clientId.secretName, and global.cloud.clientSecret.secretName must also be set." ]] -} - -@test "terminatingGateways/Deployment: fails when global.cloud.enabled is true and global.cloud.clientSecret.secretName is not set but global.cloud.clientId.secretName and global.cloud.resourceId.secretName is set" { - cd `chart_dir` - run helm template \ - -s templates/terminating-gateways-deployment.yaml \ - --set 'terminatingGateways.enabled=true' \ - --set 'global.cloud.enabled=true' \ - --set 'global.cloud.clientId.secretName=client-id-name' \ - --set 'global.cloud.clientId.secretKey=client-id-key' \ - --set 'global.cloud.resourceId.secretName=resource-id-name' \ - --set 'global.cloud.resourceId.secretKey=resource-id-key' \ - . 
- [ "$status" -eq 1 ] - [[ "$output" =~ "When global.cloud.enabled is true, global.cloud.resourceId.secretName, global.cloud.clientId.secretName, and global.cloud.clientSecret.secretName must also be set." ]] -} - -@test "terminatingGateways/Deployment: fails when global.cloud.enabled is true and global.cloud.resourceId.secretName is not set but global.cloud.clientId.secretName and global.cloud.clientSecret.secretName is set" { - cd `chart_dir` - run helm template \ - -s templates/terminating-gateways-deployment.yaml \ - --set 'terminatingGateways.enabled=true' \ - --set 'global.cloud.enabled=true' \ - --set 'global.cloud.clientId.secretName=client-id-name' \ - --set 'global.cloud.clientId.secretKey=client-id-key' \ - --set 'global.cloud.clientSecret.secretName=client-secret-id-name' \ - --set 'global.cloud.clientSecret.secretKey=client-secret-id-key' \ - . - [ "$status" -eq 1 ] - [[ "$output" =~ "When global.cloud.enabled is true, global.cloud.resourceId.secretName, global.cloud.clientId.secretName, and global.cloud.clientSecret.secretName must also be set." ]] -} - -@test "terminatingGateways/Deployment: fails when global.cloud.resourceId.secretName is set but global.cloud.resourceId.secretKey is not set." { - cd `chart_dir` - run helm template \ - -s templates/terminating-gateways-deployment.yaml \ - --set 'terminatingGateways.enabled=true' \ - --set 'global.cloud.enabled=true' \ - --set 'global.cloud.clientId.secretName=client-id-name' \ - --set 'global.cloud.clientId.secretKey=client-id-key' \ - --set 'global.cloud.clientSecret.secretName=client-secret-id-name' \ - --set 'global.cloud.clientSecret.secretKey=client-secret-id-key' \ - --set 'global.cloud.resourceId.secretName=resource-id-name' \ - . - [ "$status" -eq 1 ] - [[ "$output" =~ "When either global.cloud.resourceId.secretName or global.cloud.resourceId.secretKey is defined, both must be set." ]] -} - -@test "terminatingGateways/Deployment: fails when global.cloud.authURL.secretName is set but global.cloud.authURL.secretKey is not set." { - cd `chart_dir` - run helm template \ - -s templates/terminating-gateways-deployment.yaml \ - --set 'terminatingGateways.enabled=true' \ - --set 'global.cloud.enabled=true' \ - --set 'global.cloud.clientId.secretName=client-id-name' \ - --set 'global.cloud.clientId.secretKey=client-id-key' \ - --set 'global.cloud.clientSecret.secretName=client-secret-id-name' \ - --set 'global.cloud.clientSecret.secretKey=client-secret-id-key' \ - --set 'global.cloud.resourceId.secretName=resource-id-name' \ - --set 'global.cloud.resourceId.secretKey=resource-id-key' \ - --set 'global.cloud.authUrl.secretName=auth-url-name' \ - . - [ "$status" -eq 1 ] - - [[ "$output" =~ "When either global.cloud.authUrl.secretName or global.cloud.authUrl.secretKey is defined, both must be set." ]] -} - -@test "terminatingGateways/Deployment: fails when global.cloud.authURL.secretKey is set but global.cloud.authURL.secretName is not set." 
{ - cd `chart_dir` - run helm template \ - -s templates/terminating-gateways-deployment.yaml \ - --set 'terminatingGateways.enabled=true' \ - --set 'global.cloud.enabled=true' \ - --set 'global.cloud.clientId.secretName=client-id-name' \ - --set 'global.cloud.clientId.secretKey=client-id-key' \ - --set 'global.cloud.clientSecret.secretName=client-secret-id-name' \ - --set 'global.cloud.clientSecret.secretKey=client-secret-id-key' \ - --set 'global.cloud.resourceId.secretName=resource-id-name' \ - --set 'global.cloud.resourceId.secretKey=resource-id-key' \ - --set 'global.cloud.authUrl.secretKey=auth-url-key' \ - . - [ "$status" -eq 1 ] - - [[ "$output" =~ "When either global.cloud.authUrl.secretName or global.cloud.authUrl.secretKey is defined, both must be set." ]] -} - -@test "terminatingGateways/Deployment: fails when global.cloud.apiHost.secretName is set but global.cloud.apiHost.secretKey is not set." { - cd `chart_dir` - run helm template \ - -s templates/terminating-gateways-deployment.yaml \ - --set 'terminatingGateways.enabled=true' \ - --set 'global.cloud.enabled=true' \ - --set 'global.cloud.clientId.secretName=client-id-name' \ - --set 'global.cloud.clientId.secretKey=client-id-key' \ - --set 'global.cloud.clientSecret.secretName=client-secret-id-name' \ - --set 'global.cloud.clientSecret.secretKey=client-secret-id-key' \ - --set 'global.cloud.resourceId.secretName=resource-id-name' \ - --set 'global.cloud.resourceId.secretKey=resource-id-key' \ - --set 'global.cloud.apiHost.secretName=auth-url-name' \ - . - [ "$status" -eq 1 ] - - [[ "$output" =~ "When either global.cloud.apiHost.secretName or global.cloud.apiHost.secretKey is defined, both must be set." ]] -} - -@test "terminatingGateways/Deployment: fails when global.cloud.apiHost.secretKey is set but global.cloud.apiHost.secretName is not set." { - cd `chart_dir` - run helm template \ - -s templates/mesh-gateway-deployment.yaml \ - --set 'connectInject.enabled=true' \ - --set 'meshGateway.enabled=true' \ - --set 'global.tls.enabled=true' \ - --set 'global.tls.enableAutoEncrypt=true' \ - --set 'global.cloud.enabled=true' \ - --set 'global.cloud.clientId.secretName=client-id-name' \ - --set 'global.cloud.clientId.secretKey=client-id-key' \ - --set 'global.cloud.clientSecret.secretName=client-secret-id-name' \ - --set 'global.cloud.clientSecret.secretKey=client-secret-id-key' \ - --set 'global.cloud.resourceId.secretName=resource-id-name' \ - --set 'global.cloud.resourceId.secretKey=resource-id-key' \ - --set 'global.cloud.apiHost.secretKey=auth-url-key' \ - . - [ "$status" -eq 1 ] - - [[ "$output" =~ "When either global.cloud.apiHost.secretName or global.cloud.apiHost.secretKey is defined, both must be set." ]] -} - -@test "terminatingGateways/Deployment: fails when global.cloud.scadaAddress.secretName is set but global.cloud.scadaAddress.secretKey is not set." { - cd `chart_dir` - run helm template \ - -s templates/terminating-gateways-deployment.yaml \ - --set 'terminatingGateways.enabled=true' \ - --set 'global.cloud.enabled=true' \ - --set 'global.cloud.clientId.secretName=client-id-name' \ - --set 'global.cloud.clientId.secretKey=client-id-key' \ - --set 'global.cloud.clientSecret.secretName=client-secret-id-name' \ - --set 'global.cloud.clientSecret.secretKey=client-secret-id-key' \ - --set 'global.cloud.resourceId.secretName=resource-id-name' \ - --set 'global.cloud.resourceId.secretKey=resource-id-key' \ - --set 'global.cloud.scadaAddress.secretName=scada-address-name' \ - . 
- [ "$status" -eq 1 ] - - [[ "$output" =~ "When either global.cloud.scadaAddress.secretName or global.cloud.scadaAddress.secretKey is defined, both must be set." ]] -} - -@test "terminatingGateways/Deployment: fails when global.cloud.scadaAddress.secretKey is set but global.cloud.scadaAddress.secretName is not set." { - cd `chart_dir` - run helm template \ - -s templates/terminating-gateways-deployment.yaml \ - --set 'terminatingGateways.enabled=true' \ - --set 'global.cloud.enabled=true' \ - --set 'global.cloud.clientId.secretName=client-id-name' \ - --set 'global.cloud.clientId.secretKey=client-id-key' \ - --set 'global.cloud.clientSecret.secretName=client-secret-id-name' \ - --set 'global.cloud.clientSecret.secretKey=client-secret-id-key' \ - --set 'global.cloud.resourceId.secretName=resource-id-name' \ - --set 'global.cloud.resourceId.secretKey=resource-id-key' \ - --set 'global.cloud.scadaAddress.secretKey=scada-address-key' \ - . - [ "$status" -eq 1 ] - - [[ "$output" =~ "When either global.cloud.scadaAddress.secretName or global.cloud.scadaAddress.secretKey is defined, both must be set." ]] -} - -@test "terminatingGateways/Deployment: sets TLS server name if global.cloud.enabled is set" { - cd `chart_dir` - local actual=$(helm template \ - -s templates/terminating-gateways-deployment.yaml \ - --set 'terminatingGateways.enabled=true' \ - --set 'global.tls.enabled=true' \ - --set 'global.tls.enableAutoEncrypt=true' \ - --set 'global.cloud.enabled=true' \ - --set 'global.cloud.clientId.secretName=client-id-name' \ - --set 'global.cloud.clientId.secretKey=client-id-key' \ - --set 'global.cloud.clientSecret.secretName=client-secret-id-name' \ - --set 'global.cloud.clientSecret.secretKey=client-secret-id-key' \ - --set 'global.cloud.resourceId.secretName=resource-id-name' \ - --set 'global.cloud.resourceId.secretKey=resource-id-key' \ - . | tee /dev/stderr | - yq '.spec.template.spec.containers[0].command | any(contains("-tls-server-name=server.dc1.consul"))' | tee /dev/stderr) - [ "${actual}" = "true" ] -} diff --git a/charts/consul/test/unit/terminating-gateways-service.bats b/charts/consul/test/unit/terminating-gateways-service.bats deleted file mode 100644 index d831e512e6..0000000000 --- a/charts/consul/test/unit/terminating-gateways-service.bats +++ /dev/null @@ -1,49 +0,0 @@ -#!/usr/bin/env bats - -load _helpers - -@test "terminatingGateways/Service: disabled by default" { - cd `chart_dir` - assert_empty helm template \ - -s templates/terminating-gateways-service.yaml \ - . -} - -@test "terminatingGateways/Service: enabled with terminatingGateways and connectInject enabled" { - cd `chart_dir` - local object=$(helm template \ - -s templates/terminating-gateways-service.yaml \ - --set 'terminatingGateways.enabled=true' \ - --set 'connectInject.enabled=true' \ - . | tee /dev/stderr | - yq -s '.[0]' | tee /dev/stderr) - - local actual=$(echo $object | yq '. | length > 0' | tee /dev/stderr) - [ "${actual}" = "true" ] -} - -#-------------------------------------------------------------------- -# multiple gateways - -@test "terminatingGateways/Service: multiple gateways" { - cd `chart_dir` - local object=$(helm template \ - -s templates/terminating-gateways-service.yaml \ - --set 'terminatingGateways.enabled=true' \ - --set 'connectInject.enabled=true' \ - --set 'terminatingGateways.gateways[0].name=gateway1' \ - --set 'terminatingGateways.gateways[1].name=gateway2' \ - . | tee /dev/stderr | - yq -s -r '.' 
| tee /dev/stderr) - - local actual=$(echo $object | yq -r '.[0].metadata.name' | tee /dev/stderr) - [ "${actual}" = "release-name-consul-gateway1" ] - - local actual=$(echo $object | yq -r '.[1].metadata.name' | tee /dev/stderr) - [ "${actual}" = "release-name-consul-gateway2" ] - - local actual=$(echo "$object" | - yq -r '.[2] | length > 0' | tee /dev/stderr) - [ "${actual}" = "false" ] -} - diff --git a/charts/consul/test/unit/webhook-cert-manager-clusterrole.bats b/charts/consul/test/unit/webhook-cert-manager-clusterrole.bats index 5003f3cae2..5f7a03c319 100644 --- a/charts/consul/test/unit/webhook-cert-manager-clusterrole.bats +++ b/charts/consul/test/unit/webhook-cert-manager-clusterrole.bats @@ -2,13 +2,11 @@ load _helpers -@test "webhookCertManager/ClusterRole: enabled by default" { +@test "webhookCertManager/ClusterRole: disabled by default" { cd `chart_dir` - local actual=$(helm template \ + assert_empty helm template \ -s templates/webhook-cert-manager-clusterrole.yaml \ - . | tee /dev/stderr | - yq 'length > 0' | tee /dev/stderr) - [ "${actual}" = "true" ] + . } @test "webhookCertManager/ClusterRole: enabled with controller.enabled=true and connectInject.enabled=false" { diff --git a/charts/consul/test/unit/webhook-cert-manager-clusterrolebinding.bats b/charts/consul/test/unit/webhook-cert-manager-clusterrolebinding.bats index 90121e0a7b..ffabf41ee7 100644 --- a/charts/consul/test/unit/webhook-cert-manager-clusterrolebinding.bats +++ b/charts/consul/test/unit/webhook-cert-manager-clusterrolebinding.bats @@ -2,13 +2,11 @@ load _helpers -@test "webhookCertManager/ClusterRoleBinding: enabled by default" { +@test "webhookCertManager/ClusterRoleBinding: disabled by default" { cd `chart_dir` - local actual=$(helm template \ + assert_empty helm template \ -s templates/webhook-cert-manager-clusterrolebinding.yaml \ - . | tee /dev/stderr | - yq 'length > 0' | tee /dev/stderr) - [ "${actual}" = "true" ] + . } @test "webhookCertManager/ClusterRoleBinding: enabled with controller.enabled=true and connectInject.enabled=false" { diff --git a/charts/consul/test/unit/webhook-cert-manager-configmap.bats b/charts/consul/test/unit/webhook-cert-manager-configmap.bats index 34521f17e6..7d7262b9af 100644 --- a/charts/consul/test/unit/webhook-cert-manager-configmap.bats +++ b/charts/consul/test/unit/webhook-cert-manager-configmap.bats @@ -2,13 +2,11 @@ load _helpers -@test "webhookCertManager/Configmap: enabled by default" { +@test "webhookCertManager/Configmap: disabled by default" { cd `chart_dir` - local actual=$(helm template \ + assert_empty helm template \ -s templates/webhook-cert-manager-configmap.yaml \ - . | tee /dev/stderr | - yq 'length > 0' | tee /dev/stderr) - [ "${actual}" = "true" ] + . } @test "webhookCertManager/Configmap: enabled with controller.enabled=true" { diff --git a/charts/consul/test/unit/webhook-cert-manager-deployment.bats b/charts/consul/test/unit/webhook-cert-manager-deployment.bats index d01d8da061..71d2e35c3a 100644 --- a/charts/consul/test/unit/webhook-cert-manager-deployment.bats +++ b/charts/consul/test/unit/webhook-cert-manager-deployment.bats @@ -2,13 +2,11 @@ load _helpers -@test "webhookCertManager/Deployment: enabled by default" { +@test "webhookCertManager/Deployment: disabled by default" { cd `chart_dir` - local actual=$(helm template \ + assert_empty helm template \ -s templates/webhook-cert-manager-deployment.yaml \ - . | tee /dev/stderr | - yq 'length > 0' | tee /dev/stderr) - [ "${actual}" = "true" ] + . 
} @test "webhookCertManager/Deployment: enabled with controller.enabled=true and connectInject.enabled=false" { diff --git a/charts/consul/test/unit/webhook-cert-manager-serviceaccount.bats b/charts/consul/test/unit/webhook-cert-manager-serviceaccount.bats index 9d833071ce..e4307c9409 100644 --- a/charts/consul/test/unit/webhook-cert-manager-serviceaccount.bats +++ b/charts/consul/test/unit/webhook-cert-manager-serviceaccount.bats @@ -2,13 +2,11 @@ load _helpers -@test "webhookCertManager/ServiceAccount: enabled by default" { +@test "webhookCertManager/ServiceAccount: disabled by default" { cd `chart_dir` - local actual=$(helm template \ + assert_empty helm template \ -s templates/webhook-cert-manager-serviceaccount.yaml \ - . | tee /dev/stderr | - yq 'length > 0' | tee /dev/stderr) - [ "${actual}" = "true" ] + . } @test "webhookCertManager/ServiceAccount: enabled with controller.enabled=true and connectInject.enabled=false" { diff --git a/charts/consul/values.yaml b/charts/consul/values.yaml index 28c8fc5990..564de43780 100644 --- a/charts/consul/values.yaml +++ b/charts/consul/values.yaml @@ -34,6 +34,22 @@ global: # If true, the Helm chart enables Cluster Peering for the cluster. This option enables peering controllers and # allows use of the PeeringAcceptor and PeeringDialer CRDs for establishing service mesh peerings. enabled: false + tokenGeneration: + serverAddresses: + # Source can be set to "","consul" or "static". + # + # "" is the default source. If servers are enabled, it will check if `server.exposeService` is enabled, and read + # the addresses from that service to use as the peering token server addresses. If using admin partitions and + # only Consul client agents are enabled, the addresses in `externalServers.hosts` and `externalServers.grpcPort` + # will be used. + # + # "consul" will use the Consul advertise addresses in the peering token. + # + # "static" will use the addresses specified in `global.peering.tokenGeneration.serverAddresses.static`. + source: "" + # Static addresses must be formatted "hostname|ip:port" where the port is the Consul server(s)' grpc port. + # @type: array + static: [] # [Enterprise Only] Enabling `adminPartitions` allows creation of Admin Partitions in Kubernetes clusters. # It additionally indicates that you are running Consul Enterprise v1.11+ with a valid Consul Enterprise @@ -50,6 +66,36 @@ global: # Must be "default" in the server cluster ie the Kubernetes cluster that the Consul server pods are deployed onto. name: "default" + # Partition service properties. + service: + type: LoadBalancer + # Optionally set the nodePort value of the partition service if using a NodePort service. + # If not set and using a NodePort service, Kubernetes will automatically assign + # a port. + nodePort: + + # RPC node port + # @type: integer + rpc: null + + # Serf node port + # @type: integer + serf: null + + # HTTPS node port + # @type: integer + https: null + + # Annotations to apply to the partition service. + # + # ```yaml + # annotations: | + # "annotation-key": "annotation-value" + # ``` + # + # @type: string + annotations: null + # The name (and tag) of the Consul Docker image for clients and servers. # This can be overridden per component. This should be pinned to a specific # version tag, otherwise you may inadvertently upgrade your Consul version. 
@@ -63,7 +109,7 @@ global: # image: "hashicorp/consul-enterprise:1.10.0-ent" # ``` # @default: hashicorp/consul: - image: "hashicorp/consul:1.14.0-beta1" + image: "hashicorp/consul:1.13.2" # Array of objects containing image pull secret names that will be applied to each service account. # This can be used to reference image pull secrets if using a custom consul or consul-k8s-control-plane Docker image. @@ -83,7 +129,7 @@ global: # image that is used for functionality such as catalog sync. # This can be overridden per component. # @default: hashicorp/consul-k8s-control-plane: - imageK8S: docker.mirror.hashicorp.services/hashicorppreview/consul-k8s-control-plane:1.0.0-dev + imageK8S: hashicorp/consul-k8s-control-plane:0.49.0 # The name of the datacenter that the agents should # register as. This can't be changed once the Consul cluster is up and running @@ -138,6 +184,17 @@ global: # and check the name of `metadata.name`. consulClientRole: "" + # [Enterprise Only] The Vault role for the Consul client snapshot agent. + # The role must be connected to the Consul client snapshot agent's service account. + # The role must also have a policy with read capabilities for the snapshot agent config + # defined by the `client.snapshotAgent.configSecret.secretName` value. + # To discover the service account name of the Consul client, run + # ```shell-session + # $ helm template --show-only templates/client-snapshot-agent-serviceaccount.yaml --set client.snapshotAgent.enabled=true hashicorp/consul + # ``` + # and check the name of `metadata.name`. + consulSnapshotAgentRole: "" + # A Vault role for the Consul `server-acl-init` job, which manages setting ACLs so that clients and components can obtain ACL tokens. # The role must be connected to the `server-acl-init` job's service account. # The role must also have a policy with read and write capabilities for the bootstrap, replication or partition tokens @@ -563,17 +620,34 @@ global: # @type: boolean enableGatewayMetrics: true + # For connect-injected pods, the consul sidecar is responsible for metrics merging. For ingress/mesh/terminating + # gateways, it additionally ensures the Consul services are always registered with their local Consul client. + # @type: map + consulSidecarContainer: + # Set default resources for consul sidecar. If null, that resource won't + # be set. + # These settings can be overridden on a per-pod basis via these annotations: + # + # - `consul.hashicorp.com/consul-sidecar-cpu-limit` + # - `consul.hashicorp.com/consul-sidecar-cpu-request` + # - `consul.hashicorp.com/consul-sidecar-memory-limit` + # - `consul.hashicorp.com/consul-sidecar-memory-request` + # @recurse: false + # @type: map + resources: + requests: + memory: "25Mi" + cpu: "20m" + limits: + memory: "50Mi" + cpu: "20m" + # The name (and tag) of the Envoy Docker image used for the # connect-injected sidecar proxies and mesh, terminating, and ingress gateways. # See https://www.consul.io/docs/connect/proxies/envoy for full compatibility matrix between Consul and Envoy. # @default: envoyproxy/envoy-alpine: imageEnvoy: "envoyproxy/envoy:v1.23.1" - # The name (and tag) of the consul-dataplane Docker image used for the - # connect-injected sidecar proxies and mesh, terminating, and ingress gateways. - # @default: hashicorp/consul-dataplane: - imageConsulDataplane: hashicorp/consul-dataplane:1.0.0-beta2 - # Configuration for running this Helm chart on the Red Hat OpenShift platform. # This Helm chart currently supports OpenShift v4.x+. 
openshift: @@ -585,73 +659,6 @@ global: # the API before cancelling the request. consulAPITimeout: 5s - # Enables installing an HCP Consul self-managed cluster. - # Requires Consul v1.14+. - cloud: - # If true, the Helm chart will enable the installation of an HCP Consul - # self-managed cluster. - enabled: false - - # The name of the Kubernetes secret that holds the HCP resource id. - # This is required when global.cloud.enabled is true. - resourceId: - # The name of the Kubernetes secret that holds the resource id. - # @type: string - secretName: null - # The key within the Kubernetes secret that holds the resource id. - # @type: string - secretKey: null - - # The name of the Kubernetes secret that holds the HCP cloud client id. - # This is required when global.cloud.enabled is true. - clientId: - # The name of the Kubernetes secret that holds the client id. - # @type: string - secretName: null - # The key within the Kubernetes secret that holds the client id. - # @type: string - secretKey: null - - # The name of the Kubernetes secret that holds the HCP cloud client secret. - # This is required when global.cloud.enabled is true. - clientSecret: - # The name of the Kubernetes secret that holds the client secret. - # @type: string - secretName: null - # The key within the Kubernetes secret that holds the client secret. - # @type: string - secretKey: null - - # The name of the Kubernetes secret that holds the HCP cloud client id. - # This is optional when global.cloud.enabled is true. - apiHost: - # The name of the Kubernetes secret that holds the api hostname. - # @type: string - secretName: null - # The key within the Kubernetes secret that holds the api hostname. - # @type: string - secretKey: null - - # The name of the Kubernetes secret that holds the HCP cloud authorization url. - # This is optional when global.cloud.enabled is true. - authUrl: - # The name of the Kubernetes secret that holds the authorization url. - # @type: string - secretName: null - # The key within the Kubernetes secret that holds the authorization url. - # @type: string - secretKey: null - - # The name of the Kubernetes secret that holds the HCP cloud scada address. - # This is optional when global.cloud.enabled is true. - scadaAddress: - # The name of the Kubernetes secret that holds the scada address. - # @type: string - secretName: null - # The key within the Kubernetes secret that holds the scada address. - # @type: string - secretKey: null - # Server, when enabled, configures a server cluster to run. This should # be disabled if you plan on connecting to a Consul cluster external to # the Kube cluster. @@ -672,7 +679,7 @@ server: # The number of server agents to run. This determines the fault tolerance of # the cluster. Please see the deployment table (https://consul.io/docs/internals/consensus#deployment-table) # for more information. - replicas: 1 + replicas: 3 # The number of servers that are expected to be running. # It defaults to server.replicas. @@ -1079,56 +1086,6 @@ server: # @type: map extraEnvironmentVars: {} - # [Enterprise Only] Values for setting up and running snapshot agents - # (https://consul.io/commands/snapshot/agent) - # within the Consul clusters. They run as a sidecar with Consul servers. - snapshotAgent: - # If true, the chart will install resources necessary to run the snapshot agent. - enabled: false - - # Interval at which to perform snapshots. 
- # See https://www.consul.io/commands/snapshot/agent#interval - # @type: string - interval: 1h - - # A Kubernetes or Vault secret that should be manually created to contain the entire - # config to be used on the snapshot agent. - # This is the preferred method of configuration since there are usually storage - # credentials present. Please see Snapshot agent config (https://consul.io/commands/snapshot/agent#config-file-options) - # for details. - configSecret: - # The name of the Kubernetes secret or Vault secret path that holds the snapshot agent config. - # @type: string - secretName: null - # The key within the Kubernetes secret or Vault secret key that holds the snapshot agent config. - # @type: string - secretKey: null - - # The resource settings for snapshot agent pods. - # @recurse: false - # @type: map - resources: - requests: - memory: "50Mi" - cpu: "50m" - limits: - memory: "50Mi" - cpu: "50m" - - # Optional PEM-encoded CA certificate that will be added to the trusted system CAs. - # Useful if using an S3-compatible storage exposing a self-signed certificate. - # - # Example: - # - # ```yaml - # caCert: | - # -----BEGIN CERTIFICATE----- - # MIIC7jCCApSgAwIBAgIRAIq2zQEVexqxvtxP6J0bXAwwCgYIKoZIzj0EAwIwgbkx - # ... - # ``` - # @type: string - caCert: null - # Configuration for Consul servers when the servers are running outside of Kubernetes. # When running external servers, configuring these values is recommended # if setting `global.tls.enableAutoEncrypt` to true @@ -1153,7 +1110,7 @@ externalServers: httpsPort: 8501 # The GRPC port of the Consul servers. - grpcPort: 8502 + grpcPort: 8503 # The server name to use as the SNI host header when connecting with HTTPS. # @type: string @@ -1188,8 +1145,9 @@ client: # If true, the chart will install all # the resources necessary for a Consul client on every Kubernetes node. This _does not_ require # `server.enabled`, since the agents can be configured to join an external cluster. + # @default: global.enabled # @type: boolean - enabled: false + enabled: "-" # The name of the Docker image (including any tag) for the containers # running Consul client agents. @@ -1462,6 +1420,73 @@ client: # @type: string updateStrategy: null + # [Enterprise Only] Values for setting up and running snapshot agents + # (https://consul.io/commands/snapshot/agent) + # within the Consul clusters. They are required to be co-located with Consul clients, + # so will inherit the clients' nodeSelector, tolerations and affinity. + snapshotAgent: + # If true, the chart will install resources necessary to run the snapshot agent. + enabled: false + + # The number of snapshot agents to run. + replicas: 2 + + # Interval at which to perform snapshots. + # See https://www.consul.io/commands/snapshot/agent#interval + # @type: string + interval: 1h + + # A Kubernetes or Vault secret that should be manually created to contain the entire + # config to be used on the snapshot agent. + # This is the preferred method of configuration since there are usually storage + # credentials present. Please see Snapshot agent config (https://consul.io/commands/snapshot/agent#config-file-options) + # for details. + configSecret: + # The name of the Kubernetes secret or Vault secret path that holds the snapshot agent config. + # @type: string + secretName: null + # The key within the Kubernetes secret or Vault secret key that holds the snapshot agent config. 
+ # @type: string + secretKey: null + + serviceAccount: + # This value defines additional annotations for the snapshot agent service account. This should be formatted as a + # multi-line string. + # + # ```yaml + # annotations: | + # "sample/annotation1": "foo" + # "sample/annotation2": "bar" + # ``` + # + # @type: string + annotations: null + + # The resource settings for snapshot agent pods. + # @recurse: false + # @type: map + resources: + requests: + memory: "50Mi" + cpu: "50m" + limits: + memory: "50Mi" + cpu: "50m" + + # Optional PEM-encoded CA certificate that will be added to the trusted system CAs. + # Useful if using an S3-compatible storage exposing a self-signed certificate. + # + # Example: + # + # ```yaml + # caCert: | + # -----BEGIN CERTIFICATE----- + # MIIC7jCCApSgAwIBAgIRAIq2zQEVexqxvtxP6J0bXAwwCgYIKoZIzj0EAwIwgbkx + # ... + # ``` + # @type: string + caCert: null + # Configuration for DNS configuration within the Kubernetes cluster. # This creates a service that routes to all agents (client or server) # for serving DNS requests. This DOES NOT automatically configure kube-dns @@ -1730,7 +1755,7 @@ syncCatalog: # If mirroring is enabled, avoid creating any Consul resources in the following # Kubernetes namespaces, as Consul currently reserves these namespaces for # system use: "system", "universal", "operator", "root". - mirroringK8S: true + mirroringK8S: false # If `mirroringK8S` is set to true, `mirroringK8SPrefix` allows each Consul namespace # to be given a prefix. For example, if `mirroringK8SPrefix` is set to "k8s-", a @@ -1881,7 +1906,7 @@ syncCatalog: connectInject: # True if you want to enable connect injection. Set to "-" to inherit from # global.enabled. - enabled: true + enabled: false # The number of deployment replicas. replicas: 2 @@ -1929,11 +1954,6 @@ connectInject: # @type: integer maxUnavailable: null - # The minimum number of available pods. - # Takes precedence over maxUnavailable if set. - # @type: integer - minAvailable: null - # Configures consul-cni plugin for Consul Service mesh services cni: # If true, then all traffic redirection setup will use the consul-cni plugin. @@ -2023,18 +2043,18 @@ connectInject: # add a listener on the Envoy sidecar to expose metrics. The exposed # metrics will depend on whether metrics merging is enabled: # - If metrics merging is enabled: - # the consul-dataplane will run a merged metrics server + # the Consul sidecar will run a merged metrics server # combining Envoy sidecar and Connect service metrics, # i.e. if your service exposes its own Prometheus metrics. # - If metrics merging is disabled: # the listener will just expose Envoy sidecar metrics. # This will inherit from `global.metrics.enabled`. defaultEnabled: "-" - # Configures the consul-dataplane to run a merged metrics server + # Configures the Consul sidecar to run a merged metrics server # to combine and serve both Envoy and Connect service metrics. # This feature is available only in Consul v1.10.0 or greater. defaultEnableMerging: false - # Configures the port at which the consul-dataplane will listen on to return + # Configures the port at which the Consul sidecar will listen on to return # combined metrics. This port only needs to be changed if it conflicts with # the application's ports. defaultMergedMetricsPort: 20100 @@ -2185,7 +2205,7 @@ connectInject: # `consulDestinationNamespace` setting. 
If mirroring is enabled, avoid creating any Consul # resources in the following Kubernetes namespaces, as Consul currently reserves these # namespaces for system use: "system", "universal", "operator", "root". - mirroringK8S: true + mirroringK8S: false # If `mirroringK8S` is set to true, `mirroringK8SPrefix` allows each Consul namespace # to be given a prefix. For example, if `mirroringK8SPrefix` is set to "k8s-", a @@ -2303,7 +2323,7 @@ connectInject: # ServiceIntentions require consul 1.9+. controller: # Enables the controller for managing custom resources. - enabled: true + enabled: false # The number of deployment replicas. replicas: 1 @@ -2427,6 +2447,9 @@ meshGateway: # The service option configures the Service that fronts the Gateway Deployment. service: + # Whether to create a Service or not. + enabled: true + # Type of service, ex. LoadBalancer, ClusterIP. type: LoadBalancer @@ -2507,6 +2530,18 @@ meshGateway: memory: "100Mi" cpu: "100m" + # The resource settings for the `copy-consul-bin` init container. + # @recurse: false + # @type: map + initCopyConsulContainer: + resources: + requests: + memory: "25Mi" + cpu: "50m" + limits: + memory: "150Mi" + cpu: "50m" + # The resource settings for the `service-init` init container. # @recurse: false # @type: map @@ -2658,6 +2693,18 @@ ingressGateways: memory: "100Mi" cpu: "100m" + # The resource settings for the `copy-consul-bin` init container. + # @recurse: false + # @type: map + initCopyConsulContainer: + resources: + requests: + memory: "25Mi" + cpu: "50m" + limits: + memory: "150Mi" + cpu: "50m" + # By default, we set an anti-affinity so that two of the same gateway pods # won't be on the same node. NOTE: Gateways require that Consul client agents are # also running on the nodes alongside each gateway pod. @@ -2780,6 +2827,18 @@ terminatingGateways: memory: "100Mi" cpu: "100m" + # The resource settings for the `copy-consul-bin` init container. + # @recurse: false + # @type: map + initCopyConsulContainer: + resources: + requests: + memory: "25Mi" + cpu: "50m" + limits: + memory: "150Mi" + cpu: "50m" + # By default, we set an anti-affinity so that two of the same gateway pods # won't be on the same node. NOTE: Gateways require that Consul client agents are # also running on the nodes alongside each gateway pod. diff --git a/charts/demo/.helmignore b/charts/demo/.helmignore deleted file mode 100644 index 0e8a0eb36f..0000000000 --- a/charts/demo/.helmignore +++ /dev/null @@ -1,23 +0,0 @@ -# Patterns to ignore when building packages. -# This supports shell glob matching, relative path matching, and -# negation (prefixed with !). Only one pattern per line. -.DS_Store -# Common VCS dirs -.git/ -.gitignore -.bzr/ -.bzrignore -.hg/ -.hgignore -.svn/ -# Common backup files -*.swp -*.bak -*.tmp -*.orig -*~ -# Various IDEs -.project -.idea/ -*.tmproj -.vscode/ diff --git a/charts/demo/Chart.yaml b/charts/demo/Chart.yaml deleted file mode 100644 index 82fc51d2df..0000000000 --- a/charts/demo/Chart.yaml +++ /dev/null @@ -1,24 +0,0 @@ -apiVersion: v2 -name: consul-demo -description: A Helm chart for Consul demo app - -# A chart can be either an 'application' or a 'library' chart. -# -# Application charts are a collection of templates that can be packaged into versioned archives -# to be deployed. -# -# Library charts provide useful utilities or functions for the chart developer. They're included as -# a dependency of application charts to inject those utilities and functions into the rendering -# pipeline. 
Library charts do not define any templates and therefore cannot be deployed. -type: application - -# This is the chart version. This version number should be incremented each time you make changes -# to the chart and its templates, including the app version. -# Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 0.1.0 - -# This is the version number of the application being deployed. This version number should be -# incremented each time you make changes to the application. Versions are not expected to -# follow Semantic Versioning. They should reflect the version the application is using. -# It is recommended to use it with quotes. -appVersion: "0.1.0" diff --git a/charts/demo/templates/frontend.yaml b/charts/demo/templates/frontend.yaml deleted file mode 100644 index c72fad0d08..0000000000 --- a/charts/demo/templates/frontend.yaml +++ /dev/null @@ -1,116 +0,0 @@ -# WARNING: The HashiCups files have been copied directly from -# https://github.com/hashicorp/learn-consul-kubernetes/tree/main/layer7-observability/hashicups -# Any modifications begin with the comment # BEGIN CONSUL-K8S MODIFICATION -# and end with the comment # BEGIN CONSUL-K8S MODIFICATION. -# If keeping these files manually up to date with their upstream source, -# the files will need to be copied from the above repo and transferred here. -# Once transferred, all modifications will need to be reapplied. ---- -apiVersion: v1 -kind: Service -metadata: - name: frontend - labels: - app: frontend -spec: - type: ClusterIP - ports: - - port: 80 - targetPort: 80 - selector: - app: frontend ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: frontend -automountServiceAccountToken: true ---- -apiVersion: consul.hashicorp.com/v1alpha1 -kind: ServiceDefaults -metadata: - name: frontend -spec: - protocol: "http" ---- -apiVersion: v1 -kind: ConfigMap -metadata: - name: nginx-configmap -data: - config: | - # /etc/nginx/conf.d/default.conf - server { - listen 80; - server_name localhost; - #charset koi8-r; - #access_log /var/log/nginx/host.access.log main; - location / { - root /usr/share/nginx/html; - index index.html index.htm; - } - # Proxy pass the api location to save CORS - # Use location exposed by Consul connect - location /api { - # BEGIN CONSUL-K8S MODIFICATION - proxy_pass http://public-api.{{ .Release.Namespace }}.svc.cluster.local:8080; - # END CONSUL-K8S MODIFICATION - proxy_http_version 1.1; - proxy_set_header Upgrade $http_upgrade; - proxy_set_header Connection "Upgrade"; - proxy_set_header Host $host; - } - error_page 500 502 503 504 /50x.html; - location = /50x.html { - root /usr/share/nginx/html; - } - } ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: frontend -spec: - replicas: 1 - selector: - matchLabels: - service: frontend - app: frontend - template: - metadata: - labels: - service: frontend - app: frontend - # BEGIN CONSUL-K8S MODIFICATION - annotations: - 'consul.hashicorp.com/connect-inject': 'true' - # END CONSUL-K8S MODIFICATION - spec: - serviceAccountName: frontend - volumes: - - name: config - configMap: - name: nginx-configmap - items: - - key: config - path: default.conf - containers: - - name: frontend - image: hashicorpdemoapp/frontend:v0.0.3 - ports: - - containerPort: 80 - volumeMounts: - - name: config - mountPath: /etc/nginx/conf.d - readOnly: true ---- -apiVersion: consul.hashicorp.com/v1alpha1 -kind: ServiceIntentions -metadata: - name: frontend-to-public-api -spec: - destination: - name: public-api - sources: - - name: frontend - action: 
allow diff --git a/charts/demo/templates/postgres.yaml b/charts/demo/templates/postgres.yaml deleted file mode 100644 index 89794fa3e3..0000000000 --- a/charts/demo/templates/postgres.yaml +++ /dev/null @@ -1,76 +0,0 @@ -# WARNING: The HashiCups files have been copied directly from -# https://github.com/hashicorp/learn-consul-kubernetes/tree/main/layer7-observability/hashicups -# Any modifications begin with the comment # BEGIN CONSUL-K8S MODIFICATION -# and end with the comment # BEGIN CONSUL-K8S MODIFICATION. -# If keeping these files manually up to date with their upstream source, -# the files will need to be copied from the above repo and transferred here. -# Once transferred, all modifications will need to be reapplied. ---- -apiVersion: v1 -kind: Service -metadata: - name: postgres - labels: - app: postgres -spec: - type: ClusterIP - ports: - - port: 5432 - targetPort: 5432 - selector: - app: postgres ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: postgres -automountServiceAccountToken: true ---- -apiVersion: consul.hashicorp.com/v1alpha1 -kind: ServiceDefaults -metadata: - name: postgres -spec: - protocol: tcp ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: postgres -spec: - replicas: 1 - selector: - matchLabels: - service: postgres - app: postgres - template: - metadata: - labels: - service: postgres - app: postgres - # BEGIN CONSUL-K8S MODIFICATION - annotations: - 'consul.hashicorp.com/connect-inject': 'true' - # END CONSUL-K8S MODIFICATION - spec: - serviceAccountName: postgres - containers: - - name: postgres - image: hashicorpdemoapp/product-api-db:v0.0.11 - ports: - - containerPort: 5432 - env: - - name: POSTGRES_DB - value: products - - name: POSTGRES_USER - value: postgres - - name: POSTGRES_PASSWORD - value: password - # only listen on loopback so only access is via connect proxy - args: ["-c", "listen_addresses=127.0.0.1"] - volumeMounts: - - mountPath: "/var/lib/postgresql/data" - name: "pgdata" - volumes: - - name: pgdata - emptyDir: {} diff --git a/charts/demo/templates/product-api.yaml b/charts/demo/templates/product-api.yaml deleted file mode 100644 index b89c25dccd..0000000000 --- a/charts/demo/templates/product-api.yaml +++ /dev/null @@ -1,108 +0,0 @@ -# WARNING: The HashiCups files have been copied directly from -# https://github.com/hashicorp/learn-consul-kubernetes/tree/main/layer7-observability/hashicups -# Any modifications begin with the comment # BEGIN CONSUL-K8S MODIFICATION -# and end with the comment # BEGIN CONSUL-K8S MODIFICATION. -# If keeping these files manually up to date with their upstream source, -# the files will need to be copied from the above repo and transferred here. -# Once transferred, all modifications will need to be reapplied. 
---- -apiVersion: v1 -kind: Service -metadata: - name: product-api -spec: - selector: - app: product-api - ports: - - name: http - protocol: TCP - port: 9090 - targetPort: 9090 ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: product-api -automountServiceAccountToken: true ---- -apiVersion: consul.hashicorp.com/v1alpha1 -kind: ServiceDefaults -metadata: - name: product-api -spec: - protocol: "http" ---- -apiVersion: v1 -kind: ConfigMap -metadata: - name: db-configmap -data: - # BEGIN CONSUL-K8S MODIFICATION - config: | - { - "db_connection": "host=postgres.{{ .Release.Namespace }}.svc.cluster.local port=5432 user=postgres password=password dbname=products sslmode=disable", - "bind_address": ":9090", - "metrics_address": ":9103" - } - # END CONSUL-K8S MODIFICATION ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: product-api - labels: - app: product-api -spec: - replicas: 1 - selector: - matchLabels: - app: product-api - template: - metadata: - labels: - app: product-api - # BEGIN CONSUL-K8S MODIFICATION - annotations: - 'consul.hashicorp.com/connect-inject': 'true' - # END CONSUL-K8S MODIFICATION - spec: - serviceAccountName: product-api - volumes: - - name: config - configMap: - name: db-configmap - items: - - key: config - path: conf.json - containers: - - name: product-api - image: hashicorpdemoapp/product-api:v0.0.12 - ports: - - containerPort: 9090 - - containerPort: 9103 - env: - - name: "CONFIG_FILE" - value: "/config/conf.json" - livenessProbe: - httpGet: - path: /health - port: 9090 - initialDelaySeconds: 15 - timeoutSeconds: 1 - periodSeconds: 10 - failureThreshold: 30 - volumeMounts: - - name: config - mountPath: /config - readOnly: true ---- -apiVersion: consul.hashicorp.com/v1alpha1 -kind: ServiceIntentions -metadata: - name: product-api-to-postgres -spec: - destination: - name: postgres - sources: - - name: product-api - action: allow diff --git a/charts/demo/templates/public-api.yaml b/charts/demo/templates/public-api.yaml deleted file mode 100644 index 3c812c26f6..0000000000 --- a/charts/demo/templates/public-api.yaml +++ /dev/null @@ -1,79 +0,0 @@ -# WARNING: The HashiCups files have been copied directly from -# https://github.com/hashicorp/learn-consul-kubernetes/tree/main/layer7-observability/hashicups -# Any modifications begin with the comment # BEGIN CONSUL-K8S MODIFICATION -# and end with the comment # BEGIN CONSUL-K8S MODIFICATION. -# If keeping these files manually up to date with their upstream source, -# the files will need to be copied from the above repo and transferred here. -# Once transferred, all modifications will need to be reapplied. 
---- -apiVersion: v1 -kind: Service -metadata: - name: public-api - labels: - app: public-api -spec: - type: ClusterIP - ports: - - port: 8080 - targetPort: 8080 - selector: - app: public-api ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: public-api -automountServiceAccountToken: true ---- -apiVersion: consul.hashicorp.com/v1alpha1 -kind: ServiceDefaults -metadata: - name: public-api -spec: - protocol: "http" ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: public-api -spec: - replicas: 1 - selector: - matchLabels: - service: public-api - app: public-api - template: - metadata: - labels: - service: public-api - app: public-api - # BEGIN CONSUL-K8S MODIFICATION - annotations: - 'consul.hashicorp.com/connect-inject': 'true' - # END CONSUL-K8S MODIFICATION - spec: - serviceAccountName: public-api - containers: - - name: public-api - image: hashicorpdemoapp/public-api:v0.0.3 - ports: - - containerPort: 8080 - env: - - name: BIND_ADDRESS - value: ":8080" - - name: PRODUCT_API_URI - # BEGIN CONSUL-K8S MODIFICATION - value: "http://product-api.{{ .Release.Namespace }}.svc.cluster.local:9090" - # END CONSUL-K8S MODIFICATION ---- -apiVersion: consul.hashicorp.com/v1alpha1 -kind: ServiceIntentions -metadata: - name: public-api-to-product-api -spec: - destination: - name: product-api - sources: - - name: public-api - action: allow diff --git a/charts/demo/values.yaml b/charts/demo/values.yaml deleted file mode 100644 index 2dd99602c7..0000000000 --- a/charts/demo/values.yaml +++ /dev/null @@ -1 +0,0 @@ -# Default values for demo. diff --git a/charts/embed_chart.go b/charts/embed_chart.go index 29e7e9635e..6393508ebb 100644 --- a/charts/embed_chart.go +++ b/charts/embed_chart.go @@ -14,6 +14,3 @@ import "embed" // explicitly embedded. 
//go:embed consul/Chart.yaml consul/values.yaml consul/templates consul/templates/_helpers.tpl var ConsulHelmChart embed.FS - -//go:embed demo/Chart.yaml demo/values.yaml demo/templates -var DemoHelmChart embed.FS diff --git a/charts/go.mod b/charts/go.mod index cdb23e46b0..9c001ddad8 100644 --- a/charts/go.mod +++ b/charts/go.mod @@ -1,3 +1,3 @@ module github.com/hashicorp/consul-k8s/charts -go 1.19 +go 1.18 diff --git a/cli/cmd/install/install.go b/cli/cmd/install/install.go index 7b5d5bb31c..61742cebbe 100644 --- a/cli/cmd/install/install.go +++ b/cli/cmd/install/install.go @@ -3,7 +3,6 @@ package install import ( "errors" "fmt" - "net/http" "os" "strings" "sync" @@ -15,12 +14,9 @@ import ( "github.com/hashicorp/consul-k8s/cli/common/terminal" "github.com/hashicorp/consul-k8s/cli/config" "github.com/hashicorp/consul-k8s/cli/helm" - "github.com/hashicorp/consul-k8s/cli/preset" "github.com/hashicorp/consul-k8s/cli/release" "github.com/hashicorp/consul-k8s/cli/validation" "github.com/posener/complete" - "golang.org/x/text/cases" - "golang.org/x/text/language" "helm.sh/helm/v3/pkg/action" helmCLI "helm.sh/helm/v3/pkg/cli" "helm.sh/helm/v3/pkg/cli/values" @@ -29,7 +25,6 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes" _ "k8s.io/client-go/plugin/pkg/client/auth" - "k8s.io/utils/strings/slices" "sigs.k8s.io/yaml" ) @@ -61,11 +56,6 @@ const ( flagNameContext = "context" flagNameKubeconfig = "kubeconfig" - - flagNameHCPResourceID = "hcp-resource-id" - - flagNameDemo = "demo" - defaultDemo = false ) type Command struct { @@ -73,26 +63,20 @@ type Command struct { kubernetes kubernetes.Interface - helmActionsRunner helm.HelmActionsRunner - - httpClient *http.Client - set *flag.Sets - flagPreset string - flagNamespace string - flagDryRun bool - flagAutoApprove bool - flagValueFiles []string - flagSetStringValues []string - flagSetValues []string - flagFileValues []string - flagTimeout string - timeoutDuration time.Duration - flagVerbose bool - flagWait bool - flagDemo bool - flagNameHCPResourceID string + flagPreset string + flagNamespace string + flagDryRun bool + flagAutoApprove bool + flagValueFiles []string + flagSetStringValues []string + flagSetValues []string + flagFileValues []string + flagTimeout string + timeoutDuration time.Duration + flagVerbose bool + flagWait bool flagKubeConfig string flagKubeContext string @@ -102,6 +86,12 @@ type Command struct { } func (c *Command) init() { + // Store all the possible preset values in 'presetList'. Printed in the help message. + var presetList []string + for name := range config.Presets { + presetList = append(presetList, name) + } + c.set = flag.NewSets() f := c.set.NewSet("Command Options") f.BoolVar(&flag.BoolVar{ @@ -132,7 +122,7 @@ func (c *Command) init() { Name: flagNamePreset, Target: &c.flagPreset, Default: defaultPreset, - Usage: fmt.Sprintf("Use an installation preset, one of %s. Defaults to none", strings.Join(preset.Presets, ", ")), + Usage: fmt.Sprintf("Use an installation preset, one of %s. 
Defaults to none", strings.Join(presetList, ", ")), }) f.StringSliceVar(&flag.StringSliceVar{ Name: flagNameSetValues, @@ -169,19 +159,6 @@ func (c *Command) init() { Default: defaultWait, Usage: "Wait for Kubernetes resources in installation to be ready before exiting command.", }) - f.BoolVar(&flag.BoolVar{ - Name: flagNameDemo, - Target: &c.flagDemo, - Default: defaultDemo, - Usage: fmt.Sprintf("Install %s immediately after installing %s.", - common.ReleaseTypeConsulDemo, common.ReleaseTypeConsul), - }) - f.StringVar(&flag.StringVar{ - Name: flagNameHCPResourceID, - Target: &c.flagNameHCPResourceID, - Default: "", - Usage: "Set the HCP resource_id when using the 'cloud' preset.", - }) f = c.set.NewSet("Global Options") f.StringVar(&flag.StringVar{ @@ -204,9 +181,6 @@ func (c *Command) init() { // Run installs Consul into a Kubernetes cluster. func (c *Command) Run(args []string) int { c.once.Do(c.init) - if c.helmActionsRunner == nil { - c.helmActionsRunner = &helm.ActionRunner{} - } // The logger is initialized in main with the name cli. Here, we reset the name to install so log lines would be prefixed with install. c.Log.ResetNamed("install") @@ -269,11 +243,7 @@ func (c *Command) Run(args []string) int { c.UI.Output("Checking if Consul can be installed", terminal.WithHeaderStyle()) // Ensure there is not an existing Consul installation which would cause a conflict. - if found, name, ns, _ := c.helmActionsRunner.CheckForInstallations(&helm.CheckForInstallationsOptions{ - Settings: settings, - ReleaseName: common.DefaultReleaseName, - DebugLog: uiLogger, - }); found { + if name, ns, err := common.CheckForInstallations(settings, uiLogger); err == nil { c.UI.Output("Cannot install Consul. A Consul cluster is already installed in namespace %s with name %s.", ns, name, terminal.WithErrorStyle()) c.UI.Output("Use the command `consul-k8s uninstall` to uninstall Consul from the cluster.", terminal.WithInfoStyle()) return 1 @@ -287,38 +257,6 @@ func (c *Command) Run(args []string) int { } c.UI.Output("No existing Consul persistent volume claims found", terminal.WithSuccessStyle()) - release := release.Release{ - Name: common.DefaultReleaseName, - Namespace: c.flagNamespace, - } - - msg, err := c.checkForPreviousSecrets(release) - if err != nil { - c.UI.Output(err.Error(), terminal.WithErrorStyle()) - return 1 - } - c.UI.Output(msg, terminal.WithSuccessStyle()) - - if c.flagDemo { - c.UI.Output("Checking if %s can be installed", - cases.Title(language.English).String(common.ReleaseTypeConsulDemo), - terminal.WithHeaderStyle()) - - // Ensure there is not an existing Consul demo installation which would cause a conflict. - if found, name, ns, _ := c.helmActionsRunner.CheckForInstallations(&helm.CheckForInstallationsOptions{ - Settings: settings, - ReleaseName: common.ConsulDemoAppReleaseName, - DebugLog: uiLogger, - }); found { - c.UI.Output("Cannot install %s. A %s cluster is already installed in namespace %s with name %s.", - common.ReleaseTypeConsulDemo, common.ReleaseTypeConsulDemo, ns, name, terminal.WithErrorStyle()) - c.UI.Output("Use the command `consul-k8s uninstall` to uninstall the %s from the cluster.", - common.ReleaseTypeConsulDemo, terminal.WithInfoStyle()) - return 1 - } - c.UI.Output("No existing %s installations found.", common.ReleaseTypeConsulDemo, terminal.WithSuccessStyle()) - } - // Handle preset, value files, and set values logic. 
vals, err := c.mergeValuesFlagsWithPrecedence(settings) if err != nil { @@ -338,104 +276,104 @@ func (c *Command) Run(args []string) int { return 1 } - release.Configuration = helmVals - - // If an enterprise license secret was provided, check that the secret exists and that the enterprise Consul image is set. - if helmVals.Global.EnterpriseLicense.SecretName != "" { - if err := c.checkValidEnterprise(release.Configuration.Global.EnterpriseLicense.SecretName); err != nil { - c.UI.Output(err.Error(), terminal.WithErrorStyle()) - return 1 - } - c.UI.Output("Valid enterprise Consul secret found.", terminal.WithSuccessStyle()) + rel := release.Release{ + Name: "consul", + Namespace: c.flagNamespace, + Configuration: helmVals, } - err = c.installConsul(valuesYaml, vals, settings, uiLogger) + msg, err := c.checkForPreviousSecrets(rel) if err != nil { c.UI.Output(err.Error(), terminal.WithErrorStyle()) return 1 } + c.UI.Output(msg, terminal.WithSuccessStyle()) - if c.flagDemo { - timeout, err := time.ParseDuration(c.flagTimeout) - if err != nil { + // If an enterprise license secret was provided, check that the secret exists and that the enterprise Consul image is set. + if helmVals.Global.EnterpriseLicense.SecretName != "" { + if err := c.checkValidEnterprise(rel.Configuration.Global.EnterpriseLicense.SecretName); err != nil { c.UI.Output(err.Error(), terminal.WithErrorStyle()) return 1 } - options := &helm.InstallOptions{ - ReleaseName: common.ConsulDemoAppReleaseName, - ReleaseType: common.ReleaseTypeConsulDemo, - Namespace: c.flagNamespace, - Values: make(map[string]interface{}), - Settings: settings, - EmbeddedChart: consulChart.DemoHelmChart, - ChartDirName: "demo", - UILogger: uiLogger, - DryRun: c.flagDryRun, - AutoApprove: c.flagAutoApprove, - Wait: c.flagWait, - Timeout: timeout, - UI: c.UI, - HelmActionsRunner: c.helmActionsRunner, - } - err = helm.InstallDemoApp(options) - if err != nil { - c.UI.Output(err.Error(), terminal.WithErrorStyle()) - return 1 + c.UI.Output("Valid enterprise Consul secret found.", terminal.WithSuccessStyle()) + } + + // Print out the installation summary. + if !c.flagAutoApprove { + c.UI.Output("Consul Installation Summary", terminal.WithHeaderStyle()) + c.UI.Output("Name: %s", common.DefaultReleaseName, terminal.WithInfoStyle()) + c.UI.Output("Namespace: %s", c.flagNamespace, terminal.WithInfoStyle()) + + if len(vals) == 0 { + c.UI.Output("\nNo overrides provided, using the default Helm values.", terminal.WithInfoStyle()) + } else { + c.UI.Output("\nHelm value overrides\n-------------------\n"+string(valuesYaml), terminal.WithInfoStyle()) } } + // Without informing the user, default global.name to consul if it hasn't been set already. We don't allow setting + // the release name, and since that is hardcoded to "consul", setting global.name to "consul" makes it so resources + // aren't double prefixed with "consul-consul-...". + vals = common.MergeMaps(config.Convert(config.GlobalNameConsul), vals) + if c.flagDryRun { c.UI.Output("Dry run complete. No changes were made to the Kubernetes cluster.\n"+ "Installation can proceed with this configuration.", terminal.WithInfoStyle()) + return 0 } - return 0 -} + if !c.flagAutoApprove { + confirmation, err := c.UI.Input(&terminal.Input{ + Prompt: "Proceed with installation? (y/N)", + Style: terminal.InfoStyle, + Secret: false, + }) -func (c *Command) installConsul(valuesYaml []byte, vals map[string]interface{}, settings *helmCLI.EnvSettings, uiLogger action.DebugLog) error { - // Print out the installation summary. 
- c.UI.Output("Consul Installation Summary", terminal.WithHeaderStyle()) - c.UI.Output("Name: %s", common.DefaultReleaseName, terminal.WithInfoStyle()) - c.UI.Output("Namespace: %s", c.flagNamespace, terminal.WithInfoStyle()) - - if len(vals) == 0 { - c.UI.Output("\nNo overrides provided, using the default Helm values.", terminal.WithInfoStyle()) - } else { - c.UI.Output("\nHelm value overrides\n--------------------\n"+string(valuesYaml), terminal.WithInfoStyle()) + if err != nil { + c.UI.Output(err.Error(), terminal.WithErrorStyle()) + return 1 + } + if common.Abort(confirmation) { + c.UI.Output("Install aborted. Use the command `consul-k8s install -help` to learn how to customize your installation.", + terminal.WithInfoStyle()) + return 1 + } } - // Without informing the user, default global.name to consul if it hasn't been set already. We don't allow setting - // the release name, and since that is hardcoded to "consul", setting global.name to "consul" makes it so resources - // aren't double prefixed with "consul-consul-...". - vals = common.MergeMaps(config.ConvertToMap(config.GlobalNameConsul), vals) + c.UI.Output("Installing Consul", terminal.WithHeaderStyle()) - timeout, err := time.ParseDuration(c.flagTimeout) + // Setup action configuration for Helm Go SDK function calls. + actionConfig := new(action.Configuration) + actionConfig, err = helm.InitActionConfig(actionConfig, c.flagNamespace, settings, uiLogger) if err != nil { - return err + c.UI.Output(err.Error(), terminal.WithErrorStyle()) + return 1 } - installOptions := &helm.InstallOptions{ - ReleaseName: common.DefaultReleaseName, - ReleaseType: common.ReleaseTypeConsul, - Namespace: c.flagNamespace, - Values: vals, - Settings: settings, - EmbeddedChart: consulChart.ConsulHelmChart, - ChartDirName: common.TopLevelChartDirName, - UILogger: uiLogger, - DryRun: c.flagDryRun, - AutoApprove: c.flagAutoApprove, - Wait: c.flagWait, - Timeout: timeout, - UI: c.UI, - HelmActionsRunner: c.helmActionsRunner, - } - - err = helm.InstallHelmRelease(installOptions) + + // Setup the installation action. + install := action.NewInstall(actionConfig) + install.ReleaseName = common.DefaultReleaseName + install.Namespace = c.flagNamespace + install.CreateNamespace = true + install.Wait = c.flagWait + install.Timeout = c.timeoutDuration + + // Load the Helm chart. + chart, err := helm.LoadChart(consulChart.ConsulHelmChart, common.TopLevelChartDirName) if err != nil { - return err + c.UI.Output(err.Error(), terminal.WithErrorStyle()) + return 1 } + c.UI.Output("Downloaded charts", terminal.WithSuccessStyle()) - return nil + // Run the install. + if _, err = install.Run(chart, vals); err != nil { + c.UI.Output(err.Error(), terminal.WithErrorStyle()) + return 1 + } + + c.UI.Output("Consul installed in namespace %q.", c.flagNamespace, terminal.WithSuccessStyle()) + return 0 } // Help returns a description of the command and how it is used. @@ -467,8 +405,6 @@ func (c *Command) AutocompleteFlags() complete.Flags { fmt.Sprintf("-%s", flagNameWait): complete.PredictNothing, fmt.Sprintf("-%s", flagNameContext): complete.PredictNothing, fmt.Sprintf("-%s", flagNameKubeconfig): complete.PredictNothing, - fmt.Sprintf("-%s", flagNameDemo): complete.PredictNothing, - fmt.Sprintf("-%s", flagNameHCPResourceID): complete.PredictNothing, } } @@ -564,14 +500,7 @@ func (c *Command) mergeValuesFlagsWithPrecedence(settings *helmCLI.EnvSettings) } if c.flagPreset != defaultPreset { // Note the ordering of the function call, presets have lower precedence than set vals. 
- p, err := c.getPreset(c.flagPreset) - if err != nil { - return nil, fmt.Errorf("error getting preset provider: %s", err) - } - presetMap, err := p.GetValueMap() - if err != nil { - return nil, fmt.Errorf("error getting preset values: %s", err) - } + presetMap := config.Presets[c.flagPreset].(map[string]interface{}) vals = common.MergeMaps(presetMap, vals) } return vals, err @@ -588,28 +517,13 @@ func (c *Command) validateFlags(args []string) error { if len(c.flagValueFiles) != 0 && c.flagPreset != defaultPreset { return fmt.Errorf("cannot set both -%s and -%s", flagNameConfigFile, flagNamePreset) } - if ok := slices.Contains(preset.Presets, c.flagPreset); c.flagPreset != defaultPreset && !ok { + if _, ok := config.Presets[c.flagPreset]; c.flagPreset != defaultPreset && !ok { return fmt.Errorf("'%s' is not a valid preset", c.flagPreset) } if !common.IsValidLabel(c.flagNamespace) { return fmt.Errorf("'%s' is an invalid namespace. Namespaces follow the RFC 1123 label convention and must "+ "consist of a lower case alphanumeric character or '-' and must start/end with an alphanumeric character", c.flagNamespace) } - - if c.flagPreset == preset.PresetCloud { - clientID := os.Getenv(preset.EnvHCPClientID) - clientSecret := os.Getenv(preset.EnvHCPClientSecret) - if clientID == "" { - return fmt.Errorf("When '%s' is specified as the preset, the '%s' environment variable must also be set", preset.PresetCloud, preset.EnvHCPClientID) - } else if clientSecret == "" { - return fmt.Errorf("When '%s' is specified as the preset, the '%s' environment variable must also be set", preset.PresetCloud, preset.EnvHCPClientSecret) - } else if c.flagNameHCPResourceID == "" { - return fmt.Errorf("When '%s' is specified as the preset, the '%s' flag must also be provided", preset.PresetCloud, flagNameHCPResourceID) - } - } else if c.flagNameHCPResourceID != "" { - return fmt.Errorf("The '%s' flag can only be used with the '%s' preset", flagNameHCPResourceID, preset.PresetCloud) - } - duration, err := time.ParseDuration(c.flagTimeout) if err != nil { return fmt.Errorf("unable to parse -%s: %s", flagNameTimeout, err) @@ -638,22 +552,3 @@ func (c *Command) checkValidEnterprise(secretName string) error { } return nil } - -// getPreset is a factory function that, given a string, produces a struct that -// implements the Preset interface. If the string is not recognized an error is -// returned. 
-func (c *Command) getPreset(name string) (preset.Preset, error) { - hcpConfig := preset.GetHCPPresetFromEnv(c.flagNameHCPResourceID) - getPresetConfig := &preset.GetPresetConfig{ - Name: name, - CloudPreset: &preset.CloudPreset{ - KubernetesClient: c.kubernetes, - KubernetesNamespace: c.flagNamespace, - HCPConfig: hcpConfig, - UI: c.UI, - HTTPClient: c.httpClient, - Context: c.Ctx, - }, - } - return preset.GetPreset(getPresetConfig) -} diff --git a/cli/cmd/install/install_test.go b/cli/cmd/install/install_test.go index 1415ea5be9..a66febc336 100644 --- a/cli/cmd/install/install_test.go +++ b/cli/cmd/install/install_test.go @@ -1,41 +1,40 @@ package install import ( - "bytes" "context" - "errors" "flag" "fmt" - "io" "os" "testing" "github.com/hashicorp/consul-k8s/cli/common" cmnFlag "github.com/hashicorp/consul-k8s/cli/common/flag" - "github.com/hashicorp/consul-k8s/cli/common/terminal" "github.com/hashicorp/consul-k8s/cli/helm" - "github.com/hashicorp/consul-k8s/cli/preset" "github.com/hashicorp/consul-k8s/cli/release" "github.com/hashicorp/go-hclog" "github.com/posener/complete" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "helm.sh/helm/v3/pkg/action" - "helm.sh/helm/v3/pkg/chart" - helmRelease "helm.sh/helm/v3/pkg/release" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/kubernetes" "k8s.io/client-go/kubernetes/fake" ) func TestCheckForPreviousPVCs(t *testing.T) { - c := getInitializedCommand(t, nil) + c := getInitializedCommand(t) c.kubernetes = fake.NewSimpleClientset() - - createPVC(t, "consul-server-test1", "default", c.kubernetes) - createPVC(t, "consul-server-test2", "default", c.kubernetes) - + pvc := &v1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: "consul-server-test1", + }, + } + pvc2 := &v1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: "consul-server-test2", + }, + } + c.kubernetes.CoreV1().PersistentVolumeClaims("default").Create(context.Background(), pvc, metav1.CreateOptions{}) + c.kubernetes.CoreV1().PersistentVolumeClaims("default").Create(context.Background(), pvc2, metav1.CreateOptions{}) err := c.checkForPreviousPVCs() require.Error(t, err) require.Equal(t, err.Error(), "found persistent volume claims from previous installations, delete before reinstalling: default/consul-server-test1,default/consul-server-test2") @@ -46,7 +45,12 @@ func TestCheckForPreviousPVCs(t *testing.T) { require.NoError(t, err) // Add a new irrelevant PVC and make sure the check continues to pass. 
- createPVC(t, "irrelevant-pvc", "default", c.kubernetes) + pvc = &v1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: "irrelevant-pvc", + }, + } + c.kubernetes.CoreV1().PersistentVolumeClaims("default").Create(context.Background(), pvc, metav1.CreateOptions{}) err = c.checkForPreviousPVCs() require.NoError(t, err) } @@ -142,7 +146,7 @@ func TestCheckForPreviousSecrets(t *testing.T) { for name, tc := range cases { t.Run(name, func(t *testing.T) { - c := getInitializedCommand(t, nil) + c := getInitializedCommand(t) c.kubernetes = fake.NewSimpleClientset() c.kubernetes.CoreV1().Secrets("consul").Create(context.Background(), tc.secret, metav1.CreateOptions{}) @@ -190,7 +194,7 @@ func TestValidateFlags(t *testing.T) { } for _, testCase := range testCases { - c := getInitializedCommand(t, nil) + c := getInitializedCommand(t) t.Run(testCase.description, func(t *testing.T) { if err := c.validateFlags(testCase.input); err == nil { t.Errorf("Test case should have failed.") @@ -200,22 +204,16 @@ func TestValidateFlags(t *testing.T) { } // getInitializedCommand sets up a command struct for tests. -func getInitializedCommand(t *testing.T, buf io.Writer) *Command { +func getInitializedCommand(t *testing.T) *Command { t.Helper() log := hclog.New(&hclog.LoggerOptions{ Name: "cli", Level: hclog.Info, Output: os.Stdout, }) - var ui terminal.UI - if buf != nil { - ui = terminal.NewUI(context.Background(), buf) - } else { - ui = terminal.NewBasicUI(context.Background()) - } + baseCommand := &common.BaseCommand{ Log: log, - UI: ui, } c := &Command{ @@ -226,7 +224,7 @@ func getInitializedCommand(t *testing.T, buf io.Writer) *Command { } func TestCheckValidEnterprise(t *testing.T) { - c := getInitializedCommand(t, nil) + c := getInitializedCommand(t) c.kubernetes = fake.NewSimpleClientset() secret := &v1.Secret{ ObjectMeta: metav1.ObjectMeta{ @@ -240,7 +238,7 @@ func TestCheckValidEnterprise(t *testing.T) { } // Enterprise secret is valid. - createSecret(t, secret, "consul", c.kubernetes) + c.kubernetes.CoreV1().Secrets("consul").Create(context.Background(), secret, metav1.CreateOptions{}) err := c.checkValidEnterprise(secret.Name) require.NoError(t, err) @@ -258,7 +256,7 @@ func TestCheckValidEnterprise(t *testing.T) { func TestTaskCreateCommand_AutocompleteFlags(t *testing.T) { t.Parallel() - cmd := getInitializedCommand(t, nil) + cmd := getInitializedCommand(t) predictor := cmd.AutocompleteFlags() @@ -281,437 +279,7 @@ func TestTaskCreateCommand_AutocompleteFlags(t *testing.T) { } func TestTaskCreateCommand_AutocompleteArgs(t *testing.T) { - cmd := getInitializedCommand(t, nil) + cmd := getInitializedCommand(t) c := cmd.AutocompleteArgs() assert.Equal(t, complete.PredictNothing, c) } - -// TestValidateCloudPresets tests the validate flags function when passed the cloud preset. 
-func TestValidateCloudPresets(t *testing.T) { - testCases := []struct { - description string - input []string - preProcessingFunc func() - postProcessingFunc func() - expectError bool - }{ - { - "Should not error on cloud preset when HCP_CLIENT_ID and HCP_CLIENT_SECRET envvars are present and hcp-resource-id parameter is provided.", - []string{"-preset=cloud", "-hcp-resource-id=foobar"}, - func() { - os.Setenv("HCP_CLIENT_ID", "foo") - os.Setenv("HCP_CLIENT_SECRET", "bar") - }, - func() { - os.Unsetenv("HCP_CLIENT_ID") - os.Unsetenv("HCP_CLIENT_SECRET") - }, - false, - }, - { - "Should error on cloud preset when HCP_CLIENT_ID is not provided.", - []string{"-preset=cloud", "-hcp-resource-id=foobar"}, - func() { - os.Unsetenv("HCP_CLIENT_ID") - os.Setenv("HCP_CLIENT_SECRET", "bar") - }, - func() { - os.Unsetenv("HCP_CLIENT_ID") - os.Unsetenv("HCP_CLIENT_SECRET") - }, - true, - }, - { - "Should error on cloud preset when HCP_CLIENT_SECRET is not provided.", - []string{"-preset=cloud", "-hcp-resource-id=foobar"}, - func() { - os.Setenv("HCP_CLIENT_ID", "foo") - os.Unsetenv("HCP_CLIENT_SECRET") - }, - func() { - os.Unsetenv("HCP_CLIENT_ID") - os.Unsetenv("HCP_CLIENT_SECRET") - }, - true, - }, - { - "Should error on cloud preset when -hcp-resource-id flag is not provided.", - []string{"-preset=cloud"}, - func() { - os.Setenv("HCP_CLIENT_ID", "foo") - os.Setenv("HCP_CLIENT_SECRET", "bar") - }, - func() { - os.Unsetenv("HCP_CLIENT_ID") - os.Unsetenv("HCP_CLIENT_SECRET") - }, - true, - }, - { - "Should error when -hcp-resource-id flag is provided but cloud preset is not specified.", - []string{"-hcp-resource-id=foobar"}, - func() { - os.Setenv("HCP_CLIENT_ID", "foo") - os.Setenv("HCP_CLIENT_SECRET", "bar") - }, - func() { - os.Unsetenv("HCP_CLIENT_ID") - os.Unsetenv("HCP_CLIENT_SECRET") - }, - true, - }, - } - - for _, testCase := range testCases { - testCase.preProcessingFunc() - c := getInitializedCommand(t, nil) - t.Run(testCase.description, func(t *testing.T) { - err := c.validateFlags(testCase.input) - if testCase.expectError { - require.Error(t, err) - } else { - require.NoError(t, err) - } - }) - defer testCase.postProcessingFunc() - } -} - -func TestGetPreset(t *testing.T) { - testCases := []struct { - description string - presetName string - }{ - { - "'cloud' should return a CloudPreset'.", - preset.PresetCloud, - }, - { - "'quickstart' should return a QuickstartPreset'.", - preset.PresetQuickstart, - }, - { - "'secure' should return a SecurePreset'.", - preset.PresetSecure, - }, - } - - for _, tc := range testCases { - c := getInitializedCommand(t, nil) - t.Run(tc.description, func(t *testing.T) { - p, err := c.getPreset(tc.presetName) - require.NoError(t, err) - switch p.(type) { - case *preset.CloudPreset: - require.Equal(t, preset.PresetCloud, tc.presetName) - case *preset.QuickstartPreset: - require.Equal(t, preset.PresetQuickstart, tc.presetName) - case *preset.SecurePreset: - require.Equal(t, preset.PresetSecure, tc.presetName) - } - }) - } -} - -func TestInstall(t *testing.T) { - var k8s kubernetes.Interface - licenseSecretName := "consul-license" - cases := map[string]struct { - input []string - messages []string - helmActionsRunner *helm.MockActionRunner - preProcessingFunc func() - expectedReturnCode int - expectCheckedForConsulInstallations bool - expectCheckedForConsulDemoInstallations bool - expectConsulInstalled bool - expectConsulDemoInstalled bool - }{ - "install with no arguments returns success": { - input: []string{}, - messages: []string{ - "\n==> Checking if Consul 
can be installed\n ✓ No existing Consul installations found.\n ✓ No existing Consul persistent volume claims found\n ✓ No existing Consul secrets found.\n", - "\n==> Consul Installation Summary\n Name: consul\n Namespace: consul\n \n No overrides provided, using the default Helm values.\n", - "\n==> Installing Consul\n ✓ Downloaded charts.\n ✓ Consul installed in namespace \"consul\".\n", - }, - helmActionsRunner: &helm.MockActionRunner{}, - expectedReturnCode: 0, - expectCheckedForConsulInstallations: true, - expectCheckedForConsulDemoInstallations: false, - expectConsulInstalled: true, - expectConsulDemoInstalled: false, - }, - "install when consul installation errors returns error": { - input: []string{}, - messages: []string{ - "\n==> Checking if Consul can be installed\n ✓ No existing Consul installations found.\n ✓ No existing Consul persistent volume claims found\n ✓ No existing Consul secrets found.\n", - "\n==> Consul Installation Summary\n Name: consul\n Namespace: consul\n \n No overrides provided, using the default Helm values.\n", - "\n==> Installing Consul\n ✓ Downloaded charts.\n ! Helm returned an error.\n", - }, - helmActionsRunner: &helm.MockActionRunner{ - InstallFunc: func(install *action.Install, chrt *chart.Chart, vals map[string]interface{}) (*helmRelease.Release, error) { - return nil, errors.New("Helm returned an error.") - }, - }, - expectedReturnCode: 1, - expectCheckedForConsulInstallations: true, - expectCheckedForConsulDemoInstallations: false, - expectConsulInstalled: false, - expectConsulDemoInstalled: false, - }, - "install with no arguments when consul installation already exists returns error": { - input: []string{ - "--auto-approve", - }, - messages: []string{ - "\n==> Checking if Consul can be installed\n ! Cannot install Consul. A Consul cluster is already installed in namespace consul with name consul.\n Use the command `consul-k8s uninstall` to uninstall Consul from the cluster.\n", - }, - helmActionsRunner: &helm.MockActionRunner{ - CheckForInstallationsFunc: func(options *helm.CheckForInstallationsOptions) (bool, string, string, error) { - return true, "consul", "consul", nil - }, - }, - expectedReturnCode: 1, - expectCheckedForConsulInstallations: true, - expectCheckedForConsulDemoInstallations: false, - expectConsulInstalled: false, - expectConsulDemoInstalled: false, - }, - "install with no arguments when PVCs exist returns error": { - input: []string{}, - messages: []string{ - "\n==> Checking if Consul can be installed\n ✓ No existing Consul installations found.\n ! found persistent volume claims from previous installations, delete before reinstalling: consul/consul-server-test1\n", - }, - helmActionsRunner: &helm.MockActionRunner{}, - preProcessingFunc: func() { - createPVC(t, "consul-server-test1", "consul", k8s) - }, - expectedReturnCode: 1, - expectCheckedForConsulInstallations: true, - expectCheckedForConsulDemoInstallations: false, - expectConsulInstalled: false, - expectConsulDemoInstalled: false, - }, - "install with no arguments when secrets exist returns error": { - input: []string{ - "--auto-approve", - }, - messages: []string{ - "\n==> Checking if Consul can be installed\n ✓ No existing Consul installations found.\n ✓ No existing Consul persistent volume claims found\n ! 
Found Consul secrets, possibly from a previous installation.\nDelete existing Consul secrets from Kubernetes:\n\nkubectl delete secret consul-secret --namespace consul\n\n", - }, - helmActionsRunner: &helm.MockActionRunner{}, - preProcessingFunc: func() { - secret := &v1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: "consul-secret", - Labels: map[string]string{common.CLILabelKey: common.CLILabelValue}, - }, - } - createSecret(t, secret, "consul", k8s) - }, - expectedReturnCode: 1, - expectCheckedForConsulInstallations: true, - expectCheckedForConsulDemoInstallations: false, - expectConsulInstalled: false, - expectConsulDemoInstalled: false, - }, - "enterprise install when license secret exists returns success": { - input: []string{ - "--set", fmt.Sprintf("global.enterpriseLicense.secretName=%s", licenseSecretName), - }, - messages: []string{ - "\n==> Checking if Consul can be installed\n ✓ No existing Consul installations found.\n ✓ No existing Consul persistent volume claims found\n ✓ No existing Consul secrets found.\n ✓ Valid enterprise Consul secret found.\n", - "\n==> Consul Installation Summary\n Name: consul\n Namespace: consul\n \n Helm value overrides\n --------------------\n global:\n enterpriseLicense:\n secretName: consul-license\n \n", - "\n==> Installing Consul\n ✓ Downloaded charts.\n ✓ Consul installed in namespace \"consul\".\n", - }, - helmActionsRunner: &helm.MockActionRunner{}, - preProcessingFunc: func() { - secret := &v1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: licenseSecretName, - }, - } - createSecret(t, secret, "consul", k8s) - }, - expectedReturnCode: 0, - expectCheckedForConsulInstallations: true, - expectCheckedForConsulDemoInstallations: false, - expectConsulInstalled: true, - expectConsulDemoInstalled: false, - }, - "enterprise install when license secret does not exist returns error": { - input: []string{ - "--set", fmt.Sprintf("global.enterpriseLicense.secretName=%s", licenseSecretName), - }, - messages: []string{ - "\n==> Checking if Consul can be installed\n ✓ No existing Consul installations found.\n ✓ No existing Consul persistent volume claims found\n ✓ No existing Consul secrets found.\n ! 
enterprise license secret \"consul-license\" is not found in the \"consul\" namespace; please make sure that the secret exists in the \"consul\" namespace\n"}, - helmActionsRunner: &helm.MockActionRunner{}, - expectedReturnCode: 1, - expectCheckedForConsulInstallations: true, - expectCheckedForConsulDemoInstallations: false, - expectConsulInstalled: false, - expectConsulDemoInstalled: false, - }, - "install for quickstart preset returns success": { - input: []string{ - "-preset", "quickstart", - }, - messages: []string{ - "\n==> Checking if Consul can be installed\n ✓ No existing Consul installations found.\n ✓ No existing Consul persistent volume claims found\n ✓ No existing Consul secrets found.\n", - "\n==> Consul Installation Summary\n Name: consul\n Namespace: consul\n \n Helm value overrides\n --------------------\n connectInject:\n enabled: true\n metrics:\n defaultEnableMerging: true\n defaultEnabled: true\n enableGatewayMetrics: true\n controller:\n enabled: true\n global:\n metrics:\n enableAgentMetrics: true\n enabled: true\n name: consul\n prometheus:\n enabled: true\n server:\n replicas: 1\n ui:\n enabled: true\n service:\n enabled: true\n \n", - "\n==> Installing Consul\n ✓ Downloaded charts.\n ✓ Consul installed in namespace \"consul\".\n", - }, - helmActionsRunner: &helm.MockActionRunner{}, - expectedReturnCode: 0, - expectCheckedForConsulInstallations: true, - expectCheckedForConsulDemoInstallations: false, - expectConsulInstalled: true, - expectConsulDemoInstalled: false, - }, - "install for secure preset returns success": { - input: []string{ - "-preset", "secure", - }, - messages: []string{ - "\n==> Checking if Consul can be installed\n ✓ No existing Consul installations found.\n ✓ No existing Consul persistent volume claims found\n ✓ No existing Consul secrets found.\n", - "\n==> Consul Installation Summary\n Name: consul\n Namespace: consul\n \n Helm value overrides\n --------------------\n connectInject:\n enabled: true\n controller:\n enabled: true\n global:\n acls:\n manageSystemACLs: true\n gossipEncryption:\n autoGenerate: true\n name: consul\n tls:\n enableAutoEncrypt: true\n enabled: true\n server:\n replicas: 1\n \n", - "\n==> Installing Consul\n ✓ Downloaded charts.\n ✓ Consul installed in namespace \"consul\".\n", - }, - helmActionsRunner: &helm.MockActionRunner{}, - expectedReturnCode: 0, - expectCheckedForConsulInstallations: true, - expectCheckedForConsulDemoInstallations: false, - expectConsulInstalled: true, - expectConsulDemoInstalled: false, - }, - "install with demo flag returns success": { - input: []string{ - "-demo", - }, - messages: []string{ - "\n==> Checking if Consul can be installed\n ✓ No existing Consul installations found.\n ✓ No existing Consul persistent volume claims found\n ✓ No existing Consul secrets found.\n", - "\n==> Checking if Consul Demo Application can be installed\n ✓ No existing Consul demo application installations found.\n", - "\n==> Consul Installation Summary\n Name: consul\n Namespace: consul\n \n No overrides provided, using the default Helm values.\n", - "\n==> Installing Consul\n ✓ Downloaded charts.\n ✓ Consul installed in namespace \"consul\".\n", - "\n==> Consul Demo Application Installation Summary\n Name: consul-demo\n Namespace: consul\n \n \n", - "\n==> Installing Consul demo application\n ✓ Downloaded charts.\n ✓ Consul demo application installed in namespace \"consul\".\n", - "\n==> Accessing Consul Demo Application UI\n kubectl port-forward deploy/frontend 8080:80 --namespace consul\n Browse to 
http://localhost:8080.\n", - }, - helmActionsRunner: &helm.MockActionRunner{}, - expectedReturnCode: 0, - expectCheckedForConsulInstallations: true, - expectCheckedForConsulDemoInstallations: true, - expectConsulInstalled: true, - expectConsulDemoInstalled: true, - }, - "install with demo flag when consul demo installation errors returns error": { - input: []string{ - "-demo", - }, - messages: []string{ - "\n==> Checking if Consul can be installed\n ✓ No existing Consul installations found.\n ✓ No existing Consul persistent volume claims found\n ✓ No existing Consul secrets found.\n", - "\n==> Checking if Consul Demo Application can be installed\n ✓ No existing Consul demo application installations found.\n", - "\n==> Consul Installation Summary\n Name: consul\n Namespace: consul\n \n No overrides provided, using the default Helm values.\n", - "\n==> Installing Consul\n ✓ Downloaded charts.\n ✓ Consul installed in namespace \"consul\".\n", - "\n==> Consul Demo Application Installation Summary\n Name: consul-demo\n Namespace: consul\n \n \n", - "\n==> Installing Consul demo application\n ✓ Downloaded charts.\n ! Helm returned an error.\n", - }, - helmActionsRunner: &helm.MockActionRunner{ - InstallFunc: func(install *action.Install, chrt *chart.Chart, vals map[string]interface{}) (*helmRelease.Release, error) { - if install.ReleaseName == "consul" { - return &helmRelease.Release{Name: install.ReleaseName}, nil - } - return nil, errors.New("Helm returned an error.") - }, - }, - expectedReturnCode: 1, - expectCheckedForConsulInstallations: true, - expectCheckedForConsulDemoInstallations: true, - expectConsulInstalled: true, - expectConsulDemoInstalled: false, - }, - "install with demo flag when demo is already installed returns error and does not install consul or the demo": { - input: []string{ - "-demo", - }, - messages: []string{ - "\n==> Checking if Consul can be installed\n ✓ No existing Consul installations found.\n ✓ No existing Consul persistent volume claims found\n ✓ No existing Consul secrets found.\n", - "\n==> Checking if Consul Demo Application can be installed\n ! Cannot install Consul demo application. A Consul demo application cluster is already installed in namespace consul-demo with name consul-demo.\n Use the command `consul-k8s uninstall` to uninstall the Consul demo application from the cluster.\n", - }, - helmActionsRunner: &helm.MockActionRunner{ - CheckForInstallationsFunc: func(options *helm.CheckForInstallationsOptions) (bool, string, string, error) { - if options.ReleaseName == "consul" { - return false, "", "", nil - } else { - return true, "consul-demo", "consul-demo", nil - } - }, - }, - expectedReturnCode: 1, - expectCheckedForConsulInstallations: true, - expectCheckedForConsulDemoInstallations: true, - expectConsulInstalled: false, - expectConsulDemoInstalled: false, - }, - "install with --dry-run flag returns success": { - input: []string{ - "--dry-run", - }, - messages: []string{ - "\n==> Performing dry run install. No changes will be made to the cluster.\n", - "\n==> Checking if Consul can be installed\n ✓ No existing Consul installations found.\n ✓ No existing Consul persistent volume claims found\n ✓ No existing Consul secrets found.\n", - "\n==> Consul Installation Summary\n Name: consul\n Namespace: consul\n \n No overrides provided, using the default Helm values.\n Dry run complete. 
No changes were made to the Kubernetes cluster.\n Installation can proceed with this configuration.\n", - }, - helmActionsRunner: &helm.MockActionRunner{}, - expectedReturnCode: 0, - expectCheckedForConsulInstallations: true, - expectCheckedForConsulDemoInstallations: false, - expectConsulInstalled: false, - expectConsulDemoInstalled: false, - }, - } - for name, tc := range cases { - t.Run(name, func(t *testing.T) { - buf := new(bytes.Buffer) - c := getInitializedCommand(t, buf) - k8s = fake.NewSimpleClientset() - c.kubernetes = k8s - mock := tc.helmActionsRunner - c.helmActionsRunner = mock - if tc.preProcessingFunc != nil { - tc.preProcessingFunc() - } - input := append([]string{ - "--auto-approve", - }, tc.input...) - returnCode := c.Run(input) - require.Equal(t, tc.expectedReturnCode, returnCode) - require.Equal(t, tc.expectCheckedForConsulInstallations, mock.CheckedForConsulInstallations) - require.Equal(t, tc.expectCheckedForConsulDemoInstallations, mock.CheckedForConsulDemoInstallations) - require.Equal(t, tc.expectConsulInstalled, mock.ConsulInstalled) - require.Equal(t, tc.expectConsulDemoInstalled, mock.ConsulDemoInstalled) - output := buf.String() - for _, msg := range tc.messages { - require.Contains(t, output, msg) - } - }) - } -} - -func createPVC(t *testing.T, name string, namespace string, k8s kubernetes.Interface) { - t.Helper() - - pvc := &v1.PersistentVolumeClaim{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - }, - } - _, err := k8s.CoreV1().PersistentVolumeClaims(namespace).Create(context.Background(), pvc, metav1.CreateOptions{}) - require.NoError(t, err) -} - -func createSecret(t *testing.T, secret *v1.Secret, namespace string, k8s kubernetes.Interface) { - t.Helper() - _, err := k8s.CoreV1().Secrets(namespace).Create(context.Background(), secret, metav1.CreateOptions{}) - require.NoError(t, err) -} diff --git a/cli/cmd/status/status.go b/cli/cmd/status/status.go index c2108cc631..19f5a52398 100644 --- a/cli/cmd/status/status.go +++ b/cli/cmd/status/status.go @@ -28,8 +28,6 @@ const ( type Command struct { *common.BaseCommand - helmActionsRunner helm.HelmActionsRunner - kubernetes kubernetes.Interface set *flag.Sets @@ -65,11 +63,10 @@ func (c *Command) init() { // Run checks the status of a Consul installation on Kubernetes. func (c *Command) Run(args []string) int { c.once.Do(c.init) - if c.helmActionsRunner == nil { - c.helmActionsRunner = &helm.ActionRunner{} - } + // The logger is initialized in main with the name cli. Here, we reset the name to status so log lines would be prefixed with status. 
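+	// hclog's ResetNamed sets the logger name outright, unlike Named, which
+	// appends to the existing "cli" name.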
c.Log.ResetNamed("status") + defer common.CloseWithError(c.BaseCommand) if err := c.set.Parse(args); err != nil { @@ -104,11 +101,7 @@ func (c *Command) Run(args []string) int { c.UI.Output("Consul Status Summary", terminal.WithHeaderStyle()) - _, releaseName, namespace, err := c.helmActionsRunner.CheckForInstallations(&helm.CheckForInstallationsOptions{ - Settings: settings, - ReleaseName: common.DefaultReleaseName, - DebugLog: uiLogger, - }) + releaseName, namespace, err := common.CheckForInstallations(settings, uiLogger) if err != nil { c.UI.Output(err.Error(), terminal.WithErrorStyle()) return 1 @@ -119,9 +112,18 @@ func (c *Command) Run(args []string) int { return 1 } - if err := c.checkConsulServers(namespace); err != nil { - c.UI.Output("Unable to check Kubernetes cluster for Consul servers: %v", err) + if s, err := c.checkConsulServers(namespace); err != nil { + c.UI.Output(err.Error(), terminal.WithErrorStyle()) return 1 + } else { + c.UI.Output(s, terminal.WithSuccessStyle()) + } + + if s, err := c.checkConsulClients(namespace); err != nil { + c.UI.Output(err.Error(), terminal.WithErrorStyle()) + return 1 + } else { + c.UI.Output(s, terminal.WithSuccessStyle()) } return 0 @@ -163,7 +165,7 @@ func (c *Command) checkHelmInstallation(settings *helmCLI.EnvSettings, uiLogger } statuser := action.NewStatus(statusConfig) - rel, err := c.helmActionsRunner.GetStatus(statuser, releaseName) + rel, err := statuser.Run(releaseName) if err != nil { return fmt.Errorf("couldn't check for installations: %s", err) } @@ -214,24 +216,43 @@ func validEvent(events []release.HookEvent) bool { return false } -// checkConsulServers prints the status of Consul servers if they -// are expected to be found in the Kubernetes cluster. It does not check for -// server status if they are not running within the Kubernetes cluster. -func (c *Command) checkConsulServers(namespace string) error { - servers, err := c.kubernetes.AppsV1().StatefulSets(namespace).List(c.Ctx, metav1.ListOptions{LabelSelector: "app=consul,chart=consul-helm,component=server"}) +// checkConsulServers uses the Kubernetes list function to report if the consul servers are healthy. +func (c *Command) checkConsulServers(namespace string) (string, error) { + servers, err := c.kubernetes.AppsV1().StatefulSets(namespace).List(c.Ctx, + metav1.ListOptions{LabelSelector: "app=consul,chart=consul-helm,component=server"}) if err != nil { - return err + return "", err + } else if len(servers.Items) == 0 { + return "", errors.New("no server stateful set found") + } else if len(servers.Items) > 1 { + return "", errors.New("found multiple server stateful sets") } - if len(servers.Items) != 0 { - desiredServers, readyServers := int(*servers.Items[0].Spec.Replicas), int(servers.Items[0].Status.ReadyReplicas) - if readyServers < desiredServers { - c.UI.Output("Consul servers healthy %d/%d", readyServers, desiredServers, terminal.WithErrorStyle()) - } else { - c.UI.Output("Consul servers healthy %d/%d", readyServers, desiredServers) - } + + desiredReplicas := int(*servers.Items[0].Spec.Replicas) + readyReplicas := int(servers.Items[0].Status.ReadyReplicas) + if readyReplicas < desiredReplicas { + return "", fmt.Errorf("%d/%d Consul servers unhealthy", desiredReplicas-readyReplicas, desiredReplicas) } + return fmt.Sprintf("Consul servers healthy (%d/%d)", readyReplicas, desiredReplicas), nil +} - return nil +// checkConsulClients uses the Kubernetes list function to report if the consul clients are healthy. 
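+// It lists daemon sets in the namespace with the label selector
+// "app=consul,chart=consul-helm", requires exactly one match, and treats the
+// clients as healthy only when NumberReady is at least DesiredNumberScheduled.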
+func (c *Command) checkConsulClients(namespace string) (string, error) { + clients, err := c.kubernetes.AppsV1().DaemonSets(namespace).List(c.Ctx, + metav1.ListOptions{LabelSelector: "app=consul,chart=consul-helm"}) + if err != nil { + return "", err + } else if len(clients.Items) == 0 { + return "", errors.New("no client daemon set found") + } else if len(clients.Items) > 1 { + return "", errors.New("found multiple client daemon sets") + } + desiredReplicas := int(clients.Items[0].Status.DesiredNumberScheduled) + readyReplicas := int(clients.Items[0].Status.NumberReady) + if readyReplicas < desiredReplicas { + return "", fmt.Errorf("%d/%d Consul clients unhealthy", desiredReplicas-readyReplicas, desiredReplicas) + } + return fmt.Sprintf("Consul clients healthy (%d/%d)", readyReplicas, desiredReplicas), nil } // setupKubeClient to use for non Helm SDK calls to the Kubernetes API The Helm SDK will use diff --git a/cli/cmd/status/status_test.go b/cli/cmd/status/status_test.go index 8666fd8493..b45ffef556 100644 --- a/cli/cmd/status/status_test.go +++ b/cli/cmd/status/status_test.go @@ -1,215 +1,197 @@ package status import ( - "bytes" "context" - "errors" "flag" "fmt" - "io" "os" "testing" "github.com/hashicorp/consul-k8s/cli/common" cmnFlag "github.com/hashicorp/consul-k8s/cli/common/flag" - "github.com/hashicorp/consul-k8s/cli/common/terminal" - "github.com/hashicorp/consul-k8s/cli/helm" "github.com/hashicorp/go-hclog" "github.com/posener/complete" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "helm.sh/helm/v3/pkg/action" - "helm.sh/helm/v3/pkg/chart" - helmRelease "helm.sh/helm/v3/pkg/release" - helmTime "helm.sh/helm/v3/pkg/time" appsv1 "k8s.io/api/apps/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/kubernetes" "k8s.io/client-go/kubernetes/fake" ) +// TestCheckConsulServers creates a fake stateful set and tests the checkConsulServers function. func TestCheckConsulServers(t *testing.T) { - namespace := "default" - cases := map[string]struct { - desired int - healthy int - }{ - "No servers": {0, 0}, - "3 servers expected, 1 healthy": {3, 1}, - "3 servers expected, 3 healthy": {3, 3}, + c := getInitializedCommand(t) + c.kubernetes = fake.NewSimpleClientset() + + // First check that no stateful sets causes an error. + _, err := c.checkConsulServers("default") + require.Error(t, err) + require.Contains(t, err.Error(), "no server stateful set found") + + // Next create a stateful set with 3 desired replicas and 3 ready replicas. + var replicas int32 = 3 + + ss := &appsv1.StatefulSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "consul-server-test1", + Namespace: "default", + Labels: map[string]string{"app": "consul", "chart": "consul-helm", "component": "server"}, + }, + Spec: appsv1.StatefulSetSpec{ + Replicas: &replicas, + }, + Status: appsv1.StatefulSetStatus{ + Replicas: replicas, + ReadyReplicas: replicas, + }, } - for name, tc := range cases { - t.Run(name, func(t *testing.T) { - buf := new(bytes.Buffer) - c := getInitializedCommand(t, buf) - c.kubernetes = fake.NewSimpleClientset() - - // Deploy servers - err := createServers("consul-servers", namespace, int32(tc.desired), int32(tc.healthy), c.kubernetes) - require.NoError(t, err) - - // Verify that the correct server statuses are seen. 
- err = c.checkConsulServers(namespace) - require.NoError(t, err) - - actual := buf.String() - if tc.desired != 0 { - require.Contains(t, actual, fmt.Sprintf("Consul servers healthy %d/%d", tc.healthy, tc.desired)) - } - buf.Reset() - }) + c.kubernetes.AppsV1().StatefulSets("default").Create(context.Background(), ss, metav1.CreateOptions{}) + + // Now we run the checkConsulServers() function and it should succeed. + s, err := c.checkConsulServers("default") + require.NoError(t, err) + require.Equal(t, "Consul servers healthy (3/3)", s) + + // If you then create another stateful set it should error. + ss2 := &appsv1.StatefulSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "consul-server-test2", + Namespace: "default", + Labels: map[string]string{"app": "consul", "chart": "consul-helm", "component": "server"}, + }, + Spec: appsv1.StatefulSetSpec{ + Replicas: &replicas, + }, + Status: appsv1.StatefulSetStatus{ + Replicas: replicas, + ReadyReplicas: replicas, + }, } + c.kubernetes.AppsV1().StatefulSets("default").Create(context.Background(), ss2, metav1.CreateOptions{}) + + _, err = c.checkConsulServers("default") + require.Error(t, err) + require.Contains(t, err.Error(), "found multiple server stateful sets") + + // Clear out the client and now run a test where the stateful set isn't ready. + c.kubernetes = fake.NewSimpleClientset() + + ss3 := &appsv1.StatefulSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "consul-server-test3", + Namespace: "default", + Labels: map[string]string{"app": "consul", "chart": "consul-helm", "component": "server"}, + }, + Spec: appsv1.StatefulSetSpec{ + Replicas: &replicas, + }, + Status: appsv1.StatefulSetStatus{ + Replicas: replicas, + ReadyReplicas: replicas - 1, // Let's just set one of the servers to unhealthy + }, + } + c.kubernetes.AppsV1().StatefulSets("default").Create(context.Background(), ss3, metav1.CreateOptions{}) + + _, err = c.checkConsulServers("default") + require.Error(t, err) + require.Contains(t, err.Error(), fmt.Sprintf("%d/%d Consul servers unhealthy", 1, replicas)) } -// TestStatus creates a fake stateful set and tests the checkConsulServers function. -func TestStatus(t *testing.T) { - nowTime := helmTime.Now() - timezone, _ := nowTime.Zone() - notImeStr := nowTime.Format("2006/01/02 15:04:05") + " " + timezone - cases := map[string]struct { - input []string - messages []string - preProcessingFunc func(k8s kubernetes.Interface) error - helmActionsRunner *helm.MockActionRunner - expectedReturnCode int - }{ - "status with servers returns success": { - input: []string{}, - messages: []string{ - fmt.Sprintf("\n==> Consul Status Summary\nName\tNamespace\tStatus\tChart Version\tAppVersion\tRevision\tLast Updated \n \t \tREADY \t1.0.0 \t \t0 \t%s\t\n", notImeStr), - "\n==> Config:\n {}\n \nConsul servers healthy 3/3\n", - }, - preProcessingFunc: func(k8s kubernetes.Interface) error { - return createServers("consul-server-test1", "consul", 3, 3, k8s) - }, - - helmActionsRunner: &helm.MockActionRunner{ - GetStatusFunc: func(status *action.Status, name string) (*helmRelease.Release, error) { - return &helmRelease.Release{ - Name: "consul", Namespace: "consul", - Info: &helmRelease.Info{LastDeployed: nowTime, Status: "READY"}, - Chart: &chart.Chart{ - Metadata: &chart.Metadata{ - Version: "1.0.0", - }, - }, - Config: make(map[string]interface{})}, nil - }, - }, - expectedReturnCode: 0, +// TestCheckConsulClients is very similar to TestCheckConsulServers() in structure. 
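+// It exercises four cases against a fake clientset: no daemon set, a single
+// healthy daemon set, a second conflicting daemon set, and a daemon set with
+// fewer pods ready than desired.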
+func TestCheckConsulClients(t *testing.T) { + c := getInitializedCommand(t) + c.kubernetes = fake.NewSimpleClientset() + + // No client daemon set should cause an error. + _, err := c.checkConsulClients("default") + require.Error(t, err) + require.Contains(t, err.Error(), "no client daemon set found") + + // Next create a daemon set. + var desired int32 = 3 + + ds := &appsv1.DaemonSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "consul-client-test1", + Namespace: "default", + Labels: map[string]string{"app": "consul", "chart": "consul-helm"}, }, - "status with pre-install and pre-upgrade hooks returns success and outputs hook status": { - input: []string{}, - messages: []string{ - fmt.Sprintf("\n==> Consul Status Summary\nName\tNamespace\tStatus\tChart Version\tAppVersion\tRevision\tLast Updated \n \t \tREADY \t1.0.0 \t \t0 \t%s\t\n", notImeStr), - "\n==> Config:\n {}\n \n", - "\n==> Status Of Helm Hooks:\npre-install-hook pre-install: Succeeded\npre-upgrade-hook pre-upgrade: Succeeded\nConsul servers healthy 3/3\n", - }, - preProcessingFunc: func(k8s kubernetes.Interface) error { - return createServers("consul-server-test1", "consul", 3, 3, k8s) - }, - - helmActionsRunner: &helm.MockActionRunner{ - GetStatusFunc: func(status *action.Status, name string) (*helmRelease.Release, error) { - return &helmRelease.Release{ - Name: "consul", Namespace: "consul", - Info: &helmRelease.Info{LastDeployed: nowTime, Status: "READY"}, - Chart: &chart.Chart{ - Metadata: &chart.Metadata{ - Version: "1.0.0", - }, - }, - Config: make(map[string]interface{}), - Hooks: []*helmRelease.Hook{ - { - Name: "pre-install-hook", - Kind: "pre-install", LastRun: helmRelease.HookExecution{ - Phase: helmRelease.HookPhaseSucceeded, - }, - Events: []helmRelease.HookEvent{ - "pre-install", - }, - }, - { - Name: "pre-upgrade-hook", - Kind: "pre-upgrade", LastRun: helmRelease.HookExecution{ - Phase: helmRelease.HookPhaseSucceeded, - }, - Events: []helmRelease.HookEvent{ - "pre-install", - }, - }, - { - Name: "post-delete-hook", - Kind: "post-delete", LastRun: helmRelease.HookExecution{ - Phase: helmRelease.HookPhaseSucceeded, - }, - Events: []helmRelease.HookEvent{ - "post-delete", - }, - }, - }}, nil - }, - }, - expectedReturnCode: 0, + Status: appsv1.DaemonSetStatus{ + DesiredNumberScheduled: desired, + NumberReady: desired, }, - "status with CheckForInstallations error returns ": { - input: []string{}, - messages: []string{ - "\n==> Consul Status Summary\n ! kaboom!\n", - }, - preProcessingFunc: func(k8s kubernetes.Interface) error { - return createServers("consul-server-test1", "consul", 3, 3, k8s) - }, - - helmActionsRunner: &helm.MockActionRunner{ - CheckForInstallationsFunc: func(options *helm.CheckForInstallationsOptions) (bool, string, string, error) { - return false, "", "", errors.New("kaboom!") - }, - }, - expectedReturnCode: 1, + } + + c.kubernetes.AppsV1().DaemonSets("default").Create(context.Background(), ds, metav1.CreateOptions{}) + + // Now run checkConsulClients() and make sure it succeeds. + s, err := c.checkConsulClients("default") + require.NoError(t, err) + require.Equal(t, "Consul clients healthy (3/3)", s) + + // Creating another daemon set should cause an error. + ds2 := &appsv1.DaemonSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "consul-client-test2", + Namespace: "default", + Labels: map[string]string{"app": "consul", "chart": "consul-helm"}, }, - "status with GetStatus error returns ": { - input: []string{}, - messages: []string{ - "\n==> Consul Status Summary\n ! 
couldn't check for installations: kaboom!\n", - }, - preProcessingFunc: func(k8s kubernetes.Interface) error { - return createServers("consul-server-test1", "consul", 3, 3, k8s) - }, - - helmActionsRunner: &helm.MockActionRunner{ - GetStatusFunc: func(status *action.Status, name string) (*helmRelease.Release, error) { - return nil, errors.New("kaboom!") - }, - }, - expectedReturnCode: 1, + Status: appsv1.DaemonSetStatus{ + DesiredNumberScheduled: desired, + NumberReady: desired, }, } - for name, tc := range cases { - t.Run(name, func(t *testing.T) { - buf := new(bytes.Buffer) - c := getInitializedCommand(t, buf) - c.kubernetes = fake.NewSimpleClientset() - c.helmActionsRunner = tc.helmActionsRunner - if tc.preProcessingFunc != nil { - err := tc.preProcessingFunc(c.kubernetes) - require.NoError(t, err) - } - returnCode := c.Run([]string{}) - require.Equal(t, tc.expectedReturnCode, returnCode) - output := buf.String() - for _, msg := range tc.messages { - require.Contains(t, output, msg) - } - }) + c.kubernetes.AppsV1().DaemonSets("default").Create(context.Background(), ds2, metav1.CreateOptions{}) + + _, err = c.checkConsulClients("default") + require.Error(t, err) + require.Contains(t, err.Error(), "found multiple client daemon sets") + + // Clear out the client and run a test with fewer than desired daemon sets ready. + c.kubernetes = fake.NewSimpleClientset() + + ds3 := &appsv1.DaemonSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "consul-client-test2", + Namespace: "default", + Labels: map[string]string{"app": "consul", "chart": "consul-helm"}, + }, + Status: appsv1.DaemonSetStatus{ + DesiredNumberScheduled: desired, + NumberReady: desired - 1, + }, + } + c.kubernetes.AppsV1().DaemonSets("default").Create(context.Background(), ds3, metav1.CreateOptions{}) + + _, err = c.checkConsulClients("default") + require.Error(t, err) + require.Contains(t, err.Error(), fmt.Sprintf("%d/%d Consul clients unhealthy", 1, desired)) +} + +// getInitializedCommand sets up a command struct for tests. +func getInitializedCommand(t *testing.T) *Command { + t.Helper() + log := hclog.New(&hclog.LoggerOptions{ + Name: "cli", + Level: hclog.Info, + Output: os.Stdout, + }) + + baseCommand := &common.BaseCommand{ + Log: log, } + + c := &Command{ + BaseCommand: baseCommand, + } + c.init() + return c } func TestTaskCreateCommand_AutocompleteFlags(t *testing.T) { t.Parallel() - cmd := getInitializedCommand(t, nil) + cmd := getInitializedCommand(t) predictor := cmd.AutocompleteFlags() @@ -232,52 +214,7 @@ func TestTaskCreateCommand_AutocompleteFlags(t *testing.T) { } func TestTaskCreateCommand_AutocompleteArgs(t *testing.T) { - cmd := getInitializedCommand(t, nil) + cmd := getInitializedCommand(t) c := cmd.AutocompleteArgs() assert.Equal(t, complete.PredictNothing, c) } - -// getInitializedCommand sets up a command struct for tests. 
-func getInitializedCommand(t *testing.T, buf io.Writer) *Command { - t.Helper() - log := hclog.New(&hclog.LoggerOptions{ - Name: "cli", - Level: hclog.Info, - Output: os.Stdout, - }) - var ui terminal.UI - if buf != nil { - ui = terminal.NewUI(context.Background(), buf) - } else { - ui = terminal.NewBasicUI(context.Background()) - } - baseCommand := &common.BaseCommand{ - Log: log, - UI: ui, - } - - c := &Command{ - BaseCommand: baseCommand, - } - c.init() - return c -} - -func createServers(name, namespace string, replicas, readyReplicas int32, k8s kubernetes.Interface) error { - servers := appsv1.StatefulSet{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: namespace, - Labels: map[string]string{"app": "consul", "chart": "consul-helm", "component": "server"}, - }, - Spec: appsv1.StatefulSetSpec{ - Replicas: &replicas, - }, - Status: appsv1.StatefulSetStatus{ - Replicas: replicas, - ReadyReplicas: readyReplicas, - }, - } - _, err := k8s.AppsV1().StatefulSets(namespace).Create(context.Background(), &servers, metav1.CreateOptions{}) - return err -} diff --git a/cli/cmd/uninstall/uninstall.go b/cli/cmd/uninstall/uninstall.go index 06bf4a19b3..07b945bf79 100644 --- a/cli/cmd/uninstall/uninstall.go +++ b/cli/cmd/uninstall/uninstall.go @@ -12,18 +12,9 @@ import ( "github.com/hashicorp/consul-k8s/cli/common/terminal" "github.com/hashicorp/consul-k8s/cli/helm" "github.com/posener/complete" - "golang.org/x/text/cases" - "golang.org/x/text/language" "helm.sh/helm/v3/pkg/action" helmCLI "helm.sh/helm/v3/pkg/cli" - apiextv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" - apiext "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset" - k8serrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/types" - "k8s.io/client-go/dynamic" "k8s.io/client-go/kubernetes" ) @@ -41,7 +32,7 @@ const ( defaultWipeData = false flagTimeout = "timeout" - defaultTimeout = 10 * time.Minute + defaultTimeout = "10m" flagContext = "context" flagKubeconfig = "kubeconfig" @@ -50,12 +41,7 @@ const ( type Command struct { *common.BaseCommand - helmActionsRunner helm.HelmActionsRunner - - // Configuration for interacting with Kubernetes. - k8sClient kubernetes.Interface - dynamicK8sClient dynamic.Interface - apiextK8sClient apiext.Interface + kubernetes kubernetes.Interface set *flag.Sets @@ -63,7 +49,8 @@ type Command struct { flagReleaseName string flagAutoApprove bool flagWipeData bool - flagTimeout time.Duration + flagTimeout string + timeoutDuration time.Duration flagKubeConfig string flagKubeContext string @@ -99,7 +86,7 @@ func (c *Command) init() { Default: defaultAnyReleaseName, Usage: "Name of the installation. This can be used to uninstall and/or delete the resources of a specific Helm release.", }) - f.DurationVar(&flag.DurationVar{ + f.StringVar(&flag.StringVar{ Name: flagTimeout, Target: &c.flagTimeout, Default: defaultTimeout, @@ -137,10 +124,6 @@ func (c *Command) Run(args []string) int { } }() - if c.helmActionsRunner == nil { - c.helmActionsRunner = &helm.ActionRunner{} - } - if err := c.set.Parse(args); err != nil { c.UI.Output(err.Error(), terminal.WithErrorStyle()) return 1 @@ -153,6 +136,12 @@ func (c *Command) Run(args []string) int { c.UI.Output("Can't set -wipe-data alone. 
Omit this flag to interactively uninstall, or use it with -auto-approve to wipe all data during the uninstall.", terminal.WithErrorStyle()) return 1 } + duration, err := time.ParseDuration(c.flagTimeout) + if err != nil { + c.UI.Output("unable to parse -%s: %s", flagTimeout, err, terminal.WithErrorStyle()) + return 1 + } + c.timeoutDuration = duration // helmCLI.New() will create a settings object which is used by the Helm Go SDK calls. settings := helmCLI.New() @@ -163,8 +152,20 @@ func (c *Command) Run(args []string) int { settings.KubeContext = c.flagKubeContext } - if err := c.initKubernetes(settings); err != nil { - c.UI.Output("Could not initialize Kubernetes client: %v", err, terminal.WithErrorStyle()) + // Set up the kubernetes client to use for non Helm SDK calls to the Kubernetes API + // The Helm SDK will use settings.RESTClientGetter for its calls as well, so this will + // use a consistent method to target the right cluster for both Helm SDK and non Helm SDK calls. + if c.kubernetes == nil { + restConfig, err := settings.RESTClientGetter().ToRESTConfig() + if err != nil { + c.UI.Output("retrieving Kubernetes auth: %v", err, terminal.WithErrorStyle()) + return 1 + } + c.kubernetes, err = kubernetes.NewForConfig(restConfig) + if err != nil { + c.UI.Output("initializing Kubernetes client: %v", err, terminal.WithErrorStyle()) + return 1 + } } // Setup logger to stream Helm library logs. @@ -173,53 +174,61 @@ func (c *Command) Run(args []string) int { c.UI.Output(logMsg, terminal.WithLibraryStyle()) } - actionConfig := new(action.Configuration) - actionConfig, err := helm.InitActionConfig(actionConfig, c.flagNamespace, settings, uiLogger) - if err != nil { - c.UI.Output(err.Error(), terminal.WithErrorStyle()) - return 1 - } + c.UI.Output("Existing Installation", terminal.WithHeaderStyle()) - c.UI.Output(fmt.Sprintf("Checking if %s can be uninstalled", common.ReleaseTypeConsulDemo), terminal.WithHeaderStyle()) - foundConsulDemo, foundDemoReleaseName, foundDemoReleaseNamespace, err := c.findExistingInstallation(&helm.CheckForInstallationsOptions{ - Settings: settings, - ReleaseName: common.ConsulDemoAppReleaseName, - DebugLog: uiLogger, - SkipErrorWhenNotFound: true, - }) + // Search for Consul installation by calling `helm list`. Depends on what's already specified. + actionConfig := new(action.Configuration) + actionConfig, err = helm.InitActionConfig(actionConfig, c.flagNamespace, settings, uiLogger) if err != nil { c.UI.Output(err.Error(), terminal.WithErrorStyle()) return 1 - } else if !foundConsulDemo { - c.UI.Output(fmt.Sprintf("No existing %s installation found.", common.ReleaseTypeConsulDemo), terminal.WithInfoStyle()) } - found, foundReleaseName, foundReleaseNamespace, err := - c.findExistingInstallation(&helm.CheckForInstallationsOptions{ - Settings: settings, - ReleaseName: common.DefaultReleaseName, - DebugLog: uiLogger, - }) + found, foundReleaseName, foundReleaseNamespace, err := c.findExistingInstallation(settings, uiLogger) if err != nil { c.UI.Output(err.Error(), terminal.WithErrorStyle()) return 1 - } + } else { + c.UI.Output("Existing Consul installation found.", terminal.WithSuccessStyle()) + c.UI.Output("Consul Uninstall Summary", terminal.WithHeaderStyle()) + c.UI.Output("Name: %s", foundReleaseName, terminal.WithInfoStyle()) + c.UI.Output("Namespace: %s", foundReleaseNamespace, terminal.WithInfoStyle()) + + // Prompt for approval to uninstall Helm release. 
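+	// The prompt is skipped when -auto-approve is set; any response that
+	// common.Abort treats as a refusal cancels the uninstall and the
+	// command exits non-zero.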
+ if !c.flagAutoApprove { + confirmation, err := c.UI.Input(&terminal.Input{ + Prompt: "Proceed with uninstall? (y/N)", + Style: terminal.InfoStyle, + Secret: false, + }) + if err != nil { + c.UI.Output(err.Error(), terminal.WithErrorStyle()) + return 1 + } + if common.Abort(confirmation) { + c.UI.Output("Uninstall aborted. To learn how to customize the uninstall, run:\nconsul-k8s uninstall --help", terminal.WithInfoStyle()) + return 1 + } + } - if foundConsulDemo { - err = c.uninstallHelmRelease(foundDemoReleaseName, foundDemoReleaseNamespace, common.ReleaseTypeConsulDemo, settings, uiLogger, actionConfig) + // Actually call out to `helm delete`. + actionConfig, err = helm.InitActionConfig(actionConfig, foundReleaseNamespace, settings, uiLogger) if err != nil { c.UI.Output(err.Error(), terminal.WithErrorStyle()) return 1 } - } - c.UI.Output("Checking if Consul can be uninstalled", terminal.WithHeaderStyle()) - if found { - err = c.uninstallHelmRelease(foundReleaseName, foundReleaseNamespace, common.ReleaseTypeConsul, settings, uiLogger, actionConfig) + uninstaller := action.NewUninstall(actionConfig) + uninstaller.Timeout = c.timeoutDuration + res, err := uninstaller.Run(foundReleaseName) if err != nil { - c.UI.Output(err.Error(), terminal.WithErrorStyle()) + c.UI.Output("unable to uninstall: %s", err, terminal.WithErrorStyle()) return 1 } + if res != nil && res.Info != "" { + c.UI.Output("Uninstall result: %s", res.Info, terminal.WithInfoStyle()) + } + c.UI.Output("Successfully uninstalled Consul Helm release", terminal.WithSuccessStyle()) } // If -auto-approve=true and -wipe-data=false, we should only uninstall the release, and skip deleting resources. @@ -310,248 +319,6 @@ func (c *Command) Run(args []string) int { return 0 } -// initKubernetes sets up the kubernetes clients to use for non Helm SDK calls to the Kubernetes API. -// The Helm SDK will use settings.RESTClientGetter for its calls as well, so this will -// use a consistent method to target the right cluster for both Helm SDK and non Helm SDK calls. -func (c *Command) initKubernetes(settings *helmCLI.EnvSettings) error { - restConfig, err := settings.RESTClientGetter().ToRESTConfig() - if err != nil { - return err - } - - if c.k8sClient == nil { - if c.k8sClient, err = kubernetes.NewForConfig(restConfig); err != nil { - return err - } - } - - if c.dynamicK8sClient == nil { - if c.dynamicK8sClient, err = dynamic.NewForConfig(restConfig); err != nil { - return err - } - } - - if c.apiextK8sClient == nil { - if c.apiextK8sClient, err = apiext.NewForConfig(restConfig); err != nil { - return err - } - } - - return nil -} - -func (c *Command) uninstallHelmRelease(releaseName, namespace, releaseType string, settings *helmCLI.EnvSettings, - uiLogger action.DebugLog, actionConfig *action.Configuration) error { - c.UI.Output(fmt.Sprintf("Existing %s installation found.", releaseType), terminal.WithSuccessStyle()) - c.UI.Output(fmt.Sprintf("%s Uninstall Summary", cases.Title(language.English).String(releaseType)), terminal.WithHeaderStyle()) - c.UI.Output("Name: %s", releaseName, terminal.WithInfoStyle()) - c.UI.Output("Namespace: %s", namespace, terminal.WithInfoStyle()) - - // Prompt for approval to uninstall Helm release. - // Actually call out to `helm delete`. - if !c.flagAutoApprove { - confirmation, err := c.UI.Input(&terminal.Input{ - Prompt: "Proceed with uninstall? (y/N)", - Style: terminal.InfoStyle, - Secret: false, - }) - if err != nil { - return err - } - if common.Abort(confirmation) { - c.UI.Output("Uninstall aborted. 
To learn how to customize the uninstall, run:\nconsul-k8s uninstall --help", terminal.WithInfoStyle()) - return nil - } - } - - // Delete any custom resources managed by Consul. If they cannot be deleted, - // patch the finalizers to be empty on each one. - if releaseType == common.ReleaseTypeConsul { - if err := c.removeCustomResources(uiLogger); err != nil { - c.UI.Output("Error removing custom resources: %v", err.Error(), terminal.WithErrorStyle()) - } - } - - actionConfig, err := helm.InitActionConfig(actionConfig, namespace, settings, uiLogger) - if err != nil { - return err - } - - uninstall := action.NewUninstall(actionConfig) - uninstall.Timeout = c.flagTimeout - - res, err := c.helmActionsRunner.Uninstall(uninstall, releaseName) - if err != nil { - return err - } - if res != nil && res.Info != "" { - c.UI.Output("Uninstall result: %s", res.Info, terminal.WithInfoStyle()) - return nil - } - - c.UI.Output(fmt.Sprintf("Successfully uninstalled %s Helm release.", releaseType), terminal.WithSuccessStyle()) - return nil -} - -// removeCustomResources fetches a list of custom resource defintions managed -// by Consul and attempts to delete every custom resource for each definition. -// If the resources cannot be deleted directly, the finalizers on each resource -// are patched to be an empty list, freeing them to be deleted by Kubernetes. -func (c *Command) removeCustomResources(uiLogger action.DebugLog) error { - uiLogger("Deleting custom resources managed by Consul") - - crds, err := c.fetchCustomResourceDefinitions() - if err != nil { - return fmt.Errorf("unable to fetch Custom Resource Definitions for Consul deployment: %v", err) - } - kindToResource := mapCRKindToResourceName(crds) - - crs, err := c.fetchCustomResources(crds) - if err != nil { - return err - } - - if err = c.deleteCustomResources(crs, kindToResource, uiLogger); err != nil { - return err - } - - err = backoff.Retry(func() error { - crs, err := c.fetchCustomResources(crds) - if err != nil { - return err - } - if len(crs) != 0 { - return common.NewDanglingResourceError(fmt.Sprintf("%d custom resources remain after deletion request", len(crs))) - } - return nil - }, backoff.WithMaxRetries(backoff.NewConstantBackOff(time.Second), 5)) - if !common.IsDanglingResourceError(err) { - return err - } - - // Custom resources could not be deleted directly, attempt to patch their finalizers to an empty array. - uiLogger("Patching finalizers on custom resources managed by Consul") - - crs, err = c.fetchCustomResources(crds) - if err != nil { - return err - } - - if err = c.patchCustomResources(crs, kindToResource, uiLogger); err != nil { - return err - } - - err = backoff.Retry(func() error { - crs, err := c.fetchCustomResources(crds) - if err != nil { - return err - } - if len(crs) != 0 { - return common.NewDanglingResourceError(fmt.Sprintf("%d custom resources remain after request to patch finalizers", len(crs))) - } - return nil - }, backoff.WithMaxRetries(backoff.NewConstantBackOff(time.Second), 5)) - if err != nil { - return fmt.Errorf("unable to remove all custom resources managed by Consul. %d custom resources remain and will need to be removed manually. %v", len(crs), err) - } - - return nil -} - -// fetchCustomResourceDefinitions fetches all Custom Resource Definitions managed by Consul. 
-func (c *Command) fetchCustomResourceDefinitions() (*apiextv1.CustomResourceDefinitionList, error) { - return c.apiextK8sClient.ApiextensionsV1().CustomResourceDefinitions().List(c.Ctx, metav1.ListOptions{ - LabelSelector: "app=consul", - }) -} - -// fetchCustomResources gets a list of all custom resources deployed in the -// cluster that are managed by Consul. -func (c *Command) fetchCustomResources(crds *apiextv1.CustomResourceDefinitionList) ([]unstructured.Unstructured, error) { - crs := make([]unstructured.Unstructured, 0) - for _, crd := range crds.Items { - for _, version := range crd.Spec.Versions { - target := schema.GroupVersionResource{ - Group: crd.Spec.Group, - Version: version.Name, - Resource: crd.Spec.Names.Plural, - } - - crList, err := c.dynamicK8sClient.Resource(target).List(c.Ctx, metav1.ListOptions{}) - if err != nil { - return nil, err - } - if crList != nil { - crs = append(crs, crList.Items...) - } - } - } - - return crs, nil -} - -// deleteCustomResources takes a list of unstructured custom resources and -// sends a request to each one to be deleted. -func (c *Command) deleteCustomResources(crs []unstructured.Unstructured, kindToResource map[string]string, uiLogger action.DebugLog) error { - for _, cr := range crs { - gv, err := schema.ParseGroupVersion(cr.GetAPIVersion()) - if err != nil { - return err - } - - target := schema.GroupVersionResource{ - Group: gv.Group, - Version: gv.Version, - Resource: kindToResource[cr.GetKind()], - } - - uiLogger(fmt.Sprintf("Starting delete for \"%s\" %s", cr.GetName(), cr.GetKind())) - err = c.dynamicK8sClient. - Resource(target). - Namespace(cr.GetNamespace()). - Delete(c.Ctx, cr.GetName(), metav1.DeleteOptions{}) - if err != nil && !k8serrors.IsNotFound(err) { - return err - } - } - - return nil -} - -// patchCustomResources takes a list of unstructured custom resources and -// sends a request to each one to patch its finalizers to an empty list. -func (c *Command) patchCustomResources(crs []unstructured.Unstructured, kindToResource map[string]string, uiLogger action.DebugLog) error { - finalizerPatch := []byte(`[{ - "op": "replace", - "path": "/metadata/finalizers", - "value": [] - }]`) - - for _, cr := range crs { - gv, err := schema.ParseGroupVersion(cr.GetAPIVersion()) - if err != nil { - return err - } - - target := schema.GroupVersionResource{ - Group: gv.Group, - Version: gv.Version, - Resource: kindToResource[cr.GetKind()], - } - - uiLogger(fmt.Sprintf("Patching finalizers for \"%s\" %s", cr.GetName(), cr.GetKind())) - _, err = c.dynamicK8sClient. - Resource(target). - Namespace(cr.GetNamespace()). - Patch(c.Ctx, cr.GetName(), types.JSONPatchType, finalizerPatch, metav1.PatchOptions{}) - if err != nil && !k8serrors.IsNotFound(err) { - return err - } - } - - return nil -} - func (c *Command) Help() string { c.once.Do(c.init) s := "Usage: consul-k8s uninstall [flags]" + "\n" + "Uninstall Consul with options to delete data and resources associated with Consul installation." 
+ "\n\n" + c.help @@ -584,18 +351,14 @@ func (c *Command) AutocompleteArgs() complete.Predictor { return complete.PredictNothing } -func (c *Command) findExistingInstallation(options *helm.CheckForInstallationsOptions) (bool, string, string, error) { - found, releaseName, namespace, err := c.helmActionsRunner.CheckForInstallations(options) +func (c *Command) findExistingInstallation(settings *helmCLI.EnvSettings, uiLogger action.DebugLog) (bool, string, string, error) { + releaseName, namespace, err := common.CheckForInstallations(settings, uiLogger) if err != nil { return false, "", "", err - } else if found && (c.flagNamespace == defaultAllNamespaces || c.flagNamespace == namespace) { + } else if c.flagNamespace == defaultAllNamespaces || c.flagNamespace == namespace { return true, releaseName, namespace, nil } else { - var notFoundError error - if !options.SkipErrorWhenNotFound { - notFoundError = fmt.Errorf("could not find %s installation in cluster", common.ReleaseTypeConsul) - } - return false, "", "", notFoundError + return false, "", "", fmt.Errorf("could not find consul installation in namespace %s", c.flagNamespace) } } @@ -603,7 +366,7 @@ func (c *Command) findExistingInstallation(options *helm.CheckForInstallationsOp func (c *Command) deletePVCs(foundReleaseName, foundReleaseNamespace string) error { var pvcNames []string pvcSelector := metav1.ListOptions{LabelSelector: fmt.Sprintf("release=%s", foundReleaseName)} - pvcs, err := c.k8sClient.CoreV1().PersistentVolumeClaims(foundReleaseNamespace).List(c.Ctx, pvcSelector) + pvcs, err := c.kubernetes.CoreV1().PersistentVolumeClaims(foundReleaseNamespace).List(c.Ctx, pvcSelector) if err != nil { return fmt.Errorf("deletePVCs: %s", err) } @@ -612,14 +375,14 @@ func (c *Command) deletePVCs(foundReleaseName, foundReleaseNamespace string) err return nil } for _, pvc := range pvcs.Items { - err := c.k8sClient.CoreV1().PersistentVolumeClaims(foundReleaseNamespace).Delete(c.Ctx, pvc.Name, metav1.DeleteOptions{}) + err := c.kubernetes.CoreV1().PersistentVolumeClaims(foundReleaseNamespace).Delete(c.Ctx, pvc.Name, metav1.DeleteOptions{}) if err != nil { return fmt.Errorf("deletePVCs: error deleting PVC %q: %s", pvc.Name, err) } pvcNames = append(pvcNames, pvc.Name) } err = backoff.Retry(func() error { - pvcs, err := c.k8sClient.CoreV1().PersistentVolumeClaims(foundReleaseNamespace).List(c.Ctx, pvcSelector) + pvcs, err := c.kubernetes.CoreV1().PersistentVolumeClaims(foundReleaseNamespace).List(c.Ctx, pvcSelector) if err != nil { return fmt.Errorf("deletePVCs: %s", err) } @@ -642,7 +405,7 @@ func (c *Command) deletePVCs(foundReleaseName, foundReleaseNamespace string) err // deleteSecrets deletes any secrets that have the label "managed-by" set to "consul-k8s". 
func (c *Command) deleteSecrets(foundReleaseNamespace string) error { - secrets, err := c.k8sClient.CoreV1().Secrets(foundReleaseNamespace).List(c.Ctx, metav1.ListOptions{ + secrets, err := c.kubernetes.CoreV1().Secrets(foundReleaseNamespace).List(c.Ctx, metav1.ListOptions{ LabelSelector: common.CLILabelKey + "=" + common.CLILabelValue, }) if err != nil { @@ -654,7 +417,7 @@ func (c *Command) deleteSecrets(foundReleaseNamespace string) error { } var secretNames []string for _, secret := range secrets.Items { - err := c.k8sClient.CoreV1().Secrets(foundReleaseNamespace).Delete(c.Ctx, secret.Name, metav1.DeleteOptions{}) + err := c.kubernetes.CoreV1().Secrets(foundReleaseNamespace).Delete(c.Ctx, secret.Name, metav1.DeleteOptions{}) if err != nil { return fmt.Errorf("deleteSecrets: error deleting Secret %q: %s", secret.Name, err) } @@ -673,7 +436,7 @@ func (c *Command) deleteSecrets(foundReleaseNamespace string) error { func (c *Command) deleteServiceAccounts(foundReleaseName, foundReleaseNamespace string) error { var serviceAccountNames []string saSelector := metav1.ListOptions{LabelSelector: fmt.Sprintf("release=%s", foundReleaseName)} - sas, err := c.k8sClient.CoreV1().ServiceAccounts(foundReleaseNamespace).List(c.Ctx, saSelector) + sas, err := c.kubernetes.CoreV1().ServiceAccounts(foundReleaseNamespace).List(c.Ctx, saSelector) if err != nil { return fmt.Errorf("deleteServiceAccounts: %s", err) } @@ -682,7 +445,7 @@ func (c *Command) deleteServiceAccounts(foundReleaseName, foundReleaseNamespace return nil } for _, sa := range sas.Items { - err := c.k8sClient.CoreV1().ServiceAccounts(foundReleaseNamespace).Delete(c.Ctx, sa.Name, metav1.DeleteOptions{}) + err := c.kubernetes.CoreV1().ServiceAccounts(foundReleaseNamespace).Delete(c.Ctx, sa.Name, metav1.DeleteOptions{}) if err != nil { return fmt.Errorf("deleteServiceAccounts: error deleting ServiceAccount %q: %s", sa.Name, err) } @@ -701,7 +464,7 @@ func (c *Command) deleteServiceAccounts(foundReleaseName, foundReleaseNamespace func (c *Command) deleteRoles(foundReleaseName, foundReleaseNamespace string) error { var roleNames []string roleSelector := metav1.ListOptions{LabelSelector: fmt.Sprintf("release=%s", foundReleaseName)} - roles, err := c.k8sClient.RbacV1().Roles(foundReleaseNamespace).List(c.Ctx, roleSelector) + roles, err := c.kubernetes.RbacV1().Roles(foundReleaseNamespace).List(c.Ctx, roleSelector) if err != nil { return fmt.Errorf("deleteRoles: %s", err) } @@ -710,7 +473,7 @@ func (c *Command) deleteRoles(foundReleaseName, foundReleaseNamespace string) er return nil } for _, role := range roles.Items { - err := c.k8sClient.RbacV1().Roles(foundReleaseNamespace).Delete(c.Ctx, role.Name, metav1.DeleteOptions{}) + err := c.kubernetes.RbacV1().Roles(foundReleaseNamespace).Delete(c.Ctx, role.Name, metav1.DeleteOptions{}) if err != nil { return fmt.Errorf("deleteRoles: error deleting Role %q: %s", role.Name, err) } @@ -729,7 +492,7 @@ func (c *Command) deleteRoles(foundReleaseName, foundReleaseNamespace string) er func (c *Command) deleteRoleBindings(foundReleaseName, foundReleaseNamespace string) error { var rolebindingNames []string rolebindingSelector := metav1.ListOptions{LabelSelector: fmt.Sprintf("release=%s", foundReleaseName)} - rolebindings, err := c.k8sClient.RbacV1().RoleBindings(foundReleaseNamespace).List(c.Ctx, rolebindingSelector) + rolebindings, err := c.kubernetes.RbacV1().RoleBindings(foundReleaseNamespace).List(c.Ctx, rolebindingSelector) if err != nil { return fmt.Errorf("deleteRoleBindings: %s", err) } @@ -738,7 
+501,7 @@ func (c *Command) deleteRoleBindings(foundReleaseName, foundReleaseNamespace str return nil } for _, rolebinding := range rolebindings.Items { - err := c.k8sClient.RbacV1().RoleBindings(foundReleaseNamespace).Delete(c.Ctx, rolebinding.Name, metav1.DeleteOptions{}) + err := c.kubernetes.RbacV1().RoleBindings(foundReleaseNamespace).Delete(c.Ctx, rolebinding.Name, metav1.DeleteOptions{}) if err != nil { return fmt.Errorf("deleteRoleBindings: error deleting Role %q: %s", rolebinding.Name, err) } @@ -757,7 +520,7 @@ func (c *Command) deleteRoleBindings(foundReleaseName, foundReleaseNamespace str func (c *Command) deleteJobs(foundReleaseName, foundReleaseNamespace string) error { var jobNames []string jobSelector := metav1.ListOptions{LabelSelector: fmt.Sprintf("release=%s", foundReleaseName)} - jobs, err := c.k8sClient.BatchV1().Jobs(foundReleaseNamespace).List(c.Ctx, jobSelector) + jobs, err := c.kubernetes.BatchV1().Jobs(foundReleaseNamespace).List(c.Ctx, jobSelector) if err != nil { return fmt.Errorf("deleteJobs: %s", err) } @@ -766,7 +529,7 @@ func (c *Command) deleteJobs(foundReleaseName, foundReleaseNamespace string) err return nil } for _, job := range jobs.Items { - err := c.k8sClient.BatchV1().Jobs(foundReleaseNamespace).Delete(c.Ctx, job.Name, metav1.DeleteOptions{}) + err := c.kubernetes.BatchV1().Jobs(foundReleaseNamespace).Delete(c.Ctx, job.Name, metav1.DeleteOptions{}) if err != nil { return fmt.Errorf("deleteJobs: error deleting Job %q: %s", job.Name, err) } @@ -785,7 +548,7 @@ func (c *Command) deleteJobs(foundReleaseName, foundReleaseNamespace string) err func (c *Command) deleteClusterRoles(foundReleaseName string) error { var clusterRolesNames []string clusterRolesSelector := metav1.ListOptions{LabelSelector: fmt.Sprintf("release=%s", foundReleaseName)} - clusterRoles, err := c.k8sClient.RbacV1().ClusterRoles().List(c.Ctx, clusterRolesSelector) + clusterRoles, err := c.kubernetes.RbacV1().ClusterRoles().List(c.Ctx, clusterRolesSelector) if err != nil { return fmt.Errorf("deleteClusterRoles: %s", err) } @@ -794,7 +557,7 @@ func (c *Command) deleteClusterRoles(foundReleaseName string) error { return nil } for _, clusterRole := range clusterRoles.Items { - err := c.k8sClient.RbacV1().ClusterRoles().Delete(c.Ctx, clusterRole.Name, metav1.DeleteOptions{}) + err := c.kubernetes.RbacV1().ClusterRoles().Delete(c.Ctx, clusterRole.Name, metav1.DeleteOptions{}) if err != nil { return fmt.Errorf("deleteClusterRoles: error deleting cluster role %q: %s", clusterRole.Name, err) } @@ -813,7 +576,7 @@ func (c *Command) deleteClusterRoles(foundReleaseName string) error { func (c *Command) deleteClusterRoleBindings(foundReleaseName string) error { var clusterRoleBindingsNames []string clusterRoleBindingsSelector := metav1.ListOptions{LabelSelector: fmt.Sprintf("release=%s", foundReleaseName)} - clusterRoleBindings, err := c.k8sClient.RbacV1().ClusterRoleBindings().List(c.Ctx, clusterRoleBindingsSelector) + clusterRoleBindings, err := c.kubernetes.RbacV1().ClusterRoleBindings().List(c.Ctx, clusterRoleBindingsSelector) if err != nil { return fmt.Errorf("deleteClusterRoleBindings: %s", err) } @@ -822,7 +585,7 @@ func (c *Command) deleteClusterRoleBindings(foundReleaseName string) error { return nil } for _, clusterRoleBinding := range clusterRoleBindings.Items { - err := c.k8sClient.RbacV1().ClusterRoleBindings().Delete(c.Ctx, clusterRoleBinding.Name, metav1.DeleteOptions{}) + err := c.kubernetes.RbacV1().ClusterRoleBindings().Delete(c.Ctx, clusterRoleBinding.Name, 
metav1.DeleteOptions{}) if err != nil { return fmt.Errorf("deleteClusterRoleBindings: error deleting cluster role binding %q: %s", clusterRoleBinding.Name, err) } @@ -836,15 +599,3 @@ func (c *Command) deleteClusterRoleBindings(foundReleaseName string) error { } return nil } - -// mapCRKindToResourceName takes the list of custom resource definitions and -// creates a mapping from the "kind" of the CRD to its "resource" name. -// This is needed for the dynamic API which finds custom resources by their -// lowercase, plural resource name. (e.g. "ingressgateways" for "IngressGateway" kind). -func mapCRKindToResourceName(crds *apiextv1.CustomResourceDefinitionList) map[string]string { - kindToResourceName := make(map[string]string) - for _, crd := range crds.Items { - kindToResourceName[crd.Spec.Names.Kind] = crd.Spec.Names.Plural - } - return kindToResourceName -} diff --git a/cli/cmd/uninstall/uninstall_test.go b/cli/cmd/uninstall/uninstall_test.go index 2adc0960ab..8fa92e92b7 100644 --- a/cli/cmd/uninstall/uninstall_test.go +++ b/cli/cmd/uninstall/uninstall_test.go @@ -1,56 +1,29 @@ package uninstall import ( - "bytes" "context" - "errors" "flag" "fmt" - "io" "os" "testing" "github.com/hashicorp/consul-k8s/cli/common" cmnFlag "github.com/hashicorp/consul-k8s/cli/common/flag" "github.com/hashicorp/consul-k8s/cli/common/terminal" - "github.com/hashicorp/consul-k8s/cli/helm" "github.com/hashicorp/go-hclog" "github.com/posener/complete" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "helm.sh/helm/v3/pkg/action" - helmRelease "helm.sh/helm/v3/pkg/release" batchv1 "k8s.io/api/batch/v1" v1 "k8s.io/api/core/v1" rbacv1 "k8s.io/api/rbac/v1" - apiextv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" - apiext "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset" - apiextFake "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/fake" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/client-go/dynamic" - dynamicFake "k8s.io/client-go/dynamic/fake" "k8s.io/client-go/kubernetes/fake" ) -var ( - serviceDefaultsGRV = schema.GroupVersionResource{ - Group: "consul.hashicorp.com", - Version: "v1alpha1", - Resource: "servicedefaults", - } - nonConsulGRV = schema.GroupVersionResource{ - Group: "example.com", - Version: "v1", - Resource: "examples", - } -) - func TestDeletePVCs(t *testing.T) { - c := getInitializedCommand(t, nil) - c.k8sClient = fake.NewSimpleClientset() + c := getInitializedCommand(t) + c.kubernetes = fake.NewSimpleClientset() pvc := &v1.PersistentVolumeClaim{ ObjectMeta: metav1.ObjectMeta{ Name: "consul-server-test1", @@ -75,23 +48,23 @@ func TestDeletePVCs(t *testing.T) { }, }, } - _, err := c.k8sClient.CoreV1().PersistentVolumeClaims("default").Create(context.Background(), pvc, metav1.CreateOptions{}) + _, err := c.kubernetes.CoreV1().PersistentVolumeClaims("default").Create(context.Background(), pvc, metav1.CreateOptions{}) require.NoError(t, err) - _, err = c.k8sClient.CoreV1().PersistentVolumeClaims("default").Create(context.Background(), pvc2, metav1.CreateOptions{}) + _, err = c.kubernetes.CoreV1().PersistentVolumeClaims("default").Create(context.Background(), pvc2, metav1.CreateOptions{}) require.NoError(t, err) - _, err = c.k8sClient.CoreV1().PersistentVolumeClaims("default").Create(context.Background(), pvc3, metav1.CreateOptions{}) + _, err = 
c.kubernetes.CoreV1().PersistentVolumeClaims("default").Create(context.Background(), pvc3, metav1.CreateOptions{}) require.NoError(t, err) err = c.deletePVCs("consul", "default") require.NoError(t, err) - pvcs, err := c.k8sClient.CoreV1().PersistentVolumeClaims("default").List(context.Background(), metav1.ListOptions{}) + pvcs, err := c.kubernetes.CoreV1().PersistentVolumeClaims("default").List(context.Background(), metav1.ListOptions{}) require.NoError(t, err) require.Len(t, pvcs.Items, 1) require.Equal(t, pvcs.Items[0].Name, pvc3.Name) } func TestDeleteSecrets(t *testing.T) { - c := getInitializedCommand(t, nil) - c.k8sClient = fake.NewSimpleClientset() + c := getInitializedCommand(t) + c.kubernetes = fake.NewSimpleClientset() secret := &v1.Secret{ ObjectMeta: metav1.ObjectMeta{ Name: "consul-test-secret1", @@ -117,15 +90,15 @@ func TestDeleteSecrets(t *testing.T) { }, }, } - _, err := c.k8sClient.CoreV1().Secrets("default").Create(context.Background(), secret, metav1.CreateOptions{}) + _, err := c.kubernetes.CoreV1().Secrets("default").Create(context.Background(), secret, metav1.CreateOptions{}) require.NoError(t, err) - _, err = c.k8sClient.CoreV1().Secrets("default").Create(context.Background(), secret2, metav1.CreateOptions{}) + _, err = c.kubernetes.CoreV1().Secrets("default").Create(context.Background(), secret2, metav1.CreateOptions{}) require.NoError(t, err) - _, err = c.k8sClient.CoreV1().Secrets("default").Create(context.Background(), secret3, metav1.CreateOptions{}) + _, err = c.kubernetes.CoreV1().Secrets("default").Create(context.Background(), secret3, metav1.CreateOptions{}) require.NoError(t, err) err = c.deleteSecrets("default") require.NoError(t, err) - secrets, err := c.k8sClient.CoreV1().Secrets("default").List(context.Background(), metav1.ListOptions{}) + secrets, err := c.kubernetes.CoreV1().Secrets("default").List(context.Background(), metav1.ListOptions{}) require.NoError(t, err) // Only secret1 should have been deleted, secret2 and secret 3 persist since it doesn't have the label. 
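[Editor's aside: every delete helper touched by this `k8sClient` → `kubernetes` rename follows the same list-then-delete shape — list the resources carrying the Helm `release=<name>` label, then delete each match — and the tests exercise it against an in-memory clientset rather than a real cluster. A minimal, self-contained sketch of that pattern, assuming a hypothetical helper name that is not the CLI's actual code:]

```go
package main

import (
	"context"
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/kubernetes/fake"
)

// deleteSecretsByRelease mirrors the list-then-delete shape of the
// uninstall helpers: list resources labeled with the Helm release,
// then delete each match. Hypothetical helper, not the CLI's code.
func deleteSecretsByRelease(ctx context.Context, client kubernetes.Interface, release, namespace string) error {
	selector := metav1.ListOptions{LabelSelector: fmt.Sprintf("release=%s", release)}
	secrets, err := client.CoreV1().Secrets(namespace).List(ctx, selector)
	if err != nil {
		return fmt.Errorf("listing secrets: %w", err)
	}
	for _, s := range secrets.Items {
		if err := client.CoreV1().Secrets(namespace).Delete(ctx, s.Name, metav1.DeleteOptions{}); err != nil {
			return fmt.Errorf("deleting Secret %q: %w", s.Name, err)
		}
	}
	return nil
}

func main() {
	// fake.NewSimpleClientset gives an in-memory clientset, the same
	// substitution the tests in this patch make instead of a real cluster.
	client := fake.NewSimpleClientset(
		&v1.Secret{ObjectMeta: metav1.ObjectMeta{
			Name:      "consul-test-secret1",
			Namespace: "default",
			Labels:    map[string]string{"release": "consul"},
		}},
		&v1.Secret{ObjectMeta: metav1.ObjectMeta{
			Name:      "unrelated-secret",
			Namespace: "default",
		}},
	)
	if err := deleteSecretsByRelease(context.Background(), client, "consul", "default"); err != nil {
		panic(err)
	}
	remaining, _ := client.CoreV1().Secrets("default").List(context.Background(), metav1.ListOptions{})
	fmt.Println(len(remaining.Items)) // 1: only the unlabeled secret survives
}
```

[The generated fakes in client-go apply label selectors on List, which is why these tests can assert on exactly which labeled objects were removed.]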
@@ -133,8 +106,8 @@ func TestDeleteSecrets(t *testing.T) { } func TestDeleteServiceAccounts(t *testing.T) { - c := getInitializedCommand(t, nil) - c.k8sClient = fake.NewSimpleClientset() + c := getInitializedCommand(t) + c.kubernetes = fake.NewSimpleClientset() sa := &v1.ServiceAccount{ ObjectMeta: metav1.ObjectMeta{ Name: "consul-test-sa1", @@ -159,23 +132,23 @@ func TestDeleteServiceAccounts(t *testing.T) { }, }, } - _, err := c.k8sClient.CoreV1().ServiceAccounts("default").Create(context.Background(), sa, metav1.CreateOptions{}) + _, err := c.kubernetes.CoreV1().ServiceAccounts("default").Create(context.Background(), sa, metav1.CreateOptions{}) require.NoError(t, err) - _, err = c.k8sClient.CoreV1().ServiceAccounts("default").Create(context.Background(), sa2, metav1.CreateOptions{}) + _, err = c.kubernetes.CoreV1().ServiceAccounts("default").Create(context.Background(), sa2, metav1.CreateOptions{}) require.NoError(t, err) - _, err = c.k8sClient.CoreV1().ServiceAccounts("default").Create(context.Background(), sa3, metav1.CreateOptions{}) + _, err = c.kubernetes.CoreV1().ServiceAccounts("default").Create(context.Background(), sa3, metav1.CreateOptions{}) require.NoError(t, err) err = c.deleteServiceAccounts("consul", "default") require.NoError(t, err) - sas, err := c.k8sClient.CoreV1().ServiceAccounts("default").List(context.Background(), metav1.ListOptions{}) + sas, err := c.kubernetes.CoreV1().ServiceAccounts("default").List(context.Background(), metav1.ListOptions{}) require.NoError(t, err) require.Len(t, sas.Items, 1) require.Equal(t, sas.Items[0].Name, sa3.Name) } func TestDeleteRoles(t *testing.T) { - c := getInitializedCommand(t, nil) - c.k8sClient = fake.NewSimpleClientset() + c := getInitializedCommand(t) + c.kubernetes = fake.NewSimpleClientset() role := &rbacv1.Role{ ObjectMeta: metav1.ObjectMeta{ Name: "consul-test-role1", @@ -200,23 +173,23 @@ func TestDeleteRoles(t *testing.T) { }, }, } - _, err := c.k8sClient.RbacV1().Roles("default").Create(context.Background(), role, metav1.CreateOptions{}) + _, err := c.kubernetes.RbacV1().Roles("default").Create(context.Background(), role, metav1.CreateOptions{}) require.NoError(t, err) - _, err = c.k8sClient.RbacV1().Roles("default").Create(context.Background(), role2, metav1.CreateOptions{}) + _, err = c.kubernetes.RbacV1().Roles("default").Create(context.Background(), role2, metav1.CreateOptions{}) require.NoError(t, err) - _, err = c.k8sClient.RbacV1().Roles("default").Create(context.Background(), role3, metav1.CreateOptions{}) + _, err = c.kubernetes.RbacV1().Roles("default").Create(context.Background(), role3, metav1.CreateOptions{}) require.NoError(t, err) err = c.deleteRoles("consul", "default") require.NoError(t, err) - roles, err := c.k8sClient.RbacV1().Roles("default").List(context.Background(), metav1.ListOptions{}) + roles, err := c.kubernetes.RbacV1().Roles("default").List(context.Background(), metav1.ListOptions{}) require.NoError(t, err) require.Len(t, roles.Items, 1) require.Equal(t, roles.Items[0].Name, role3.Name) } func TestDeleteRoleBindings(t *testing.T) { - c := getInitializedCommand(t, nil) - c.k8sClient = fake.NewSimpleClientset() + c := getInitializedCommand(t) + c.kubernetes = fake.NewSimpleClientset() rolebinding := &rbacv1.RoleBinding{ ObjectMeta: metav1.ObjectMeta{ Name: "consul-test-role1", @@ -241,23 +214,23 @@ func TestDeleteRoleBindings(t *testing.T) { }, }, } - _, err := c.k8sClient.RbacV1().RoleBindings("default").Create(context.Background(), rolebinding, metav1.CreateOptions{}) + _, err := 
c.kubernetes.RbacV1().RoleBindings("default").Create(context.Background(), rolebinding, metav1.CreateOptions{}) require.NoError(t, err) - _, err = c.k8sClient.RbacV1().RoleBindings("default").Create(context.Background(), rolebinding2, metav1.CreateOptions{}) + _, err = c.kubernetes.RbacV1().RoleBindings("default").Create(context.Background(), rolebinding2, metav1.CreateOptions{}) require.NoError(t, err) - _, err = c.k8sClient.RbacV1().RoleBindings("default").Create(context.Background(), rolebinding3, metav1.CreateOptions{}) + _, err = c.kubernetes.RbacV1().RoleBindings("default").Create(context.Background(), rolebinding3, metav1.CreateOptions{}) require.NoError(t, err) err = c.deleteRoleBindings("consul", "default") require.NoError(t, err) - rolebindings, err := c.k8sClient.RbacV1().RoleBindings("default").List(context.Background(), metav1.ListOptions{}) + rolebindings, err := c.kubernetes.RbacV1().RoleBindings("default").List(context.Background(), metav1.ListOptions{}) require.NoError(t, err) require.Len(t, rolebindings.Items, 1) require.Equal(t, rolebindings.Items[0].Name, rolebinding3.Name) } func TestDeleteJobs(t *testing.T) { - c := getInitializedCommand(t, nil) - c.k8sClient = fake.NewSimpleClientset() + c := getInitializedCommand(t) + c.kubernetes = fake.NewSimpleClientset() job := &batchv1.Job{ ObjectMeta: metav1.ObjectMeta{ Name: "consul-test-job1", @@ -282,23 +255,23 @@ func TestDeleteJobs(t *testing.T) { }, }, } - _, err := c.k8sClient.BatchV1().Jobs("default").Create(context.Background(), job, metav1.CreateOptions{}) + _, err := c.kubernetes.BatchV1().Jobs("default").Create(context.Background(), job, metav1.CreateOptions{}) require.NoError(t, err) - _, err = c.k8sClient.BatchV1().Jobs("default").Create(context.Background(), job2, metav1.CreateOptions{}) + _, err = c.kubernetes.BatchV1().Jobs("default").Create(context.Background(), job2, metav1.CreateOptions{}) require.NoError(t, err) - _, err = c.k8sClient.BatchV1().Jobs("default").Create(context.Background(), job3, metav1.CreateOptions{}) + _, err = c.kubernetes.BatchV1().Jobs("default").Create(context.Background(), job3, metav1.CreateOptions{}) require.NoError(t, err) err = c.deleteJobs("consul", "default") require.NoError(t, err) - jobs, err := c.k8sClient.BatchV1().Jobs("default").List(context.Background(), metav1.ListOptions{}) + jobs, err := c.kubernetes.BatchV1().Jobs("default").List(context.Background(), metav1.ListOptions{}) require.NoError(t, err) require.Len(t, jobs.Items, 1) require.Equal(t, jobs.Items[0].Name, job3.Name) } func TestDeleteClusterRoles(t *testing.T) { - c := getInitializedCommand(t, nil) - c.k8sClient = fake.NewSimpleClientset() + c := getInitializedCommand(t) + c.kubernetes = fake.NewSimpleClientset() clusterrole := &rbacv1.ClusterRole{ ObjectMeta: metav1.ObjectMeta{ Name: "consul-test-clusterrole1", @@ -323,23 +296,23 @@ func TestDeleteClusterRoles(t *testing.T) { }, }, } - _, err := c.k8sClient.RbacV1().ClusterRoles().Create(context.Background(), clusterrole, metav1.CreateOptions{}) + _, err := c.kubernetes.RbacV1().ClusterRoles().Create(context.Background(), clusterrole, metav1.CreateOptions{}) require.NoError(t, err) - _, err = c.k8sClient.RbacV1().ClusterRoles().Create(context.Background(), clusterrole2, metav1.CreateOptions{}) + _, err = c.kubernetes.RbacV1().ClusterRoles().Create(context.Background(), clusterrole2, metav1.CreateOptions{}) require.NoError(t, err) - _, err = c.k8sClient.RbacV1().ClusterRoles().Create(context.Background(), clusterrole3, metav1.CreateOptions{}) + _, err = 
c.kubernetes.RbacV1().ClusterRoles().Create(context.Background(), clusterrole3, metav1.CreateOptions{}) require.NoError(t, err) err = c.deleteClusterRoles("consul") require.NoError(t, err) - clusterroles, err := c.k8sClient.RbacV1().ClusterRoles().List(context.Background(), metav1.ListOptions{}) + clusterroles, err := c.kubernetes.RbacV1().ClusterRoles().List(context.Background(), metav1.ListOptions{}) require.NoError(t, err) require.Len(t, clusterroles.Items, 1) require.Equal(t, clusterroles.Items[0].Name, clusterrole3.Name) } func TestDeleteClusterRoleBindings(t *testing.T) { - c := getInitializedCommand(t, nil) - c.k8sClient = fake.NewSimpleClientset() + c := getInitializedCommand(t) + c.kubernetes = fake.NewSimpleClientset() clusterrolebinding := &rbacv1.ClusterRoleBinding{ ObjectMeta: metav1.ObjectMeta{ Name: "consul-test-clusterrolebinding1", @@ -364,37 +337,32 @@ func TestDeleteClusterRoleBindings(t *testing.T) { }, }, } - _, err := c.k8sClient.RbacV1().ClusterRoleBindings().Create(context.Background(), clusterrolebinding, metav1.CreateOptions{}) + _, err := c.kubernetes.RbacV1().ClusterRoleBindings().Create(context.Background(), clusterrolebinding, metav1.CreateOptions{}) require.NoError(t, err) - _, err = c.k8sClient.RbacV1().ClusterRoleBindings().Create(context.Background(), clusterrolebinding2, metav1.CreateOptions{}) + _, err = c.kubernetes.RbacV1().ClusterRoleBindings().Create(context.Background(), clusterrolebinding2, metav1.CreateOptions{}) require.NoError(t, err) - _, err = c.k8sClient.RbacV1().ClusterRoleBindings().Create(context.Background(), clusterrolebinding3, metav1.CreateOptions{}) + _, err = c.kubernetes.RbacV1().ClusterRoleBindings().Create(context.Background(), clusterrolebinding3, metav1.CreateOptions{}) require.NoError(t, err) err = c.deleteClusterRoleBindings("consul") require.NoError(t, err) - clusterrolebindings, err := c.k8sClient.RbacV1().ClusterRoleBindings().List(context.Background(), metav1.ListOptions{}) + clusterrolebindings, err := c.kubernetes.RbacV1().ClusterRoleBindings().List(context.Background(), metav1.ListOptions{}) require.NoError(t, err) require.Len(t, clusterrolebindings.Items, 1) require.Equal(t, clusterrolebindings.Items[0].Name, clusterrolebinding3.Name) } // getInitializedCommand sets up a command struct for tests. 
-func getInitializedCommand(t *testing.T, buf io.Writer) *Command { +func getInitializedCommand(t *testing.T) *Command { t.Helper() log := hclog.New(&hclog.LoggerOptions{ Name: "cli", Level: hclog.Info, Output: os.Stdout, }) - var ui terminal.UI - if buf != nil { - ui = terminal.NewUI(context.Background(), buf) - } else { - ui = terminal.NewBasicUI(context.Background()) - } + baseCommand := &common.BaseCommand{ Log: log, - UI: ui, + UI: terminal.NewBasicUI(context.TODO()), } c := &Command{ @@ -406,7 +374,7 @@ func getInitializedCommand(t *testing.T, buf io.Writer) *Command { func TestTaskCreateCommand_AutocompleteFlags(t *testing.T) { t.Parallel() - cmd := getInitializedCommand(t, nil) + cmd := getInitializedCommand(t) predictor := cmd.AutocompleteFlags() @@ -429,444 +397,7 @@ func TestTaskCreateCommand_AutocompleteFlags(t *testing.T) { } func TestTaskCreateCommand_AutocompleteArgs(t *testing.T) { - cmd := getInitializedCommand(t, nil) + cmd := getInitializedCommand(t) c := cmd.AutocompleteArgs() assert.Equal(t, complete.PredictNothing, c) } - -func TestFetchCustomResources(t *testing.T) { - cr := unstructured.Unstructured{ - Object: map[string]interface{}{ - "apiVersion": "consul.hashicorp.com/v1alpha1", - "kind": "ServiceDefaults", - "metadata": map[string]interface{}{ - "name": "server", - "namespace": "default", - }, - }, - } - nonConsulCR1 := unstructured.Unstructured{ - Object: map[string]interface{}{ - "apiVersion": "example.com/v1", - "kind": "Example", - "metadata": map[string]interface{}{ - "name": "example-resource", - "namespace": "default", - }, - }, - } - nonConsulCR2 := unstructured.Unstructured{ - Object: map[string]interface{}{ - "apiVersion": "example.com/v1", - "kind": "Example", - "metadata": map[string]interface{}{ - "name": "example-resource", - "namespace": "other", - }, - }, - } - - c := getInitializedCommand(t, nil) - c.k8sClient = fake.NewSimpleClientset(&v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "other"}}) - c.apiextK8sClient, c.dynamicK8sClient = createClientsWithCrds() - - _, err := c.dynamicK8sClient.Resource(serviceDefaultsGRV).Namespace("default").Create(context.Background(), &cr, metav1.CreateOptions{}) - require.NoError(t, err) - _, err = c.dynamicK8sClient.Resource(nonConsulGRV).Namespace("default").Create(context.Background(), &nonConsulCR1, metav1.CreateOptions{}) - require.NoError(t, err) - _, err = c.dynamicK8sClient.Resource(nonConsulGRV).Namespace("other").Create(context.Background(), &nonConsulCR2, metav1.CreateOptions{}) - require.NoError(t, err) - - crds, err := c.fetchCustomResourceDefinitions() - require.NoError(t, err) - - actual, err := c.fetchCustomResources(crds) - require.NoError(t, err) - require.Len(t, actual, 1) - require.Contains(t, actual, cr) - require.NotContains(t, actual, nonConsulCR1) - require.NotContains(t, actual, nonConsulCR2) -} - -func TestDeleteCustomResources(t *testing.T) { - cr := unstructured.Unstructured{ - Object: map[string]interface{}{ - "apiVersion": "consul.hashicorp.com/v1alpha1", - "kind": "ServiceDefaults", - "metadata": map[string]interface{}{ - "name": "server", - "namespace": "default", - }, - }, - } - - c := getInitializedCommand(t, nil) - c.apiextK8sClient, c.dynamicK8sClient = createClientsWithCrds() - - _, err := c.dynamicK8sClient.Resource(serviceDefaultsGRV).Namespace("default").Create(context.Background(), &cr, metav1.CreateOptions{}) - require.NoError(t, err) - - crds, err := c.fetchCustomResourceDefinitions() - require.NoError(t, err) - - actual, err := c.fetchCustomResources(crds) - 
require.NoError(t, err) - require.Len(t, actual, 1) - - err = c.deleteCustomResources([]unstructured.Unstructured{cr}, mapCRKindToResourceName(crds), fakeUILogger) - require.NoError(t, err) - - actual, err = c.fetchCustomResources(crds) - require.NoError(t, err) - require.Len(t, actual, 0) -} - -func TestPatchCustomResources(t *testing.T) { - cr := unstructured.Unstructured{ - Object: map[string]interface{}{ - "apiVersion": "consul.hashicorp.com/v1alpha1", - "kind": "ServiceDefaults", - "metadata": map[string]interface{}{ - "name": "server", - "namespace": "default", - }, - }, - } - cr.SetFinalizers([]string{"consul.hashicorp.com"}) - - c := getInitializedCommand(t, nil) - c.apiextK8sClient, c.dynamicK8sClient = createClientsWithCrds() - - _, err := c.dynamicK8sClient.Resource(serviceDefaultsGRV).Namespace("default").Create(context.Background(), &cr, metav1.CreateOptions{}) - require.NoError(t, err) - - crds, err := c.fetchCustomResourceDefinitions() - require.NoError(t, err) - - err = c.patchCustomResources([]unstructured.Unstructured{cr}, mapCRKindToResourceName(crds), fakeUILogger) - require.NoError(t, err) - - actual, err := c.fetchCustomResources(crds) - require.NoError(t, err) - require.Len(t, actual, 1) - require.Len(t, actual[0].GetFinalizers(), 0) -} - -func TestMapKindToResource(t *testing.T) { - crds := apiextv1.CustomResourceDefinitionList{ - Items: []apiextv1.CustomResourceDefinition{ - { - ObjectMeta: metav1.ObjectMeta{ - Name: "servicedefaults.consul.hashicorp.com", - Labels: map[string]string{ - "app": "consul", - }, - }, - Spec: apiextv1.CustomResourceDefinitionSpec{ - Group: "consul.hashicorp.com", - Names: apiextv1.CustomResourceDefinitionNames{ - Plural: "servicedefaults", - Kind: "ServiceDefaults", - }, - Scope: "Namespaced", - Versions: []apiextv1.CustomResourceDefinitionVersion{ - { - Name: "v1alpha1", - }, - }, - }, - }, - { - ObjectMeta: metav1.ObjectMeta{ - Name: "examples.example.com", - }, - Spec: apiextv1.CustomResourceDefinitionSpec{ - Group: "example.com", - Names: apiextv1.CustomResourceDefinitionNames{ - Plural: "examples", - Kind: "Example", - }, - Scope: "Namespaced", - Versions: []apiextv1.CustomResourceDefinitionVersion{ - { - Name: "v1", - }, - }, - }, - }, - }, - } - - expected := map[string]string{ - "ServiceDefaults": "servicedefaults", - "Example": "examples", - } - - actual := mapCRKindToResourceName(&crds) - require.Equal(t, expected, actual) -} - -func TestUninstall(t *testing.T) { - cases := map[string]struct { - input []string - messages []string - helmActionsRunner *helm.MockActionRunner - preProcessingFunc func() - expectedReturnCode int - expectCheckedForConsulInstallations bool - expectCheckedForConsulDemoInstallations bool - expectConsulUninstalled bool - expectConsulDemoUninstalled bool - }{ - "uninstall when consul installation exists returns success": { - input: []string{}, - messages: []string{ - "\n==> Checking if Consul demo application can be uninstalled\n No existing Consul demo application installation found.\n", - "\n==> Checking if Consul can be uninstalled\n ✓ Existing Consul installation found.\n", - "\n==> Consul Uninstall Summary\n Name: consul\n Namespace: consul\n --> Deleting custom resources managed by Consul\n --> Starting delete for \"server\" ServiceDefaults\n ✓ Successfully uninstalled Consul Helm release.\n ✓ Skipping deleting PVCs, secrets, and service accounts.\n", - }, - helmActionsRunner: &helm.MockActionRunner{ - CheckForInstallationsFunc: func(options *helm.CheckForInstallationsOptions) (bool, string, string, 
error) { - if options.ReleaseName == "consul" { - return true, "consul", "consul", nil - } else { - return false, "", "", nil - } - }, - }, - expectedReturnCode: 0, - expectCheckedForConsulInstallations: true, - expectCheckedForConsulDemoInstallations: true, - expectConsulUninstalled: true, - expectConsulDemoUninstalled: false, - }, - "uninstall when consul installation does not exist returns error": { - input: []string{}, - messages: []string{ - "\n==> Checking if Consul demo application can be uninstalled\n No existing Consul demo application installation found.\n ! could not find Consul installation in cluster\n", - }, - helmActionsRunner: &helm.MockActionRunner{ - CheckForInstallationsFunc: func(options *helm.CheckForInstallationsOptions) (bool, string, string, error) { - if options.ReleaseName == "consul" { - return false, "", "", nil - } else { - return false, "", "", nil - } - }, - }, - expectedReturnCode: 1, - expectCheckedForConsulInstallations: true, - expectCheckedForConsulDemoInstallations: true, - expectConsulUninstalled: false, - expectConsulDemoUninstalled: false, - }, - "uninstall with -wipe-data flag processes other resource and returns success": { - input: []string{ - "-wipe-data", - }, - messages: []string{ - "\n==> Checking if Consul demo application can be uninstalled\n No existing Consul demo application installation found.\n", - "\n==> Checking if Consul can be uninstalled\n ✓ Existing Consul installation found.\n", - "\n==> Consul Uninstall Summary\n Name: consul\n Namespace: consul\n --> Deleting custom resources managed by Consul\n --> Starting delete for \"server\" ServiceDefaults\n ✓ Successfully uninstalled Consul Helm release.\n", - "\n==> Other Consul Resources\n Deleting data for installation: \n Name: consul\n Namespace consul\n ✓ No PVCs found.\n ✓ No Consul secrets found.\n ✓ No Consul service accounts found.\n ✓ No Consul roles found.\n ✓ No Consul rolebindings found.\n ✓ No Consul jobs found.\n ✓ No Consul cluster roles found.\n ✓ No Consul cluster role bindings found.\n", - }, - helmActionsRunner: &helm.MockActionRunner{ - CheckForInstallationsFunc: func(options *helm.CheckForInstallationsOptions) (bool, string, string, error) { - if options.ReleaseName == "consul" { - return true, "consul", "consul", nil - } else { - return false, "", "", nil - } - }, - }, - expectedReturnCode: 0, - expectCheckedForConsulInstallations: true, - expectCheckedForConsulDemoInstallations: true, - expectConsulUninstalled: true, - expectConsulDemoUninstalled: false, - }, - "uninstall when both consul and consul demo installations exist returns success": { - input: []string{}, - messages: []string{ - "\n==> Checking if Consul demo application can be uninstalled\n ✓ Existing Consul demo application installation found.\n", - "\n==> Consul Demo Application Uninstall Summary\n Name: consul-demo\n Namespace: consul-demo\n ✓ Successfully uninstalled Consul demo application Helm release.\n", - "\n==> Checking if Consul can be uninstalled\n ✓ Existing Consul installation found.\n", - "\n==> Consul Uninstall Summary\n Name: consul\n Namespace: consul\n --> Deleting custom resources managed by Consul\n --> Starting delete for \"server\" ServiceDefaults\n ✓ Successfully uninstalled Consul Helm release.\n ✓ Skipping deleting PVCs, secrets, and service accounts.\n", - }, - helmActionsRunner: &helm.MockActionRunner{ - CheckForInstallationsFunc: func(options *helm.CheckForInstallationsOptions) (bool, string, string, error) { - if options.ReleaseName == "consul" { - return true, "consul", 
"consul", nil - } else { - return true, "consul-demo", "consul-demo", nil - } - }, - }, - expectedReturnCode: 0, - expectCheckedForConsulInstallations: true, - expectCheckedForConsulDemoInstallations: true, - expectConsulUninstalled: true, - expectConsulDemoUninstalled: true, - }, - "uninstall when consul uninstall errors returns error": { - input: []string{}, - messages: []string{ - "\n==> Checking if Consul demo application can be uninstalled\n No existing Consul demo application installation found.\n", - "\n==> Checking if Consul can be uninstalled\n ✓ Existing Consul installation found.\n", - "\n==> Consul Uninstall Summary\n Name: consul\n Namespace: consul\n --> Deleting custom resources managed by Consul\n --> Starting delete for \"server\" ServiceDefaults\n ! Helm returned an error.\n", - }, - helmActionsRunner: &helm.MockActionRunner{ - CheckForInstallationsFunc: func(options *helm.CheckForInstallationsOptions) (bool, string, string, error) { - if options.ReleaseName == "consul" { - return true, "consul", "consul", nil - } else { - return false, "", "", nil - } - }, - UninstallFunc: func(uninstall *action.Uninstall, name string) (*helmRelease.UninstallReleaseResponse, error) { - return nil, errors.New("Helm returned an error.") - }, - }, - expectedReturnCode: 1, - expectCheckedForConsulInstallations: true, - expectCheckedForConsulDemoInstallations: true, - expectConsulUninstalled: false, - expectConsulDemoUninstalled: false, - }, - "uninstall when consul demo is installed consul demo uninstall errors returns error": { - input: []string{}, - messages: []string{ - "\n==> Checking if Consul demo application can be uninstalled\n ✓ Existing Consul demo application installation found.\n", - "\n==> Consul Demo Application Uninstall Summary\n Name: consul-demo\n Namespace: consul-demo\n ! Helm returned an error.\n", - }, - helmActionsRunner: &helm.MockActionRunner{ - CheckForInstallationsFunc: func(options *helm.CheckForInstallationsOptions) (bool, string, string, error) { - if options.ReleaseName == "consul" { - return true, "consul", "consul", nil - } else { - return true, "consul-demo", "consul-demo", nil - } - }, - UninstallFunc: func(uninstall *action.Uninstall, name string) (*helmRelease.UninstallReleaseResponse, error) { - if name == "consul" { - return &helmRelease.UninstallReleaseResponse{}, nil - } else { - return nil, errors.New("Helm returned an error.") - } - }, - }, - expectedReturnCode: 1, - expectCheckedForConsulInstallations: true, - expectCheckedForConsulDemoInstallations: true, - expectConsulUninstalled: false, - expectConsulDemoUninstalled: false, - }, - } - - cr := unstructured.Unstructured{ - Object: map[string]interface{}{ - "apiVersion": "consul.hashicorp.com/v1alpha1", - "kind": "ServiceDefaults", - "metadata": map[string]interface{}{ - "name": "server", - "namespace": "default", - }, - }, - } - cr.SetFinalizers([]string{"consul.hashicorp.com"}) - - for name, tc := range cases { - t.Run(name, func(t *testing.T) { - buf := new(bytes.Buffer) - c := getInitializedCommand(t, buf) - - c.k8sClient = fake.NewSimpleClientset() - - c.apiextK8sClient, c.dynamicK8sClient = createClientsWithCrds() - _, err := c.dynamicK8sClient.Resource(serviceDefaultsGRV).Namespace("default").Create(context.Background(), &cr, metav1.CreateOptions{}) - require.NoError(t, err) - - mock := tc.helmActionsRunner - c.helmActionsRunner = mock - - if tc.preProcessingFunc != nil { - tc.preProcessingFunc() - } - input := append([]string{ - "--auto-approve", - }, tc.input...) 
- returnCode := c.Run(input) - output := buf.String() - require.Equal(t, tc.expectedReturnCode, returnCode, output) - - require.Equal(t, tc.expectCheckedForConsulInstallations, mock.CheckedForConsulInstallations) - require.Equal(t, tc.expectCheckedForConsulDemoInstallations, mock.CheckedForConsulDemoInstallations) - require.Equal(t, tc.expectConsulUninstalled, mock.ConsulUninstalled) - require.Equal(t, tc.expectConsulDemoUninstalled, mock.ConsulDemoUninstalled) - for _, msg := range tc.messages { - require.Contains(t, output, msg) - } - - if tc.expectConsulUninstalled { - crds, err := c.fetchCustomResourceDefinitions() - require.NoError(t, err) - crs, err := c.fetchCustomResources(crds) - require.NoError(t, err) - require.Len(t, crs, 0) - } - }) - } -} - -func createClientsWithCrds() (apiext.Interface, dynamic.Interface) { - grvToListKind := map[schema.GroupVersionResource]string{ - serviceDefaultsGRV: "ServiceDefaultsList", - nonConsulGRV: "ExamplesList", - } - crds := apiextv1.CustomResourceDefinitionList{ - Items: []apiextv1.CustomResourceDefinition{ - { - ObjectMeta: metav1.ObjectMeta{ - Name: "servicedefaults.consul.hashicorp.com", - Labels: map[string]string{ - "app": "consul", - }, - }, - Spec: apiextv1.CustomResourceDefinitionSpec{ - Group: "consul.hashicorp.com", - Names: apiextv1.CustomResourceDefinitionNames{ - Plural: "servicedefaults", - Kind: "ServiceDefaults", - }, - Scope: "Namespaced", - Versions: []apiextv1.CustomResourceDefinitionVersion{ - { - Name: "v1alpha1", - }, - }, - }, - }, - { - ObjectMeta: metav1.ObjectMeta{ - Name: "examples.example.com", - }, - Spec: apiextv1.CustomResourceDefinitionSpec{ - Group: "example.com", - Names: apiextv1.CustomResourceDefinitionNames{ - Plural: "examples", - Kind: "Example", - }, - Scope: "Namespaced", - Versions: []apiextv1.CustomResourceDefinitionVersion{ - { - Name: "v1", - }, - }, - }, - }, - }, - } - return apiextFake.NewSimpleClientset(&crds), dynamicFake.NewSimpleDynamicClientWithCustomListKinds(runtime.NewScheme(), grvToListKind) -} - -func fakeUILogger(s string, i ...interface{}) {} diff --git a/cli/cmd/upgrade/upgrade.go b/cli/cmd/upgrade/upgrade.go index 4c962c47b5..e1bb744ce1 100644 --- a/cli/cmd/upgrade/upgrade.go +++ b/cli/cmd/upgrade/upgrade.go @@ -3,7 +3,6 @@ package upgrade import ( "errors" "fmt" - "net/http" "os" "strings" "sync" @@ -15,14 +14,12 @@ import ( "github.com/hashicorp/consul-k8s/cli/common/terminal" "github.com/hashicorp/consul-k8s/cli/config" "github.com/hashicorp/consul-k8s/cli/helm" - "github.com/hashicorp/consul-k8s/cli/preset" "github.com/posener/complete" - + "helm.sh/helm/v3/pkg/action" helmCLI "helm.sh/helm/v3/pkg/cli" "helm.sh/helm/v3/pkg/cli/values" "helm.sh/helm/v3/pkg/getter" "k8s.io/client-go/kubernetes" - "k8s.io/utils/strings/slices" ) const ( @@ -51,39 +48,26 @@ const ( flagNameContext = "context" flagNameKubeconfig = "kubeconfig" - - flagNameDemo = "demo" - defaultDemo = false - - flagNameHCPResourceID = "hcp-resource-id" - - consulDemoChartPath = "demo" ) type Command struct { *common.BaseCommand - helmActionsRunner helm.HelmActionsRunner - kubernetes kubernetes.Interface - httpClient *http.Client - set *flag.Sets - flagPreset string - flagDryRun bool - flagAutoApprove bool - flagValueFiles []string - flagSetStringValues []string - flagSetValues []string - flagFileValues []string - flagTimeout string - timeoutDuration time.Duration - flagVerbose bool - flagWait bool - flagNameHCPResourceID string - flagDemo bool + flagPreset string + flagDryRun bool + flagAutoApprove bool + 
flagValueFiles []string + flagSetStringValues []string + flagSetValues []string + flagFileValues []string + flagTimeout string + timeoutDuration time.Duration + flagVerbose bool + flagWait bool flagKubeConfig string flagKubeContext string @@ -93,6 +77,12 @@ type Command struct { } func (c *Command) init() { + // Store all the possible preset values in 'presetList'. Printed in the help message. + var presetList []string + for name := range config.Presets { + presetList = append(presetList, name) + } + c.set = flag.NewSets() f := c.set.NewSet("Command Options") f.BoolVar(&flag.BoolVar{ @@ -117,7 +107,7 @@ func (c *Command) init() { Name: flagNamePreset, Target: &c.flagPreset, Default: defaultPreset, - Usage: fmt.Sprintf("Use an upgrade preset, one of %s. Defaults to none", strings.Join(preset.Presets, ", ")), + Usage: fmt.Sprintf("Use an upgrade preset, one of %s. Defaults to none", strings.Join(presetList, ", ")), }) f.StringSliceVar(&flag.StringSliceVar{ Name: flagNameSetValues, @@ -169,19 +159,6 @@ func (c *Command) init() { Default: "", Usage: "Set the Kubernetes context to use.", }) - f.StringVar(&flag.StringVar{ - Name: flagNameHCPResourceID, - Target: &c.flagNameHCPResourceID, - Default: "", - Usage: "Set the HCP resource_id when using the 'cloud' preset.", - }) - f.BoolVar(&flag.BoolVar{ - Name: flagNameDemo, - Target: &c.flagDemo, - Default: defaultDemo, - Usage: fmt.Sprintf("Install %s immediately after installing %s.", - common.ReleaseTypeConsulDemo, common.ReleaseTypeConsul), - }) c.help = c.set.Help() } @@ -192,10 +169,6 @@ func (c *Command) Run(args []string) int { defer common.CloseWithError(c.BaseCommand) - if c.helmActionsRunner == nil { - c.helmActionsRunner = &helm.ActionRunner{} - } - err := c.validateFlags(args) if err != nil { c.UI.Output(err.Error()) @@ -243,144 +216,99 @@ func (c *Command) Run(args []string) int { c.UI.Output("Checking if Consul can be upgraded", terminal.WithHeaderStyle()) uiLogger := c.createUILogger() - found, consulName, consulNamespace, err := c.helmActionsRunner.CheckForInstallations(&helm.CheckForInstallationsOptions{ - Settings: settings, - ReleaseName: common.DefaultReleaseName, - DebugLog: uiLogger, - }) - + name, namespace, err := common.CheckForInstallations(settings, uiLogger) if err != nil { - c.UI.Output(err.Error(), terminal.WithErrorStyle()) - return 1 - } - if !found { c.UI.Output("Cannot upgrade Consul. Existing Consul installation not found. Use the command `consul-k8s install` to install Consul.", terminal.WithErrorStyle()) return 1 - } else { - c.UI.Output("Existing %s installation found to be upgraded.", common.ReleaseTypeConsul, terminal.WithSuccessStyle()) - c.UI.Output("Name: %s\nNamespace: %s", consulName, consulNamespace, terminal.WithInfoStyle()) } + c.UI.Output("Existing Consul installation found to be upgraded.", terminal.WithSuccessStyle()) + c.UI.Output("Name: %s\nNamespace: %s", name, namespace, terminal.WithInfoStyle()) - c.UI.Output(fmt.Sprintf("Checking if %s can be upgraded", common.ReleaseTypeConsulDemo), terminal.WithHeaderStyle()) - // Ensure there is not an existing Consul demo installation which would cause a conflict. 
- foundDemo, demoName, demoNamespace, _ := c.helmActionsRunner.CheckForInstallations(&helm.CheckForInstallationsOptions{ - Settings: settings, - ReleaseName: common.ConsulDemoAppReleaseName, - DebugLog: uiLogger, - }) - if foundDemo { - c.UI.Output("Existing %s installation found to be upgraded.", common.ReleaseTypeConsulDemo, terminal.WithSuccessStyle()) - c.UI.Output("Name: %s\nNamespace: %s", demoName, demoNamespace, terminal.WithInfoStyle()) - } else { - if c.flagDemo { - c.UI.Output("No existing %s installation found, but -demo flag provided. %s will be installed in namespace %s.", - common.ConsulDemoAppReleaseName, common.ConsulDemoAppReleaseName, consulNamespace, terminal.WithInfoStyle()) - } else { - c.UI.Output("No existing %s installation found.", common.ReleaseTypeConsulDemo, terminal.WithInfoStyle()) - } - } - - // Handle preset, value files, and set values logic. - chartValues, err := c.mergeValuesFlagsWithPrecedence(settings, consulNamespace) + chart, err := helm.LoadChart(consulChart.ConsulHelmChart, common.TopLevelChartDirName) if err != nil { c.UI.Output(err.Error(), terminal.WithErrorStyle()) return 1 } + c.UI.Output("Loaded charts", terminal.WithSuccessStyle()) - // Without informing the user, default global.name to consul if it hasn't been set already. We don't allow setting - // the release name, and since that is hardcoded to "consul", setting global.name to "consul" makes it so resources - // aren't double prefixed with "consul-consul-...". - chartValues = common.MergeMaps(config.ConvertToMap(config.GlobalNameConsul), chartValues) - - timeout, err := time.ParseDuration(c.flagTimeout) + currentChartValues, err := helm.FetchChartValues(namespace, name, settings, uiLogger) if err != nil { c.UI.Output(err.Error(), terminal.WithErrorStyle()) return 1 } - options := &helm.UpgradeOptions{ - ReleaseName: consulName, - ReleaseType: common.ReleaseTypeConsul, - ReleaseTypeName: common.ReleaseTypeConsul, - Namespace: consulNamespace, - Values: chartValues, - Settings: settings, - EmbeddedChart: consulChart.ConsulHelmChart, - ChartDirName: common.TopLevelChartDirName, - UILogger: uiLogger, - DryRun: c.flagDryRun, - AutoApprove: c.flagAutoApprove, - Wait: c.flagWait, - Timeout: timeout, - UI: c.UI, - HelmActionsRunner: c.helmActionsRunner, - } - err = helm.UpgradeHelmRelease(options) + // Handle preset, value files, and set values logic. + chartValues, err := c.mergeValuesFlagsWithPrecedence(settings) if err != nil { c.UI.Output(err.Error(), terminal.WithErrorStyle()) return 1 } - timeout, err = time.ParseDuration(c.flagTimeout) - if err != nil { - c.UI.Output(err.Error(), terminal.WithErrorStyle()) + // Without informing the user, default global.name to consul if it hasn't been set already. We don't allow setting + // the release name, and since that is hardcoded to "consul", setting global.name to "consul" makes it so resources + // aren't double prefixed with "consul-consul-...". + chartValues = common.MergeMaps(config.Convert(config.GlobalNameConsul), chartValues) + + // Print out the upgrade summary. 
+ if err = c.printDiff(currentChartValues, chartValues); err != nil { + c.UI.Output("Could not print the difference between current and upgraded charts: %v", err, terminal.WithErrorStyle()) return 1 } - if foundDemo { - options := &helm.UpgradeOptions{ - ReleaseName: demoName, - ReleaseType: common.ReleaseTypeConsulDemo, - ReleaseTypeName: common.ConsulDemoAppReleaseName, - Namespace: demoNamespace, - Values: make(map[string]interface{}), - Settings: settings, - EmbeddedChart: consulChart.DemoHelmChart, - ChartDirName: consulDemoChartPath, - UILogger: uiLogger, - DryRun: c.flagDryRun, - AutoApprove: c.flagAutoApprove, - Wait: c.flagWait, - Timeout: timeout, - UI: c.UI, - HelmActionsRunner: c.helmActionsRunner, - } + // Check if the user is OK with the upgrade unless the auto approve or dry run flags are true. + if !c.flagAutoApprove && !c.flagDryRun { + confirmation, err := c.UI.Input(&terminal.Input{ + Prompt: "Proceed with upgrade? (y/N)", + Style: terminal.InfoStyle, + Secret: false, + }) - err = helm.UpgradeHelmRelease(options) if err != nil { c.UI.Output(err.Error(), terminal.WithErrorStyle()) return 1 } - } else if c.flagDemo { - - options := &helm.InstallOptions{ - ReleaseName: common.ConsulDemoAppReleaseName, - ReleaseType: common.ReleaseTypeConsulDemo, - Namespace: settings.Namespace(), - Values: make(map[string]interface{}), - Settings: settings, - EmbeddedChart: consulChart.DemoHelmChart, - ChartDirName: consulDemoChartPath, - UILogger: uiLogger, - DryRun: c.flagDryRun, - AutoApprove: c.flagAutoApprove, - Wait: c.flagWait, - Timeout: timeout, - UI: c.UI, - HelmActionsRunner: c.helmActionsRunner, - } - err = helm.InstallDemoApp(options) - if err != nil { - c.UI.Output(err.Error(), terminal.WithErrorStyle()) + if common.Abort(confirmation) { + c.UI.Output("Upgrade aborted. Use the command `consul-k8s upgrade -help` to learn how to customize your upgrade.", + terminal.WithInfoStyle()) return 1 } } + if !c.flagDryRun { + c.UI.Output("Upgrading Consul", terminal.WithHeaderStyle()) + } else { + c.UI.Output("Performing Dry Run Upgrade", terminal.WithHeaderStyle()) + } + + // Set up action configuration for Helm Go SDK function calls. + actionConfig := new(action.Configuration) + actionConfig, err = helm.InitActionConfig(actionConfig, namespace, settings, uiLogger) + if err != nil { + c.UI.Output(err.Error(), terminal.WithErrorStyle()) + return 1 + } + + // Set up the upgrade action. + upgrade := action.NewUpgrade(actionConfig) + upgrade.Namespace = namespace + upgrade.DryRun = c.flagDryRun + upgrade.Wait = c.flagWait + upgrade.Timeout = c.timeoutDuration + + // Run the upgrade. Note that the dry run config is passed into the upgrade action, so upgrade.Run is called even during a dry run. + _, err = upgrade.Run(common.DefaultReleaseName, chart, chartValues) + if err != nil { + c.UI.Output(err.Error(), terminal.WithErrorStyle()) + return 1 + } + if c.flagDryRun { c.UI.Output("Dry run complete. 
No changes were made to the Kubernetes cluster.\n"+ "Upgrade can proceed with this configuration.", terminal.WithInfoStyle()) return 0 } + + c.UI.Output("Consul upgraded in namespace %q.", namespace, terminal.WithSuccessStyle()) return 0 } @@ -401,8 +329,6 @@ func (c *Command) AutocompleteFlags() complete.Flags { fmt.Sprintf("-%s", flagNameWait): complete.PredictNothing, fmt.Sprintf("-%s", flagNameContext): complete.PredictNothing, fmt.Sprintf("-%s", flagNameKubeconfig): complete.PredictFiles("*"), - fmt.Sprintf("-%s", flagNameDemo): complete.PredictNothing, - fmt.Sprintf("-%s", flagNameHCPResourceID): complete.PredictNothing, } } @@ -424,7 +350,7 @@ func (c *Command) validateFlags(args []string) error { if len(c.flagValueFiles) != 0 && c.flagPreset != defaultPreset { return fmt.Errorf("cannot set both -%s and -%s", flagNameConfigFile, flagNamePreset) } - if ok := slices.Contains(preset.Presets, c.flagPreset); c.flagPreset != defaultPreset && !ok { + if _, ok := config.Presets[c.flagPreset]; c.flagPreset != defaultPreset && !ok { return fmt.Errorf("'%s' is not a valid preset", c.flagPreset) } if _, err := time.ParseDuration(c.flagTimeout); err != nil { @@ -438,20 +364,6 @@ func (c *Command) validateFlags(args []string) error { } } - if c.flagPreset == preset.PresetCloud { - clientID := os.Getenv(preset.EnvHCPClientID) - clientSecret := os.Getenv(preset.EnvHCPClientSecret) - if clientID == "" { - return fmt.Errorf("When '%s' is specified as the preset, the '%s' environment variable must also be set", preset.PresetCloud, preset.EnvHCPClientID) - } else if clientSecret == "" { - return fmt.Errorf("When '%s' is specified as the preset, the '%s' environment variable must also be set", preset.PresetCloud, preset.EnvHCPClientSecret) - } else if c.flagNameHCPResourceID == "" { - return fmt.Errorf("When '%s' is specified as the preset, the '%s' flag must also be provided", preset.PresetCloud, flagNameHCPResourceID) - } - } else if c.flagNameHCPResourceID != "" { - return fmt.Errorf("The '%s' flag can only be used with the '%s' preset", flagNameHCPResourceID, preset.PresetCloud) - } - return nil } @@ -464,7 +376,7 @@ func (c *Command) validateFlags(args []string) error { // 5. -set-file // For example, -set-file will override a value provided via -set. // Within each of these groups the rightmost flag value has the highest precedence. -func (c *Command) mergeValuesFlagsWithPrecedence(settings *helmCLI.EnvSettings, namespace string) (map[string]interface{}, error) { +func (c *Command) mergeValuesFlagsWithPrecedence(settings *helmCLI.EnvSettings) (map[string]interface{}, error) { p := getter.All(settings) v := &values.Options{ ValueFiles: c.flagValueFiles, @@ -478,14 +390,7 @@ func (c *Command) mergeValuesFlagsWithPrecedence(settings *helmCLI.EnvSettings, } if c.flagPreset != defaultPreset { // Note the ordering of the function call, presets have lower precedence than set vals. - p, err := c.getPreset(c.flagPreset, namespace) - if err != nil { - return nil, fmt.Errorf("error getting preset provider: %s", err) - } - presetMap, err := p.GetValueMap() - if err != nil { - return nil, fmt.Errorf("error getting preset values: %s", err) - } + presetMap := config.Presets[c.flagPreset].(map[string]interface{}) vals = common.MergeMaps(presetMap, vals) } return vals, err @@ -519,22 +424,24 @@ func (c *Command) createUILogger() func(string, ...interface{}) { } } -// getPreset is a factory function that, given a string, produces a struct that -// implements the Preset interface. 
If the string is not recognized an error is -// returned. -func (c *Command) getPreset(name string, namespace string) (preset.Preset, error) { - hcpConfig := preset.GetHCPPresetFromEnv(c.flagNameHCPResourceID) - getPresetConfig := &preset.GetPresetConfig{ - Name: name, - CloudPreset: &preset.CloudPreset{ - KubernetesClient: c.kubernetes, - KubernetesNamespace: namespace, - SkipSavingSecrets: true, - UI: c.UI, - HTTPClient: c.httpClient, - HCPConfig: hcpConfig, - Context: c.Ctx, - }, +// printDiff marshals both maps to YAML and prints the diff between the two. +func (c *Command) printDiff(old, new map[string]interface{}) error { + diff, err := common.Diff(old, new) + if err != nil { + return err } - return preset.GetPreset(getPresetConfig) + + c.UI.Output("\nDifference between user overrides for current and upgraded charts"+ + "\n--------------------------------------------------------------", terminal.WithInfoStyle()) + for _, line := range strings.Split(diff, "\n") { + if strings.HasPrefix(line, "+") { + c.UI.Output(line, terminal.WithDiffAddedStyle()) + } else if strings.HasPrefix(line, "-") { + c.UI.Output(line, terminal.WithDiffRemovedStyle()) + } else { + c.UI.Output(line, terminal.WithDiffUnchangedStyle()) + } + } + + return nil } diff --git a/cli/cmd/upgrade/upgrade_test.go b/cli/cmd/upgrade/upgrade_test.go index 2f2168496d..9b4636eb57 100644 --- a/cli/cmd/upgrade/upgrade_test.go +++ b/cli/cmd/upgrade/upgrade_test.go @@ -1,29 +1,16 @@ package upgrade import ( - "bytes" - "context" - "errors" "flag" "fmt" - "io" "os" "testing" "github.com/hashicorp/consul-k8s/cli/common" cmnFlag "github.com/hashicorp/consul-k8s/cli/common/flag" - "github.com/hashicorp/consul-k8s/cli/common/terminal" - "github.com/hashicorp/consul-k8s/cli/helm" - "github.com/hashicorp/consul-k8s/cli/preset" "github.com/hashicorp/go-hclog" "github.com/posener/complete" "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "helm.sh/helm/v3/pkg/action" - "helm.sh/helm/v3/pkg/chart" - helmRelease "helm.sh/helm/v3/pkg/release" - "k8s.io/client-go/kubernetes" - "k8s.io/client-go/kubernetes/fake" ) // TestValidateFlags tests the validate flags function. @@ -56,7 +43,7 @@ func TestValidateFlags(t *testing.T) { } for _, testCase := range testCases { - c := getInitializedCommand(t, nil) + c := getInitializedCommand(t) t.Run(testCase.description, func(t *testing.T) { if err := c.validateFlags(testCase.input); err == nil { t.Errorf("Test case should have failed.") @@ -66,22 +53,16 @@ func TestValidateFlags(t *testing.T) { } // getInitializedCommand sets up a command struct for tests. 
-func getInitializedCommand(t *testing.T, buf io.Writer) *Command { +func getInitializedCommand(t *testing.T) *Command { t.Helper() log := hclog.New(&hclog.LoggerOptions{ Name: "cli", Level: hclog.Info, Output: os.Stdout, }) - var ui terminal.UI - if buf != nil { - ui = terminal.NewUI(context.Background(), buf) - } else { - ui = terminal.NewBasicUI(context.Background()) - } + baseCommand := &common.BaseCommand{ Log: log, - UI: ui, } c := &Command{ @@ -93,7 +74,7 @@ func getInitializedCommand(t *testing.T, buf io.Writer) *Command { func TestTaskCreateCommand_AutocompleteFlags(t *testing.T) { t.Parallel() - cmd := getInitializedCommand(t, nil) + cmd := getInitializedCommand(t) predictor := cmd.AutocompleteFlags() @@ -116,437 +97,7 @@ func TestTaskCreateCommand_AutocompleteFlags(t *testing.T) { } func TestTaskCreateCommand_AutocompleteArgs(t *testing.T) { - cmd := getInitializedCommand(t, nil) + cmd := getInitializedCommand(t) c := cmd.AutocompleteArgs() assert.Equal(t, complete.PredictNothing, c) } - -func TestGetPreset(t *testing.T) { - testCases := []struct { - description string - presetName string - }{ - { - "'cloud' should return a CloudPreset'.", - preset.PresetCloud, - }, - { - "'quickstart' should return a QuickstartPreset'.", - preset.PresetQuickstart, - }, - { - "'secure' should return a SecurePreset'.", - preset.PresetSecure, - }, - } - - for _, tc := range testCases { - c := getInitializedCommand(t, nil) - t.Run(tc.description, func(t *testing.T) { - p, err := c.getPreset(tc.presetName, "consul") - require.NoError(t, err) - switch p.(type) { - case *preset.CloudPreset: - require.Equal(t, preset.PresetCloud, tc.presetName) - case *preset.QuickstartPreset: - require.Equal(t, preset.PresetQuickstart, tc.presetName) - case *preset.SecurePreset: - require.Equal(t, preset.PresetSecure, tc.presetName) - } - }) - } -} - -// TestValidateCloudPresets tests the validate flags function when passed the cloud preset. 
-func TestValidateCloudPresets(t *testing.T) { - testCases := []struct { - description string - input []string - preProcessingFunc func() - postProcessingFunc func() - expectError bool - }{ - { - "Should not error on cloud preset when HCP_CLIENT_ID and HCP_CLIENT_SECRET envvars are present and hcp-resource-id parameter is provided.", - []string{"-preset=cloud", "-hcp-resource-id=foobar"}, - func() { - os.Setenv("HCP_CLIENT_ID", "foo") - os.Setenv("HCP_CLIENT_SECRET", "bar") - }, - func() { - os.Setenv("HCP_CLIENT_ID", "") - os.Setenv("HCP_CLIENT_SECRET", "") - }, - false, - }, - { - "Should error on cloud preset when HCP_CLIENT_ID is not provided.", - []string{"-preset=cloud", "-hcp-resource-id=foobar"}, - func() { - os.Unsetenv("HCP_CLIENT_ID") - os.Setenv("HCP_CLIENT_SECRET", "bar") - }, - func() { - os.Unsetenv("HCP_CLIENT_ID") - os.Unsetenv("HCP_CLIENT_SECRET") - }, - true, - }, - { - "Should error on cloud preset when HCP_CLIENT_SECRET is not provided.", - []string{"-preset=cloud", "-hcp-resource-id=foobar"}, - func() { - os.Setenv("HCP_CLIENT_ID", "foo") - os.Unsetenv("HCP_CLIENT_SECRET") - }, - func() { - os.Unsetenv("HCP_CLIENT_ID") - os.Unsetenv("HCP_CLIENT_SECRET") - }, - true, - }, - { - "Should error on cloud preset when -hcp-resource-id flag is not provided.", - []string{"-preset=cloud"}, - func() { - os.Setenv("HCP_CLIENT_ID", "foo") - os.Setenv("HCP_CLIENT_SECRET", "bar") - }, - func() { - os.Unsetenv("HCP_CLIENT_ID") - os.Unsetenv("HCP_CLIENT_SECRET") - }, - true, - }, - { - "Should error when -hcp-resource-id flag is provided but cloud preset is not specified.", - []string{"-hcp-resource-id=foobar"}, - func() { - os.Setenv("HCP_CLIENT_ID", "foo") - os.Setenv("HCP_CLIENT_SECRET", "bar") - }, - func() { - os.Unsetenv("HCP_CLIENT_ID") - os.Unsetenv("HCP_CLIENT_SECRET") - }, - true, - }, - } - - for _, testCase := range testCases { - testCase.preProcessingFunc() - c := getInitializedCommand(t, nil) - t.Run(testCase.description, func(t *testing.T) { - err := c.validateFlags(testCase.input) - if testCase.expectError && err == nil { - t.Errorf("Test case should have failed.") - } else if !testCase.expectError && err != nil { - t.Errorf("Test case should not have failed.") - } - }) - testCase.postProcessingFunc() - } -} - -func TestUpgrade(t *testing.T) { - var k8s kubernetes.Interface - cases := map[string]struct { - input []string - messages []string - helmActionsRunner *helm.MockActionRunner - preProcessingFunc func() - expectedReturnCode int - expectCheckedForConsulInstallations bool - expectCheckedForConsulDemoInstallations bool - expectConsulUpgraded bool - expectConsulDemoUpgraded bool - expectConsulDemoInstalled bool - }{ - "upgrade when consul installation exists returns success": { - input: []string{}, - messages: []string{ - "\n==> Checking if Consul can be upgraded\n ✓ Existing Consul installation found to be upgraded.\n Name: consul\n Namespace: consul\n", - "\n==> Checking if Consul demo application can be upgraded\n No existing Consul demo application installation found.\n", - "\n==> Consul Upgrade Summary\n ✓ Downloaded charts.\n \n Difference between user overrides for current and upgraded charts\n --------------------------------------------------------------\n + global:\n + name: consul\n \n", - "\n==> Upgrading Consul\n ✓ Consul upgraded in namespace \"consul\".\n", - }, - helmActionsRunner: &helm.MockActionRunner{ - CheckForInstallationsFunc: func(options *helm.CheckForInstallationsOptions) (bool, string, string, error) { - if options.ReleaseName == "consul" { 
- return true, "consul", "consul", nil - } else { - return false, "", "", nil - } - }, - }, - expectedReturnCode: 0, - expectCheckedForConsulInstallations: true, - expectCheckedForConsulDemoInstallations: true, - expectConsulUpgraded: true, - expectConsulDemoUpgraded: false, - }, - "upgrade when consul installation does not exists returns error": { - input: []string{}, - messages: []string{ - "\n==> Checking if Consul can be upgraded\n ! Cannot upgrade Consul. Existing Consul installation not found. Use the command `consul-k8s install` to install Consul.\n", - }, - helmActionsRunner: &helm.MockActionRunner{ - CheckForInstallationsFunc: func(options *helm.CheckForInstallationsOptions) (bool, string, string, error) { - if options.ReleaseName == "consul" { - return false, "", "", nil - } else { - return false, "", "", nil - } - }, - }, - expectedReturnCode: 1, - expectCheckedForConsulInstallations: true, - expectCheckedForConsulDemoInstallations: false, - expectConsulUpgraded: false, - expectConsulDemoUpgraded: false, - }, - "upgrade when consul upgrade errors returns error": { - input: []string{}, - messages: []string{ - "\n==> Checking if Consul can be upgraded\n ✓ Existing Consul installation found to be upgraded.\n Name: consul\n Namespace: consul\n", - "\n==> Checking if Consul demo application can be upgraded\n No existing Consul demo application installation found.\n", - "\n==> Consul Upgrade Summary\n ✓ Downloaded charts.\n \n Difference between user overrides for current and upgraded charts\n --------------------------------------------------------------\n + global:\n + name: consul\n \n\n==> Upgrading Consul\n ! Helm returned an error.\n", - }, - helmActionsRunner: &helm.MockActionRunner{ - CheckForInstallationsFunc: func(options *helm.CheckForInstallationsOptions) (bool, string, string, error) { - if options.ReleaseName == "consul" { - return true, "consul", "consul", nil - } else { - return false, "", "", nil - } - }, - UpgradeFunc: func(upgrade *action.Upgrade, name string, chart *chart.Chart, vals map[string]interface{}) (*helmRelease.Release, error) { - return nil, errors.New("Helm returned an error.") - }, - }, - expectedReturnCode: 1, - expectCheckedForConsulInstallations: true, - expectCheckedForConsulDemoInstallations: true, - expectConsulUpgraded: false, - expectConsulDemoUpgraded: false, - }, - "upgrade when demo flag provided but no demo installation exists installs demo and returns success": { - input: []string{ - "-demo", - }, - messages: []string{ - "\n==> Checking if Consul can be upgraded\n ✓ Existing Consul installation found to be upgraded.\n Name: consul\n Namespace: consul\n", - "\n==> Checking if Consul demo application can be upgraded\n No existing consul-demo installation found, but -demo flag provided. 
consul-demo will be installed in namespace consul.\n",
-				"\n==> Consul Upgrade Summary\n ✓ Downloaded charts.\n \n Difference between user overrides for current and upgraded charts\n --------------------------------------------------------------\n + global:\n + name: consul\n \n",
-				"\n==> Upgrading Consul\n ✓ Consul upgraded in namespace \"consul\".\n",
-				"\n==> Consul Demo Application Installation Summary\n Name: consul-demo\n Namespace: consul\n \n \n",
-				"\n==> Installing Consul demo application\n ✓ Downloaded charts.\n ✓ Consul demo application installed in namespace \"consul\".\n",
-				"\n==> Accessing Consul Demo Application UI\n kubectl port-forward deploy/frontend 8080:80 --namespace consul\n Browse to http://localhost:8080.\n",
-			},
-			helmActionsRunner: &helm.MockActionRunner{
-				CheckForInstallationsFunc: func(options *helm.CheckForInstallationsOptions) (bool, string, string, error) {
-					if options.ReleaseName == "consul" {
-						return true, "consul", "consul", nil
-					} else {
-						return false, "", "", nil
-					}
-				},
-			},
-			expectedReturnCode:                      0,
-			expectCheckedForConsulInstallations:     true,
-			expectCheckedForConsulDemoInstallations: true,
-			expectConsulUpgraded:                    true,
-			expectConsulDemoUpgraded:                false,
-			expectConsulDemoInstalled:               true,
-		},
-		"upgrade when demo flag provided and demo installation exists upgrades demo and returns success": {
-			input: []string{
-				"-demo",
-			},
-			messages: []string{
-				"\n==> Checking if Consul can be upgraded\n ✓ Existing Consul installation found to be upgraded.\n Name: consul\n Namespace: consul\n",
-				"\n==> Checking if Consul demo application can be upgraded\n ✓ Existing Consul demo application installation found to be upgraded.\n Name: consul-demo\n Namespace: consul-demo\n",
-				"\n==> Consul Upgrade Summary\n ✓ Downloaded charts.\n \n Difference between user overrides for current and upgraded charts\n --------------------------------------------------------------\n + global:\n + name: consul\n \n",
-				"\n==> Upgrading Consul\n ✓ Consul upgraded in namespace \"consul\".\n",
-				"\n==> Consul-Demo Upgrade Summary\n ✓ Downloaded charts.\n \n Difference between user overrides for current and upgraded charts\n --------------------------------------------------------------\n \n",
-				"\n==> Upgrading consul-demo\n ✓ Consul-Demo upgraded in namespace \"consul-demo\".\n",
-			},
-			helmActionsRunner: &helm.MockActionRunner{
-				CheckForInstallationsFunc: func(options *helm.CheckForInstallationsOptions) (bool, string, string, error) {
-					if options.ReleaseName == "consul" {
-						return true, "consul", "consul", nil
-					} else {
-						return true, "consul-demo", "consul-demo", nil
-					}
-				},
-			},
-			expectedReturnCode:                      0,
-			expectCheckedForConsulInstallations:     true,
-			expectCheckedForConsulDemoInstallations: true,
-			expectConsulUpgraded:                    true,
-			expectConsulDemoUpgraded:                true,
-			expectConsulDemoInstalled:               false,
-		},
-		"upgrade when demo flag not provided but demo installation exists upgrades demo and returns success": {
-			input: []string{},
-			messages: []string{
-				"\n==> Checking if Consul can be upgraded\n ✓ Existing Consul installation found to be upgraded.\n Name: consul\n Namespace: consul\n",
-				"\n==> Checking if Consul demo application can be upgraded\n ✓ Existing Consul demo application installation found to be upgraded.\n Name: consul-demo\n Namespace: consul-demo\n",
-				"\n==> Consul Upgrade Summary\n ✓ Downloaded charts.\n \n Difference between user overrides for current and upgraded charts\n --------------------------------------------------------------\n + global:\n + name: consul\n \n",
-				"\n==> Upgrading Consul\n ✓ Consul upgraded in namespace \"consul\".\n",
-				"\n==> Consul-Demo Upgrade Summary\n ✓ Downloaded charts.\n \n Difference between user overrides for current and upgraded charts\n --------------------------------------------------------------\n \n",
-				"\n==> Upgrading consul-demo\n ✓ Consul-Demo upgraded in namespace \"consul-demo\".\n",
-			},
-			helmActionsRunner: &helm.MockActionRunner{
-				CheckForInstallationsFunc: func(options *helm.CheckForInstallationsOptions) (bool, string, string, error) {
-					if options.ReleaseName == "consul" {
-						return true, "consul", "consul", nil
-					} else {
-						return true, "consul-demo", "consul-demo", nil
-					}
-				},
-			},
-			expectedReturnCode:                      0,
-			expectCheckedForConsulInstallations:     true,
-			expectCheckedForConsulDemoInstallations: true,
-			expectConsulUpgraded:                    true,
-			expectConsulDemoUpgraded:                true,
-			expectConsulDemoInstalled:               false,
-		},
-		"upgrade when demo upgrade errors returns error with consul being upgraded but demo not being upgraded": {
-			input: []string{},
-			messages: []string{
-				"\n==> Checking if Consul can be upgraded\n ✓ Existing Consul installation found to be upgraded.\n Name: consul\n Namespace: consul\n",
-				"\n==> Checking if Consul demo application can be upgraded\n ✓ Existing Consul demo application installation found to be upgraded.\n Name: consul-demo\n Namespace: consul-demo\n",
-				"\n==> Consul Upgrade Summary\n ✓ Downloaded charts.\n \n Difference between user overrides for current and upgraded charts\n --------------------------------------------------------------\n + global:\n + name: consul\n \n",
-				"\n==> Upgrading Consul\n ✓ Consul upgraded in namespace \"consul\".\n",
-				"\n==> Consul-Demo Upgrade Summary\n ✓ Downloaded charts.\n \n Difference between user overrides for current and upgraded charts\n --------------------------------------------------------------\n \n",
-				"\n==> Upgrading consul-demo\n ! Helm returned an error.\n",
-			},
-			helmActionsRunner: &helm.MockActionRunner{
-				CheckForInstallationsFunc: func(options *helm.CheckForInstallationsOptions) (bool, string, string, error) {
-					if options.ReleaseName == "consul" {
-						return true, "consul", "consul", nil
-					} else {
-						return true, "consul-demo", "consul-demo", nil
-					}
-				},
-				UpgradeFunc: func(upgrade *action.Upgrade, name string, chart *chart.Chart, vals map[string]interface{}) (*helmRelease.Release, error) {
-					if name == "consul" {
-						return &helmRelease.Release{}, nil
-					} else {
-						return nil, errors.New("Helm returned an error.")
-					}
-				},
-			},
-			expectedReturnCode:                      1,
-			expectCheckedForConsulInstallations:     true,
-			expectCheckedForConsulDemoInstallations: true,
-			expectConsulUpgraded:                    true,
-			expectConsulDemoUpgraded:                false,
-		},
-		"upgrade with quickstart preset when consul installation exists returns success": {
-			input: []string{
-				"-preset", "quickstart",
-			},
-			messages: []string{
-				"\n==> Checking if Consul can be upgraded\n ✓ Existing Consul installation found to be upgraded.\n Name: consul\n Namespace: consul\n",
-				"\n==> Checking if Consul demo application can be upgraded\n No existing Consul demo application installation found.\n",
-				"\n==> Consul Upgrade Summary\n ✓ Downloaded charts.\n \n Difference between user overrides for current and upgraded charts\n --------------------------------------------------------------\n + connectInject:\n + enabled: true\n + metrics:\n + defaultEnableMerging: true\n + defaultEnabled: true\n + enableGatewayMetrics: true\n + controller:\n + enabled: true\n + global:\n + metrics:\n + enableAgentMetrics: true\n + enabled: true\n + name: consul\n + prometheus:\n + enabled: true\n + server:\n + replicas: 1\n + ui:\n + enabled: true\n + service:\n + enabled: true\n \n",
-				"\n==> Upgrading Consul\n ✓ Consul upgraded in namespace \"consul\".\n",
-			},
-			helmActionsRunner: &helm.MockActionRunner{
-				CheckForInstallationsFunc: func(options *helm.CheckForInstallationsOptions) (bool, string, string, error) {
-					if options.ReleaseName == "consul" {
-						return true, "consul", "consul", nil
-					} else {
-						return false, "", "", nil
-					}
-				},
-			},
-			expectedReturnCode:                      0,
-			expectCheckedForConsulInstallations:     true,
-			expectCheckedForConsulDemoInstallations: true,
-			expectConsulUpgraded:                    true,
-			expectConsulDemoUpgraded:                false,
-		},
-		"upgrade with secure preset when consul installation exists returns success": {
-			input: []string{
-				"-preset", "secure",
-			},
-			messages: []string{
-				"\n==> Checking if Consul can be upgraded\n ✓ Existing Consul installation found to be upgraded.\n Name: consul\n Namespace: consul\n",
-				"\n==> Checking if Consul demo application can be upgraded\n No existing Consul demo application installation found.\n",
-				"\n==> Consul Upgrade Summary\n ✓ Downloaded charts.\n \n Difference between user overrides for current and upgraded charts\n --------------------------------------------------------------\n + connectInject:\n + enabled: true\n + controller:\n + enabled: true\n + global:\n + acls:\n + manageSystemACLs: true\n + gossipEncryption:\n + autoGenerate: true\n + name: consul\n + tls:\n + enableAutoEncrypt: true\n + enabled: true\n + server:\n + replicas: 1\n \n",
-				"\n==> Upgrading Consul\n ✓ Consul upgraded in namespace \"consul\".\n",
-			},
-			helmActionsRunner: &helm.MockActionRunner{
-				CheckForInstallationsFunc: func(options *helm.CheckForInstallationsOptions) (bool, string, string, error) {
-					if options.ReleaseName == "consul" {
-						return true, "consul", "consul", nil
-					} else {
-						return false, "", "", nil
-					}
-				},
-			},
-			expectedReturnCode:                      0,
-			expectCheckedForConsulInstallations:     true,
-			expectCheckedForConsulDemoInstallations: true,
-			expectConsulUpgraded:                    true,
-			expectConsulDemoUpgraded:                false,
-		},
-		"upgrade with --dry-run flag when consul installation exists returns success": {
-			input: []string{
-				"--dry-run",
-			},
-			messages: []string{
-				" Performing dry run upgrade. No changes will be made to the cluster.\n",
-				"\n==> Checking if Consul can be upgraded\n ✓ Existing Consul installation found to be upgraded.\n Name: consul\n Namespace: consul\n",
-				"\n==> Checking if Consul demo application can be upgraded\n No existing Consul demo application installation found.\n",
-				"\n==> Consul Upgrade Summary\n ✓ Downloaded charts.\n \n Difference between user overrides for current and upgraded charts\n --------------------------------------------------------------\n + global:\n + name: consul\n \n",
-				"\n==> Performing Dry Run Upgrade\n Dry run complete. No changes were made to the Kubernetes cluster.\n Upgrade can proceed with this configuration.\n",
-			},
-			helmActionsRunner: &helm.MockActionRunner{
-				CheckForInstallationsFunc: func(options *helm.CheckForInstallationsOptions) (bool, string, string, error) {
-					if options.ReleaseName == "consul" {
-						return true, "consul", "consul", nil
-					} else {
-						return false, "", "", nil
-					}
-				},
-			},
-			expectedReturnCode:                      0,
-			expectCheckedForConsulInstallations:     true,
-			expectCheckedForConsulDemoInstallations: true,
-			expectConsulUpgraded:                    false,
-			expectConsulDemoUpgraded:                false,
-		},
-	}
-	for name, tc := range cases {
-		t.Run(name, func(t *testing.T) {
-			buf := new(bytes.Buffer)
-			c := getInitializedCommand(t, buf)
-			k8s = fake.NewSimpleClientset()
-			c.kubernetes = k8s
-			mock := tc.helmActionsRunner
-			c.helmActionsRunner = mock
-			if tc.preProcessingFunc != nil {
-				tc.preProcessingFunc()
-			}
-			input := append([]string{
-				"--auto-approve",
-			}, tc.input...)
-			returnCode := c.Run(input)
-			require.Equal(t, tc.expectedReturnCode, returnCode)
-			require.Equal(t, tc.expectCheckedForConsulInstallations, mock.CheckedForConsulInstallations)
-			require.Equal(t, tc.expectCheckedForConsulDemoInstallations, mock.CheckedForConsulDemoInstallations)
-			require.Equal(t, tc.expectConsulUpgraded, mock.ConsulUpgraded)
-			require.Equal(t, tc.expectConsulDemoUpgraded, mock.ConsulDemoUpgraded)
-			require.Equal(t, tc.expectConsulDemoInstalled, mock.ConsulDemoInstalled)
-			output := buf.String()
-			for _, msg := range tc.messages {
-				require.Contains(t, output, msg)
-			}
-		})
-	}
-}
diff --git a/cli/common/error.go b/cli/common/error.go
deleted file mode 100644
index 3d8e3deb51..0000000000
--- a/cli/common/error.go
+++ /dev/null
@@ -1,25 +0,0 @@
-package common
-
-// DanglingResourceError should be used when a request was made to remove
-// a resource and the resource still remains after enough time has elapsed
-// that it should have been removed by Kubernetes.
-type DanglingResourceError struct {
-	message string
-}
-
-// NewDanglingResourceError returns a new instance of DanglingResourceError with
-// the given message.
-func NewDanglingResourceError(message string) *DanglingResourceError {
-	return &DanglingResourceError{message}
-}
-
-// Error returns a string representation of the dangling resource error.
-func (d *DanglingResourceError) Error() string {
-	return d.message
-}
-
-// IsDanglingResourceError returns true if the error passed in is of type DanglingResourceError.
-func IsDanglingResourceError(err error) bool {
-	_, ok := err.(*DanglingResourceError)
-	return ok
-}
diff --git a/cli/common/terminal/basic.go b/cli/common/terminal/basic.go
index e8866415bc..e8411b9aec 100644
--- a/cli/common/terminal/basic.go
+++ b/cli/common/terminal/basic.go
@@ -79,7 +79,7 @@ func (ui *basicUI) Interactive() bool {
 	return isatty.IsTerminal(os.Stdin.Fd())
 }
 
-// Output prints the given message using the formatting options passed in.
+// Output implements UI.
 func (ui *basicUI) Output(msg string, raw ...interface{}) {
 	msg, style, w := ui.parse(msg, raw...)
 
@@ -115,6 +115,7 @@ func (ui *basicUI) Output(msg string, raw ...interface{}) {
 		msg = strings.Join(lines, "\n")
 	}
 
+	// Write the styled message out to the configured writer.
 	fmt.Fprintln(w, msg)
 }
 
diff --git a/cli/common/terminal/ui.go b/cli/common/terminal/ui.go
index dde5d532ba..b90d05b197 100644
--- a/cli/common/terminal/ui.go
+++ b/cli/common/terminal/ui.go
@@ -7,36 +7,6 @@ import (
 	"github.com/fatih/color"
 )
 
-const (
-	HeaderStyle        = "header"
-	ErrorStyle         = "error"
-	ErrorBoldStyle     = "error-bold"
-	WarningStyle       = "warning"
-	WarningBoldStyle   = "warning-bold"
-	InfoStyle          = "info"
-	LibraryStyle       = "library"
-	SuccessStyle       = "success"
-	SuccessBoldStyle   = "success-bold"
-	DiffUnchangedStyle = "diff-unchanged"
-	DiffAddedStyle     = "diff-added"
-	DiffRemovedStyle   = "diff-removed"
-)
-
-var (
-	colorHeader        = color.New(color.Bold)
-	colorInfo          = color.New()
-	colorError         = color.New(color.FgRed)
-	colorErrorBold     = color.New(color.FgRed, color.Bold)
-	colorLibrary       = color.New(color.FgCyan)
-	colorSuccess       = color.New(color.FgGreen)
-	colorSuccessBold   = color.New(color.FgGreen, color.Bold)
-	colorWarning       = color.New(color.FgYellow)
-	colorWarningBold   = color.New(color.FgYellow, color.Bold)
-	colorDiffUnchanged = color.New()
-	colorDiffAdded     = color.New(color.FgGreen)
-	colorDiffRemoved   = color.New(color.FgRed)
-)
-
 // ErrNonInteractive is returned when Input is called on a non-Interactive UI.
 var ErrNonInteractive = errors.New("noninteractive UI doesn't support this operation")
 
@@ -95,6 +65,21 @@ type Input struct {
 	Secret bool
 }
 
+const (
+	HeaderStyle        = "header"
+	ErrorStyle         = "error"
+	ErrorBoldStyle     = "error-bold"
+	WarningStyle       = "warning"
+	WarningBoldStyle   = "warning-bold"
+	InfoStyle          = "info"
+	LibraryStyle       = "library"
+	SuccessStyle       = "success"
+	SuccessBoldStyle   = "success-bold"
+	DiffUnchangedStyle = "diff-unchanged"
+	DiffAddedStyle     = "diff-added"
+	DiffRemovedStyle   = "diff-removed"
+)
+
 type config struct {
 	// Writer is where the message will be written to.
 	Writer io.Writer
@@ -182,3 +167,18 @@ func WithStyle(style string) Option {
 func WithWriter(w io.Writer) Option {
 	return func(c *config) { c.Writer = w }
 }
+
+var (
+	colorHeader        = color.New(color.Bold)
+	colorInfo          = color.New()
+	colorError         = color.New(color.FgRed)
+	colorErrorBold     = color.New(color.FgRed, color.Bold)
+	colorLibrary       = color.New(color.FgCyan)
+	colorSuccess       = color.New(color.FgGreen)
+	colorSuccessBold   = color.New(color.FgGreen, color.Bold)
+	colorWarning       = color.New(color.FgYellow)
+	colorWarningBold   = color.New(color.FgYellow, color.Bold)
+	colorDiffUnchanged = color.New()
+	colorDiffAdded     = color.New(color.FgGreen)
+	colorDiffRemoved   = color.New(color.FgRed)
+)
diff --git a/cli/common/utils.go b/cli/common/utils.go
index b2e9714a9d..e03238bfb0 100644
--- a/cli/common/utils.go
+++ b/cli/common/utils.go
@@ -1,17 +1,19 @@
 package common
 
 import (
+	"errors"
+	"fmt"
 	"os"
 	"strings"
+
+	"helm.sh/helm/v3/pkg/action"
+	helmCLI "helm.sh/helm/v3/pkg/cli"
 )
 
 const (
-	DefaultReleaseName       = "consul"
-	DefaultReleaseNamespace  = "consul"
-	ConsulDemoAppReleaseName = "consul-demo"
-	TopLevelChartDirName     = "consul"
-	ReleaseTypeConsul        = "Consul"
-	ReleaseTypeConsulDemo    = "Consul demo application"
+	DefaultReleaseName      = "consul"
+	DefaultReleaseNamespace = "consul"
+	TopLevelChartDirName    = "consul"
 
 	// CLILabelKey and CLILabelValue are added to each secret on creation so the CLI knows
 	// which key to delete on an uninstall.
@@ -25,6 +27,32 @@ func Abort(raw string) bool {
 	return !(strings.ToLower(confirmation) == "y" || strings.ToLower(confirmation) == "yes")
 }
 
+// CheckForInstallations uses the helm Go SDK to find helm releases in all namespaces where the chart name is
+// "consul", and returns the release name and namespace if found, or an error if not found.
+func CheckForInstallations(settings *helmCLI.EnvSettings, uiLogger action.DebugLog) (string, string, error) {
+	// Need a specific action config to call helm list, where namespace is NOT specified.
+	listConfig := new(action.Configuration)
+	if err := listConfig.Init(settings.RESTClientGetter(), "",
+		os.Getenv("HELM_DRIVER"), uiLogger); err != nil {
+		return "", "", fmt.Errorf("couldn't initialize helm config: %s", err)
+	}
+
+	lister := action.NewList(listConfig)
+	lister.AllNamespaces = true
+	lister.StateMask = action.ListAll
+	res, err := lister.Run()
+	if err != nil {
+		return "", "", fmt.Errorf("couldn't check for installations: %s", err)
+	}
+
+	for _, rel := range res {
+		if rel.Chart.Metadata.Name == "consul" {
+			return rel.Name, rel.Namespace, nil
+		}
+	}
+	return "", "", errors.New("couldn't find consul installation")
+}
+
 // MergeMaps merges two maps giving b precedent.
 // @source: https://github.com/helm/helm/blob/main/pkg/cli/values/options.go
 func MergeMaps(a, b map[string]interface{}) map[string]interface{} {
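// [Editor's note: illustrative sketch, not part of the patch.] Minimal usage of
// the CheckForInstallations helper added above, assuming the standard helm SDK
// environment settings; log.Printf satisfies action.DebugLog:
//
//	settings := helmCLI.New() // helm.sh/helm/v3/pkg/cli
//	name, namespace, err := common.CheckForInstallations(settings, log.Printf)
//	if err == nil {
//		fmt.Printf("found consul release %q in namespace %q\n", name, namespace)
//	}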
diff --git a/cli/config/config.go b/cli/config/config.go
deleted file mode 100644
index d964bc3b5c..0000000000
--- a/cli/config/config.go
+++ /dev/null
@@ -1,16 +0,0 @@
-package config
-
-import "sigs.k8s.io/yaml"
-
-// GlobalNameConsul is used to set the global name of an install to consul.
-const GlobalNameConsul = `
-global:
-  name: consul
-`
-
-// ConvertToMap is a helper function that converts a YAML string to a map.
-func ConvertToMap(s string) map[string]interface{} {
-	var m map[string]interface{}
-	_ = yaml.Unmarshal([]byte(s), &m)
-	return m
-}
diff --git a/cli/config/presets.go b/cli/config/presets.go
new file mode 100644
index 0000000000..06b91ce8ce
--- /dev/null
+++ b/cli/config/presets.go
@@ -0,0 +1,71 @@
+package config
+
+import "sigs.k8s.io/yaml"
+
+const (
+	PresetDemo   = "demo"
+	PresetSecure = "secure"
+)
+
+// Presets is a map of pre-configured helm values.
+var Presets = map[string]interface{}{
+	PresetDemo:   Convert(demo),
+	PresetSecure: Convert(secure),
+}
+
+// demo is a preset of common values for setting up Consul.
+const demo = `
+global:
+  name: consul
+  metrics:
+    enabled: true
+    enableAgentMetrics: true
+connectInject:
+  enabled: true
+  metrics:
+    defaultEnabled: true
+    defaultEnableMerging: true
+    enableGatewayMetrics: true
+server:
+  replicas: 1
+controller:
+  enabled: true
+ui:
+  enabled: true
+  service:
+    enabled: true
+prometheus:
+  enabled: true
+`
+
+// secure is a preset of common values for setting up Consul in a secure manner.
+const secure = `
+global:
+  name: consul
+  gossipEncryption:
+    autoGenerate: true
+  tls:
+    enabled: true
+    enableAutoEncrypt: true
+  acls:
+    manageSystemACLs: true
+server:
+  replicas: 1
+connectInject:
+  enabled: true
+controller:
+  enabled: true
+`
+
+// GlobalNameConsul is used to set the global name of an install to consul.
+const GlobalNameConsul = `
+global:
+  name: consul
+`
+
+// Convert is a helper function that converts a YAML string to a map.
+func Convert(s string) map[string]interface{} {
+	var m map[string]interface{}
+	_ = yaml.Unmarshal([]byte(s), &m)
+	return m
+}
diff --git a/cli/go.mod b/cli/go.mod
index 2017bd30cc..cd95397863 100644
--- a/cli/go.mod
+++ b/cli/go.mod
@@ -1,6 +1,6 @@
 module github.com/hashicorp/consul-k8s/cli
 
-go 1.19
+go 1.18
 
 require (
 	github.com/bgentry/speakeasy v0.1.0
@@ -9,17 +9,14 @@ require (
 	github.com/google/go-cmp v0.5.8
 	github.com/hashicorp/consul-k8s/charts v0.0.0-00010101000000-000000000000
 	github.com/hashicorp/go-hclog v0.16.2
-	github.com/hashicorp/hcp-sdk-go v0.23.1-0.20220921131124-49168300a7dc
 	github.com/kr/text v0.2.0
 	github.com/mattn/go-isatty v0.0.14
 	github.com/mitchellh/cli v1.1.2
 	github.com/olekukonko/tablewriter v0.0.5
 	github.com/posener/complete v1.1.1
 	github.com/stretchr/testify v1.7.2
-	golang.org/x/text v0.3.7
 	helm.sh/helm/v3 v3.9.4
 	k8s.io/api v0.25.0
-	k8s.io/apiextensions-apiserver v0.25.0
 	k8s.io/apimachinery v0.25.0
 	k8s.io/cli-runtime v0.24.3
 	k8s.io/client-go v0.25.0
@@ -68,18 +65,9 @@ require (
 	github.com/go-errors/errors v1.0.1 // indirect
 	github.com/go-gorp/gorp/v3 v3.0.2 // indirect
 	github.com/go-logr/logr v1.2.3 // indirect
-	github.com/go-openapi/analysis v0.20.0 // indirect
-	github.com/go-openapi/errors v0.20.2 // indirect
 	github.com/go-openapi/jsonpointer v0.19.5 // indirect
 	github.com/go-openapi/jsonreference v0.19.5 // indirect
-	github.com/go-openapi/loads v0.20.2 // indirect
-	github.com/go-openapi/runtime v0.19.24 // indirect
-	github.com/go-openapi/spec v0.20.3 // indirect
-	github.com/go-openapi/strfmt v0.20.0 // indirect
 	github.com/go-openapi/swag v0.19.14 // indirect
-	github.com/go-openapi/validate v0.20.2 // indirect
-	github.com/go-ozzo/ozzo-validation v3.6.0+incompatible // indirect
-	github.com/go-stack/stack v1.8.0 // indirect
 	github.com/gobwas/glob v0.2.3 // indirect
 	github.com/gogo/protobuf v1.3.2 // indirect
 	github.com/golang-jwt/jwt/v4 v4.2.0 // indirect
@@ -93,7 +81,6 @@ require (
 	github.com/gosuri/uitable v0.0.4 // indirect
github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7 // indirect github.com/hashicorp/errwrap v1.1.0 // indirect - github.com/hashicorp/go-cleanhttp v0.5.1 // indirect github.com/hashicorp/go-multierror v1.1.1 // indirect github.com/huandu/xstrings v1.3.2 // indirect github.com/imdario/mergo v0.3.12 // indirect @@ -110,10 +97,8 @@ require ( github.com/mattn/go-colorable v0.1.12 // indirect github.com/mattn/go-runewidth v0.0.9 // indirect github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect - github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/go-wordwrap v1.0.1 // indirect - github.com/mitchellh/mapstructure v1.4.1 // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect github.com/moby/locker v1.0.1 // indirect github.com/moby/spdystream v0.2.0 // indirect @@ -137,7 +122,6 @@ require ( github.com/russross/blackfriday v1.5.2 // indirect github.com/shopspring/decimal v1.2.0 // indirect github.com/sirupsen/logrus v1.8.1 // indirect - github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966 // indirect github.com/spf13/cast v1.4.1 // indirect github.com/spf13/cobra v1.4.0 // indirect github.com/spf13/pflag v1.0.5 // indirect @@ -145,7 +129,6 @@ require ( github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect github.com/xeipuuv/gojsonschema v1.2.0 // indirect github.com/xlab/treeprint v0.0.0-20181112141820-a009c3971eca // indirect - go.mongodb.org/mongo-driver v1.4.6 // indirect go.starlark.net v0.0.0-20200707032745-474f21a9602d // indirect golang.org/x/crypto v0.0.0-20220525230936-793ad666bf5e // indirect golang.org/x/net v0.0.0-20220722155237-a158d28d115b // indirect @@ -153,6 +136,7 @@ require ( golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4 // indirect golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f // indirect golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 // indirect + golang.org/x/text v0.3.7 // indirect golang.org/x/time v0.0.0-20220609170525-579cf78fd858 // indirect google.golang.org/appengine v1.6.7 // indirect google.golang.org/genproto v0.0.0-20220502173005-c8bf987b8c21 // indirect @@ -161,6 +145,7 @@ require ( gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect + k8s.io/apiextensions-apiserver v0.25.0 // indirect k8s.io/apiserver v0.25.0 // indirect k8s.io/component-base v0.25.0 // indirect k8s.io/klog/v2 v2.70.1 // indirect diff --git a/cli/go.sum b/cli/go.sum index 861f2d8dc9..531cb6f202 100644 --- a/cli/go.sum +++ b/cli/go.sum @@ -90,19 +90,16 @@ github.com/Microsoft/go-winio v0.5.1 h1:aPJp2QD7OOrhO5tQXqQoGSJc+DjDtWTGLOmNyAm6 github.com/Microsoft/hcsshim v0.9.3 h1:k371PzBuRrz2b+ebGuI2nVgVhgsVX60jMfSw80NECxo= github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= -github.com/PuerkitoBio/purell v1.1.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/purell v1.1.1 h1:WEQqlqaGbrPkxLJWfBwQmfEAE1Z7ONdDLqrN38tNFfI= github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M= github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod 
h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d h1:UrqY+r/OJnIp5u0s1SbQ8dVfLCZJsnvazdBP5hS4iRs= -github.com/agnivade/levenshtein v1.0.1/go.mod h1:CURSv5d9Uaml+FovSIICkLbAUZ9S4RqaHDIsdSBg7lM= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= -github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= @@ -110,13 +107,9 @@ github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310 h1:BUAU3CGlLvorLI26 github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= -github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= -github.com/asaskevich/govalidator v0.0.0-20200108200545-475eaeb16496/go.mod h1:oGkLhpf+kjZl6xBf758TQhh5XrAeiJv/7FRz/2spLIg= -github.com/asaskevich/govalidator v0.0.0-20200428143746-21a406dcc535/go.mod h1:oGkLhpf+kjZl6xBf758TQhh5XrAeiJv/7FRz/2spLIg= github.com/asaskevich/govalidator v0.0.0-20200907205600-7a23bdc65eef h1:46PFijGLmAjMPwCCCo7Jf0W6f9slllCkkv7vyc1yOSg= github.com/asaskevich/govalidator v0.0.0-20200907205600-7a23bdc65eef/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= -github.com/aws/aws-sdk-go v1.34.28/go.mod h1:H7NKnBqNVzoTJpGfLrQkkD+ytBA93eiDYi/+8rV9s48= github.com/benbjohnson/clock v1.0.3/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM= github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= @@ -184,7 +177,6 @@ github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5Xh github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c h1:+pKlWGMw7gf6bQ+oDZB4KHQFypsfjYlq/C4rfL7D3g8= github.com/docker/go-metrics v0.0.1 h1:AgB/0SvBxihN0X8OR4SjsblXkbMvalQ8cjmtKQ2rQV8= github.com/docker/go-metrics v0.0.1/go.mod h1:cG1hvH2utMXtqgqqYE9plW6lDxS3/5ayHzueweSI3Vw= -github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw= github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/docker/libtrust 
v0.0.0-20150114040149-fa567046d9b1 h1:ZClxb8laGDf5arXfYcAtECDFgAgHklGI8CxgjHnXKJ4= @@ -224,8 +216,6 @@ github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmV github.com/fvbommel/sortorder v1.0.1/go.mod h1:uk88iVf1ovNn1iLfgUVU2F9o5eO30ui720w+kxuqRs0= github.com/getkin/kin-openapi v0.76.0/go.mod h1:660oXbgy5JFMKreazJaQTw7o+X00qeSyhcnluiMv+Xg= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/globalsign/mgo v0.0.0-20180905125535-1ca0a4f7cbcb/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= -github.com/globalsign/mgo v0.0.0-20181015135952-eeefdecb41b8/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= github.com/go-errors/errors v1.0.1 h1:LUHzmkK3GUKUrL/1gfBUxAHzcev3apQlezX/+O7ma6w= github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= @@ -245,135 +235,27 @@ github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbV github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/zapr v1.2.0/go.mod h1:Qa4Bsj2Vb+FAVeAKsLD8RLQ+YRJB8YDmOAKxaBQf7Ro= -github.com/go-openapi/analysis v0.0.0-20180825180245-b006789cd277/go.mod h1:k70tL6pCuVxPJOHXQ+wIac1FUrvNkHolPie/cLEU6hI= -github.com/go-openapi/analysis v0.17.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= -github.com/go-openapi/analysis v0.18.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= -github.com/go-openapi/analysis v0.19.2/go.mod h1:3P1osvZa9jKjb8ed2TPng3f0i/UY9snX6gxi44djMjk= -github.com/go-openapi/analysis v0.19.4/go.mod h1:3P1osvZa9jKjb8ed2TPng3f0i/UY9snX6gxi44djMjk= -github.com/go-openapi/analysis v0.19.5/go.mod h1:hkEAkxagaIvIP7VTn8ygJNkd4kAYON2rCu0v0ObL0AU= -github.com/go-openapi/analysis v0.19.10/go.mod h1:qmhS3VNFxBlquFJ0RGoDtylO9y4pgTAUNE9AEEMdlJQ= -github.com/go-openapi/analysis v0.19.16/go.mod h1:GLInF007N83Ad3m8a/CbQ5TPzdnGT7workfHwuVjNVk= -github.com/go-openapi/analysis v0.20.0 h1:UN09o0kNhleunxW7LR+KnltD0YrJ8FF03pSqvAN3Vro= -github.com/go-openapi/analysis v0.20.0/go.mod h1:BMchjvaHDykmRMsK40iPtvyOfFdMMxlOmQr9FBZk+Og= -github.com/go-openapi/errors v0.17.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0= -github.com/go-openapi/errors v0.18.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0= -github.com/go-openapi/errors v0.19.2/go.mod h1:qX0BLWsyaKfvhluLejVpVNwNRdXZhEbTA4kxxpKBC94= -github.com/go-openapi/errors v0.19.3/go.mod h1:qX0BLWsyaKfvhluLejVpVNwNRdXZhEbTA4kxxpKBC94= -github.com/go-openapi/errors v0.19.6/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= -github.com/go-openapi/errors v0.19.7/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= -github.com/go-openapi/errors v0.19.8/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= -github.com/go-openapi/errors v0.19.9/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= -github.com/go-openapi/errors v0.20.2 h1:dxy7PGTqEh94zj2E3h1cUmQQWiM1+aeCROfAr02EmK8= -github.com/go-openapi/errors v0.20.2/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= -github.com/go-openapi/jsonpointer v0.17.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= -github.com/go-openapi/jsonpointer v0.18.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= -github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg= github.com/go-openapi/jsonpointer 
v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= github.com/go-openapi/jsonpointer v0.19.5 h1:gZr+CIYByUqjcgeLXnQu2gHYQC9o73G2XUeOFYEICuY= github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= -github.com/go-openapi/jsonreference v0.17.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= -github.com/go-openapi/jsonreference v0.18.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= -github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc= github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8= github.com/go-openapi/jsonreference v0.19.5 h1:1WJP/wi4OjB4iV8KVbH73rQaoialJrqv8gitZLxGLtM= github.com/go-openapi/jsonreference v0.19.5/go.mod h1:RdybgQwPxbL4UEjuAruzK1x3nE69AqPYEJeo/TWfEeg= -github.com/go-openapi/loads v0.17.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= -github.com/go-openapi/loads v0.18.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= -github.com/go-openapi/loads v0.19.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= -github.com/go-openapi/loads v0.19.2/go.mod h1:QAskZPMX5V0C2gvfkGZzJlINuP7Hx/4+ix5jWFxsNPs= -github.com/go-openapi/loads v0.19.3/go.mod h1:YVfqhUCdahYwR3f3iiwQLhicVRvLlU/WO5WPaZvcvSI= -github.com/go-openapi/loads v0.19.5/go.mod h1:dswLCAdonkRufe/gSUC3gN8nTSaB9uaS2es0x5/IbjY= -github.com/go-openapi/loads v0.19.6/go.mod h1:brCsvE6j8mnbmGBh103PT/QLHfbyDxA4hsKvYBNEGVc= -github.com/go-openapi/loads v0.19.7/go.mod h1:brCsvE6j8mnbmGBh103PT/QLHfbyDxA4hsKvYBNEGVc= -github.com/go-openapi/loads v0.20.0/go.mod h1:2LhKquiE513rN5xC6Aan6lYOSddlL8Mp20AW9kpviM4= -github.com/go-openapi/loads v0.20.2 h1:z5p5Xf5wujMxS1y8aP+vxwW5qYT2zdJBbXKmQUG3lcc= -github.com/go-openapi/loads v0.20.2/go.mod h1:hTVUotJ+UonAMMZsvakEgmWKgtulweO9vYP2bQYKA/o= -github.com/go-openapi/runtime v0.0.0-20180920151709-4f900dc2ade9/go.mod h1:6v9a6LTXWQCdL8k1AO3cvqx5OtZY/Y9wKTgaoP6YRfA= -github.com/go-openapi/runtime v0.19.0/go.mod h1:OwNfisksmmaZse4+gpV3Ne9AyMOlP1lt4sK4FXt0O64= -github.com/go-openapi/runtime v0.19.4/go.mod h1:X277bwSUBxVlCYR3r7xgZZGKVvBd/29gLDlFGtJ8NL4= -github.com/go-openapi/runtime v0.19.15/go.mod h1:dhGWCTKRXlAfGnQG0ONViOZpjfg0m2gUt9nTQPQZuoo= -github.com/go-openapi/runtime v0.19.16/go.mod h1:5P9104EJgYcizotuXhEuUrzVc+j1RiSjahULvYmlv98= -github.com/go-openapi/runtime v0.19.24 h1:TqagMVlRAOTwllE/7hNKx6rQ10O6T8ZzeJdMjSTKaD4= -github.com/go-openapi/runtime v0.19.24/go.mod h1:Lm9YGCeecBnUUkFTxPC4s1+lwrkJ0pthx8YvyjCfkgk= -github.com/go-openapi/spec v0.17.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= -github.com/go-openapi/spec v0.18.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= -github.com/go-openapi/spec v0.19.2/go.mod h1:sCxk3jxKgioEJikev4fgkNmwS+3kuYdJtcsZsD5zxMY= -github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo= -github.com/go-openapi/spec v0.19.6/go.mod h1:Hm2Jr4jv8G1ciIAo+frC/Ft+rR2kQDh8JHKHb3gWUSk= -github.com/go-openapi/spec v0.19.8/go.mod h1:Hm2Jr4jv8G1ciIAo+frC/Ft+rR2kQDh8JHKHb3gWUSk= -github.com/go-openapi/spec v0.19.15/go.mod h1:+81FIL1JwC5P3/Iuuozq3pPE9dXdIEGxFutcFKaVbmU= -github.com/go-openapi/spec v0.20.0/go.mod h1:+81FIL1JwC5P3/Iuuozq3pPE9dXdIEGxFutcFKaVbmU= -github.com/go-openapi/spec v0.20.1/go.mod h1:93x7oh+d+FQsmsieroS4cmR3u0p/ywH649a3qwC9OsQ= -github.com/go-openapi/spec v0.20.3 h1:uH9RQ6vdyPSs2pSy9fL8QPspDF2AMIMPtmK5coSSjtQ= -github.com/go-openapi/spec v0.20.3/go.mod h1:gG4F8wdEDN+YPBMVnzE85Rbhf+Th2DTvA9nFPQ5AYEg= -github.com/go-openapi/strfmt 
v0.17.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU= -github.com/go-openapi/strfmt v0.18.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU= -github.com/go-openapi/strfmt v0.19.0/go.mod h1:+uW+93UVvGGq2qGaZxdDeJqSAqBqBdl+ZPMF/cC8nDY= -github.com/go-openapi/strfmt v0.19.2/go.mod h1:0yX7dbo8mKIvc3XSKp7MNfxw4JytCfCD6+bY1AVL9LU= -github.com/go-openapi/strfmt v0.19.3/go.mod h1:0yX7dbo8mKIvc3XSKp7MNfxw4JytCfCD6+bY1AVL9LU= -github.com/go-openapi/strfmt v0.19.4/go.mod h1:eftuHTlB/dI8Uq8JJOyRlieZf+WkkxUuk0dgdHXr2Qk= -github.com/go-openapi/strfmt v0.19.5/go.mod h1:eftuHTlB/dI8Uq8JJOyRlieZf+WkkxUuk0dgdHXr2Qk= -github.com/go-openapi/strfmt v0.19.11/go.mod h1:UukAYgTaQfqJuAFlNxxMWNvMYiwiXtLsF2VwmoFtbtc= -github.com/go-openapi/strfmt v0.20.0 h1:l2omNtmNbMc39IGptl9BuXBEKcZfS8zjrTsPKTiJiDM= -github.com/go-openapi/strfmt v0.20.0/go.mod h1:UukAYgTaQfqJuAFlNxxMWNvMYiwiXtLsF2VwmoFtbtc= -github.com/go-openapi/swag v0.17.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= -github.com/go-openapi/swag v0.18.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= -github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= -github.com/go-openapi/swag v0.19.7/go.mod h1:ao+8BpOPyKdpQz3AOJfbeEVpLmWAvlT1IfTe5McPyhY= -github.com/go-openapi/swag v0.19.9/go.mod h1:ao+8BpOPyKdpQz3AOJfbeEVpLmWAvlT1IfTe5McPyhY= -github.com/go-openapi/swag v0.19.12/go.mod h1:eFdyEBkTdoAf/9RXBvj4cr1nH7GD8Kzo5HTt47gr72M= -github.com/go-openapi/swag v0.19.13/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= github.com/go-openapi/swag v0.19.14 h1:gm3vOOXfiuw5i9p5N9xJvfjvuofpyvLA9Wr6QfK5Fng= github.com/go-openapi/swag v0.19.14/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= -github.com/go-openapi/validate v0.18.0/go.mod h1:Uh4HdOzKt19xGIGm1qHf/ofbX1YQ4Y+MYsct2VUrAJ4= -github.com/go-openapi/validate v0.19.2/go.mod h1:1tRCw7m3jtI8eNWEEliiAqUIcBztB2KDnRCRMUi7GTA= -github.com/go-openapi/validate v0.19.3/go.mod h1:90Vh6jjkTn+OT1Eefm0ZixWNFjhtOH7vS9k0lo6zwJo= -github.com/go-openapi/validate v0.19.10/go.mod h1:RKEZTUWDkxKQxN2jDT7ZnZi2bhZlbNMAuKvKB+IaGx8= -github.com/go-openapi/validate v0.19.12/go.mod h1:Rzou8hA/CBw8donlS6WNEUQupNvUZ0waH08tGe6kAQ4= -github.com/go-openapi/validate v0.19.15/go.mod h1:tbn/fdOwYHgrhPBzidZfJC2MIVvs9GA7monOmWBbeCI= -github.com/go-openapi/validate v0.20.1/go.mod h1:b60iJT+xNNLfaQJUqLI7946tYiFEOuE9E4k54HpKcJ0= -github.com/go-openapi/validate v0.20.2 h1:AhqDegYV3J3iQkMPJSXkvzymHKMTw0BST3RK3hTT4ts= -github.com/go-openapi/validate v0.20.2/go.mod h1:e7OJoKNgd0twXZwIn0A43tHbvIcr/rZIVCbJBpTUoY0= -github.com/go-ozzo/ozzo-validation v3.6.0+incompatible h1:msy24VGS42fKO9K1vLz82/GeYW1cILu7Nuuj1N3BBkE= -github.com/go-ozzo/ozzo-validation v3.6.0+incompatible/go.mod h1:gsEKFIVnabGBt6mXmxK0MoFy+cZoTJY6mu5Ll3LVLBU= github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= github.com/go-sql-driver/mysql v1.6.0 h1:BCTh4TKNUYmOmMUcQ3IipzF5prigylS7XXjEkfCHuOE= github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= -github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= -github.com/gobuffalo/attrs 
v0.0.0-20190224210810-a9411de4debd/go.mod h1:4duuawTqi2wkkpB4ePgWMaai6/Kc6WEz83bhFwpHzj0= -github.com/gobuffalo/depgen v0.0.0-20190329151759-d478694a28d3/go.mod h1:3STtPUQYuzV0gBVOY3vy6CfMm/ljR4pABfrTeHNLHUY= -github.com/gobuffalo/depgen v0.1.0/go.mod h1:+ifsuy7fhi15RWncXQQKjWS9JPkdah5sZvtHc2RXGlg= -github.com/gobuffalo/envy v1.6.15/go.mod h1:n7DRkBerg/aorDM8kbduw5dN3oXGswK5liaSCx4T5NI= -github.com/gobuffalo/envy v1.7.0/go.mod h1:n7DRkBerg/aorDM8kbduw5dN3oXGswK5liaSCx4T5NI= -github.com/gobuffalo/flect v0.1.0/go.mod h1:d2ehjJqGOH/Kjqcoz+F7jHTBbmDb38yXA598Hb50EGs= -github.com/gobuffalo/flect v0.1.1/go.mod h1:8JCgGVbRjJhVgD6399mQr4fx5rRfGKVzFjbj6RE/9UI= -github.com/gobuffalo/flect v0.1.3/go.mod h1:8JCgGVbRjJhVgD6399mQr4fx5rRfGKVzFjbj6RE/9UI= -github.com/gobuffalo/genny v0.0.0-20190329151137-27723ad26ef9/go.mod h1:rWs4Z12d1Zbf19rlsn0nurr75KqhYp52EAGGxTbBhNk= -github.com/gobuffalo/genny v0.0.0-20190403191548-3ca520ef0d9e/go.mod h1:80lIj3kVJWwOrXWWMRzzdhW3DsrdjILVil/SFKBzF28= -github.com/gobuffalo/genny v0.1.0/go.mod h1:XidbUqzak3lHdS//TPu2OgiFB+51Ur5f7CSnXZ/JDvo= -github.com/gobuffalo/genny v0.1.1/go.mod h1:5TExbEyY48pfunL4QSXxlDOmdsD44RRq4mVZ0Ex28Xk= -github.com/gobuffalo/gitgen v0.0.0-20190315122116-cc086187d211/go.mod h1:vEHJk/E9DmhejeLeNt7UVvlSGv3ziL+djtTr3yyzcOw= -github.com/gobuffalo/gogen v0.0.0-20190315121717-8f38393713f5/go.mod h1:V9QVDIxsgKNZs6L2IYiGR8datgMhB577vzTDqypH360= -github.com/gobuffalo/gogen v0.1.0/go.mod h1:8NTelM5qd8RZ15VjQTFkAW6qOMx5wBbW4dSCS3BY8gg= -github.com/gobuffalo/gogen v0.1.1/go.mod h1:y8iBtmHmGc4qa3urIyo1shvOD8JftTtfcKi+71xfDNE= -github.com/gobuffalo/logger v0.0.0-20190315122211-86e12af44bc2/go.mod h1:QdxcLw541hSGtBnhUc4gaNIXRjiDppFGaDqzbrBd3v8= github.com/gobuffalo/logger v1.0.6 h1:nnZNpxYo0zx+Aj9RfMPBm+x9zAU2OayFh/xrAWi34HU= github.com/gobuffalo/logger v1.0.6/go.mod h1:J31TBEHR1QLV2683OXTAItYIg8pv2JMHnF/quuAbMjs= -github.com/gobuffalo/mapi v1.0.1/go.mod h1:4VAGh89y6rVOvm5A8fKFxYG+wIW6LO1FMTG9hnKStFc= -github.com/gobuffalo/mapi v1.0.2/go.mod h1:4VAGh89y6rVOvm5A8fKFxYG+wIW6LO1FMTG9hnKStFc= -github.com/gobuffalo/packd v0.0.0-20190315124812-a385830c7fc0/go.mod h1:M2Juc+hhDXf/PnmBANFCqx4DM3wRbgDvnVWeG2RIxq4= -github.com/gobuffalo/packd v0.1.0/go.mod h1:M2Juc+hhDXf/PnmBANFCqx4DM3wRbgDvnVWeG2RIxq4= github.com/gobuffalo/packd v1.0.1 h1:U2wXfRr4E9DH8IdsDLlRFwTZTK7hLfq9qT/QHXGVe/0= github.com/gobuffalo/packd v1.0.1/go.mod h1:PP2POP3p3RXGz7Jh6eYEf93S7vA2za6xM7QT85L4+VY= -github.com/gobuffalo/packr/v2 v2.0.9/go.mod h1:emmyGweYTm6Kdper+iywB6YK5YzuKchGtJQZ0Odn4pQ= -github.com/gobuffalo/packr/v2 v2.2.0/go.mod h1:CaAwI0GPIAv+5wKLtv8Afwl+Cm78K/I/VCm/3ptBN+0= github.com/gobuffalo/packr/v2 v2.8.3 h1:xE1yzvnO56cUC0sTpKR3DIbxZgB54AftTFMhB2XEWlY= github.com/gobuffalo/packr/v2 v2.8.3/go.mod h1:0SahksCVcx4IMnigTjiFuyldmTrdTctXsOdiU5KwbKc= -github.com/gobuffalo/syncx v0.0.0-20190224160051-33c29581e754/go.mod h1:HhnNqWY95UYwwW3uSASeV7vtgYkT2t16hJgV3AEPUpw= github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y= github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= @@ -418,7 +300,6 @@ github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaS github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/golang/snappy 
v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golangplus/testing v0.0.0-20180327235837-af21d9c3145e/go.mod h1:0AA//k/eakGydO4jKRoRL2j92ZKSzTgj9tclaCrvXHk= github.com/gomodule/redigo v1.8.2 h1:H5XSIre1MB5NbPYFp+i1NBbb5qN1W8Y8YAQoAYbkm8k= @@ -467,7 +348,6 @@ github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLe github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ= -github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.2.0 h1:qJYtXnJRWmpe7m/3XlyhrsLrEURqHRM2kxzoxXqyUDs= @@ -492,7 +372,6 @@ github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyN github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= -github.com/hashicorp/go-cleanhttp v0.5.1 h1:dH3aiDG9Jvb5r5+bYHsikaOUIpcM0xvgMXVoDkXMzJM= github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-hclog v0.16.2 h1:K4ev2ib4LdQETX5cSZBG0DVLk1jwGqSPXBjdah3veNs= github.com/hashicorp/go-hclog v0.16.2/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= @@ -510,8 +389,6 @@ github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= -github.com/hashicorp/hcp-sdk-go v0.23.1-0.20220921131124-49168300a7dc h1:on26TCKYnX7JzZCtwkR/LWHSqMu40PoZ6h/0e6Pq8ug= -github.com/hashicorp/hcp-sdk-go v0.23.1-0.20220921131124-49168300a7dc/go.mod h1:/9UoDY2FYYA8lFaKBb2HmM/jKYZGANmf65q9QRc/cVw= github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= @@ -528,11 +405,8 @@ github.com/imdario/mergo v0.3.12 h1:b6R2BslTbIEToALKP7LxUvijTsNI9TAe80pLWN2g/HU= github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= -github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= -github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= github.com/jmoiron/sqlx v1.3.5 h1:vFFPA71p1o5gAeqtEAwLU4dnX2napprKtHr7PYIcN3g= github.com/jmoiron/sqlx v1.3.5/go.mod h1:nRVWtLre0KfCLJvgxzCsLVMogSvQ1zNJtpYr2Ccp0mQ= -github.com/joho/godotenv v1.3.0/go.mod 
h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg= github.com/jonboulle/clockwork v0.2.2/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= @@ -548,18 +422,13 @@ github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/X github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= -github.com/karrick/godirwalk v1.8.0/go.mod h1:H5KPZjojv4lE+QYImBI8xVtrBRgYrIVsaRPx4tDPEn4= -github.com/karrick/godirwalk v1.10.3/go.mod h1:RoGL9dQei4vP9ilrpETWE8CLOZ1kiN0LhBygSwrAsHA= github.com/karrick/godirwalk v1.16.1 h1:DynhcF+bztK8gooS0+NDJFrdNZjJ3gzVzC545UNA9iw= github.com/karrick/godirwalk v1.16.1/go.mod h1:j4mkqPuvaLI8mp1DroR3P6ad7cyYd4c1qeJ3RV7ULlk= -github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/compress v1.9.5/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= github.com/klauspost/compress v1.13.6 h1:P76CopJELS0TiO2mebmnzgWaajssP/EszplttgQxcgc= github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kortschak/utter v1.0.1/go.mod h1:vSmSjbyrlKjjsL71193LmzBOKgwePk9DH6uFaWHIInc= github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= @@ -569,7 +438,6 @@ github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfn github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= @@ -585,17 +453,13 @@ github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de h1:9TO3cAIGXtEhn github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de/go.mod h1:zAbeS9B/r2mtpb6U+EI2rYA5OAXxsYw6wTamcNW+zcE= github.com/lithammer/dedent v1.1.0/go.mod h1:jrXYCQtgg0nJiN+StA2KgR7w6CiQNv9Fd/Z9BP0jIOc= github.com/magiconair/properties v1.8.5/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60= -github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= 
github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs= -github.com/mailru/easyjson v0.7.1/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs= github.com/mailru/easyjson v0.7.6 h1:8yTIVnZgCoiM1TgqoeTl+LfU5Jg6/xL3QhGQnimLYnA= github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/markbates/errx v1.1.0 h1:QDFeR+UP95dO12JgW+tgi2UVfo0V8YBHiUIOaeBPiEI= github.com/markbates/errx v1.1.0/go.mod h1:PLa46Oex9KNbVDZhKel8v1OT7hD5JZ2eI7AHhA0wswc= -github.com/markbates/oncer v0.0.0-20181203154359-bf2de49a0be2/go.mod h1:Ld9puTsIW75CHf65OeIOkyKbteujpZVXDpWK6YGZbxE= github.com/markbates/oncer v1.0.0 h1:E83IaVAHygyndzPimgUYJjbshhDTALZyXxvk9FOlQRY= github.com/markbates/oncer v1.0.0/go.mod h1:Z59JA581E9GP6w96jai+TGqafHPW+cPfRxz2aSZ0mcI= github.com/markbates/safe v1.0.1 h1:yjZkbvRM6IzKj9tlu/zMJLS0n/V351OZWRnF3QfaUxI= @@ -625,8 +489,6 @@ github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3N github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= github.com/mitchellh/cli v1.1.2 h1:PvH+lL2B7IQ101xQL63Of8yFS2y+aDlsFcsqNc+u/Kw= github.com/mitchellh/cli v1.1.2/go.mod h1:6iaV0fGdElS6dPBx0EApTxHrcWvmJphyh2n8YBLPPZ4= -github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db h1:62I3jR2EmQ4l5rM/4FEfDWcRD+abF5XlKShorW5LRoQ= -github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db/go.mod h1:l0dey0ia/Uv7NcFFVbCLtqEBQbrT4OCwCSKTEv6enCw= github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw= github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= @@ -639,10 +501,6 @@ github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS4 github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/mitchellh/mapstructure v1.3.2/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= -github.com/mitchellh/mapstructure v1.3.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= -github.com/mitchellh/mapstructure v1.4.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= -github.com/mitchellh/mapstructure v1.4.1 h1:CpVNEelQCZBooIPDn+AR3NpivK/TIKU8bDxdASFVQag= github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= @@ -663,7 +521,6 @@ github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9G github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 h1:n6/2gBQ3RWajuToeY6ZtZTIKv2v7ThUy5KKusIT0yc0= github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00/go.mod h1:Pm3mSP3c5uWn86xMLZ5Sa7JB9GsEZySvHYXCTK4E9q4= -github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc= github.com/morikuni/aec v1.0.0 
h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= @@ -695,9 +552,6 @@ github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3I github.com/opencontainers/image-spec v1.0.3-0.20211202183452-c5a74bcca799 h1:rc3tiVYb5z54aKaDfakKn0dDjIyPpTtszkjuMzyt7ec= github.com/opencontainers/image-spec v1.0.3-0.20211202183452-c5a74bcca799/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= -github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= -github.com/pelletier/go-toml v1.4.0/go.mod h1:PN7xzY2wHTK0K9p34ErDQMlFxa51Fk0OUruD3k1mMwo= -github.com/pelletier/go-toml v1.7.0/go.mod h1:vwGMzjaWMwyfHwgIBhI2YUM4fB6nL6lVAvS1LBMMhTE= github.com/pelletier/go-toml v1.9.3/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= @@ -741,8 +595,6 @@ github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1 github.com/prometheus/procfs v0.7.3 h1:4jVXhlkAyzOScmCkXBTOLRLTz8EeU+eyjrwB/EPq0VU= github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= -github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/rogpeppe/go-internal v1.2.2/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.8.0/go.mod h1:WmiCO8CzOY8rg0OYDC4/i/2WRWAB6poM+XZ2dLUbcbE= github.com/rubenv/sql-migrate v1.1.1 h1:haR5Hn8hbW9/SpAICrXoZqXnywS7Q5WijwkQENPeNWY= @@ -753,21 +605,16 @@ github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQD github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= -github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= github.com/sergi/go-diff v1.1.0 h1:we8PVUC3FE2uYfodKH/nBHMSetSfHDR6scGdBi+erh0= github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= github.com/shopspring/decimal v1.2.0 h1:abSATXmQEYyShuxI4/vyW3tV1MrKAJzCZ/0zLUXYbsQ= github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= -github.com/sirupsen/logrus v1.4.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= -github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= 
github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE= github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= -github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966 h1:JIAuq3EEf9cgbU6AtGPK4CTG3Zf6CKMNqf0MHTggAUA= -github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966/go.mod h1:sUM3LWHvSMaG192sy56D9F7CNvL7jUJVXoqM1QKLnog= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= @@ -776,7 +623,6 @@ github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cast v1.4.1 h1:s0hze+J0196ZfEMTs80N7UlFt0BDuQ7Q+JDnHiMWKdA= github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v1.2.1/go.mod h1:ExllRjgxM/piMAM+3tAZvg8fsklGAf3tPfi+i8t68Nk= github.com/spf13/cobra v1.4.0 h1:y+wJpx64xcgO1V+RcnwW0LEHxTKRi2ZDPSBjWnrg88Q= github.com/spf13/cobra v1.4.0/go.mod h1:Wo4iy3BUC+X2Fybo0PDqwJIv3dNRiZLHQymsfxlB84g= @@ -800,11 +646,6 @@ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.7.2 h1:4jaiDzPyXQvSd7D0EjG45355tLlV3VOECpq10pLC+8s= github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= -github.com/tidwall/pretty v1.0.0 h1:HsD+QiTn7sK6flMKIvNmpqz1qrpP3Ps6jOKIKMooyg4= -github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= -github.com/vektah/gqlparser v1.1.2/go.mod h1:1ycwN7Ij5njmMkPPAOaRFY4rET2Enx7IkVv3vaXspKw= -github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c/go.mod h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I= -github.com/xdg/stringprep v0.0.0-20180714160509-73f8eece6fdc/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y= github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f h1:J9EGpcZtP0E/raorCMxlFGSTBrsSlaDGf3jU/qvAE2c= github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0= @@ -828,14 +669,6 @@ github.com/ziutek/mymysql v1.5.4/go.mod h1:LMSpPZ6DbqWFxNCHW77HeMg9I646SAhApZ/wK go.etcd.io/etcd/api/v3 v3.5.0/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs= go.etcd.io/etcd/client/pkg/v3 v3.5.0/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g= go.etcd.io/etcd/client/v2 v2.305.0/go.mod h1:h9puh54ZTgAKtEbut2oe9P4L/oqKCVB6xsXlzd7alYQ= -go.mongodb.org/mongo-driver v1.0.3/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= -go.mongodb.org/mongo-driver v1.1.1/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= -go.mongodb.org/mongo-driver v1.3.0/go.mod h1:MSWZXKOynuguX+JSvwP8i+58jYCXxbia8HS3gZBapIE= -go.mongodb.org/mongo-driver v1.3.4/go.mod h1:MSWZXKOynuguX+JSvwP8i+58jYCXxbia8HS3gZBapIE= -go.mongodb.org/mongo-driver v1.4.3/go.mod h1:WcMNYLx/IlOxLe6JRJiv2uXuCz6zBLndR4SoGjYphSc= -go.mongodb.org/mongo-driver v1.4.4/go.mod 
h1:WcMNYLx/IlOxLe6JRJiv2uXuCz6zBLndR4SoGjYphSc= -go.mongodb.org/mongo-driver v1.4.6 h1:rh7GdYmDrb8AQSkF8yteAus8qYOgOASWDOv1BWqBXkU= -go.mongodb.org/mongo-driver v1.4.6/go.mod h1:WcMNYLx/IlOxLe6JRJiv2uXuCz6zBLndR4SoGjYphSc= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= @@ -866,14 +699,9 @@ go.uber.org/zap v1.19.0/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190320223903-b7391e95e576/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190325154230-a5d413f7728c/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190422162423-af44ce270edf/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190530122614-20be4c3c3ed5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190617133340-57b3e21c3d56/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200414173820-0848c9571904/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= @@ -924,14 +752,12 @@ golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2 golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181005035420-146acd28ed58/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190320064053-1272bf9dcd53/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net 
v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= @@ -953,16 +779,13 @@ golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/ golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200602114024-627f9648deb9/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= @@ -997,7 +820,6 @@ golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190412183630-56d357773e84/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -1016,16 +838,11 @@ golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190321052220-f7bb7a8bee54/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys 
v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190419153524-e8e3143a4f4a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190531175056-4c3a928424d2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1111,24 +928,16 @@ golang.org/x/time v0.0.0-20220210224613-90d013bbcef8/go.mod h1:tRJNPiyCQ0inRvYxb golang.org/x/time v0.0.0-20220609170525-579cf78fd858 h1:Dpdu/EMxGMFgq0CeYMh4fazTD2vtlZRYE7wyynxJb9U= golang.org/x/time v0.0.0-20220609170525-579cf78fd858/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190125232054-d66bd3c5d5a6/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190329151228-23e29df326fe/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190416151739-9c9e1878f421/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190420181800-aa740d480789/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190531172133-b3315ee88b7d/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190617190820-da514acc4774/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= 
golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= @@ -1352,7 +1161,6 @@ gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0-20200605160147-a5ece683394c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= diff --git a/cli/helm/action.go b/cli/helm/action.go index d71014c762..df8ba5fb07 100644 --- a/cli/helm/action.go +++ b/cli/helm/action.go @@ -1,14 +1,11 @@ package helm import ( - "embed" "fmt" "os" "helm.sh/helm/v3/pkg/action" - "helm.sh/helm/v3/pkg/chart" helmCLI "helm.sh/helm/v3/pkg/cli" - "helm.sh/helm/v3/pkg/release" "k8s.io/cli-runtime/pkg/genericclioptions" ) @@ -26,83 +23,3 @@ func InitActionConfig(actionConfig *action.Configuration, namespace string, sett } return actionConfig, nil } - -// HelmActionsRunner is a thin interface over existing Helm actions that normally -// require a Kubernetes cluster. This interface allows us to mock it in tests -// and get better coverage of CLI commands. -type HelmActionsRunner interface { - // A thin wrapper around the Helm list function. - CheckForInstallations(options *CheckForInstallationsOptions) (bool, string, string, error) - // A thin wrapper around the Helm status function. - GetStatus(status *action.Status, name string) (*release.Release, error) - // A thin wrapper around the Helm install function. - Install(install *action.Install, chrt *chart.Chart, vals map[string]interface{}) (*release.Release, error) - // A thin wrapper around the LoadChart function in the consul-k8s CLI that reads the charts within the embedded file system. - LoadChart(chart embed.FS, chartDirName string) (*chart.Chart, error) - // A thin wrapper around the Helm uninstall function. - Uninstall(uninstall *action.Uninstall, name string) (*release.UninstallReleaseResponse, error) - // A thin wrapper around the Helm upgrade function. - Upgrade(upgrade *action.Upgrade, name string, chart *chart.Chart, vals map[string]interface{}) (*release.Release, error) -} - -// ActionRunner is the implementation of the HelmActionsRunner interface that -// actually calls Helm SDK functions and requires a real Kubernetes cluster. It -// is the non-mock implementation of HelmActionsRunner that is used in the CLI.
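// Illustrative sketch, not part of this patch: because HelmActionsRunner is an interface, tests can substitute a lightweight double for the real Helm SDK and exercise CLI code paths without a cluster. The type below is hypothetical; the patch's actual mock is the MockActionRunner in cli/helm/mock.go further down.
//
//	type noopRunner struct {
//		HelmActionsRunner // embed the interface; methods not overridden are never called in this sketch
//	}
//
//	func (n *noopRunner) Install(install *action.Install, chrt *chart.Chart, vals map[string]interface{}) (*release.Release, error) {
//		return &release.Release{Name: install.ReleaseName}, nil // pretend the install succeeded
//	}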
-type ActionRunner struct{} - -func (h *ActionRunner) Uninstall(uninstall *action.Uninstall, name string) (*release.UninstallReleaseResponse, error) { - return uninstall.Run(name) -} - -func (h *ActionRunner) Install(install *action.Install, chrt *chart.Chart, vals map[string]interface{}) (*release.Release, error) { - return install.Run(chrt, vals) -} - -type CheckForInstallationsOptions struct { - Settings *helmCLI.EnvSettings - ReleaseName string - DebugLog action.DebugLog - SkipErrorWhenNotFound bool -} - -// CheckForInstallations uses the Helm Go SDK to find Helm releases in all namespaces where the chart name -// matches the given release name, and returns the release name and namespace if found, or an error if not found. -func (h *ActionRunner) CheckForInstallations(options *CheckForInstallationsOptions) (bool, string, string, error) { - // Need a specific action config to call helm list, where namespace is NOT specified. - listConfig := new(action.Configuration) - if err := listConfig.Init(options.Settings.RESTClientGetter(), "", - os.Getenv("HELM_DRIVER"), options.DebugLog); err != nil { - return false, "", "", fmt.Errorf("couldn't initialize helm config: %s", err) - } - - lister := action.NewList(listConfig) - lister.AllNamespaces = true - lister.StateMask = action.ListAll - res, err := lister.Run() - if err != nil { - return false, "", "", fmt.Errorf("couldn't check for installations: %s", err) - } - - for _, rel := range res { - if rel.Chart.Metadata.Name == options.ReleaseName { - return true, rel.Name, rel.Namespace, nil - } - } - var notFoundError error - if !options.SkipErrorWhenNotFound { - notFoundError = fmt.Errorf("couldn't find installation named '%s'", options.ReleaseName) - } - return false, "", "", notFoundError -} - -func (h *ActionRunner) GetStatus(status *action.Status, name string) (*release.Release, error) { - return status.Run(name) -} - -func (h *ActionRunner) Upgrade(upgrade *action.Upgrade, name string, chart *chart.Chart, vals map[string]interface{}) (*release.Release, error) { - return upgrade.Run(name, chart, vals) -} - -func (h *ActionRunner) LoadChart(chart embed.FS, chartDirName string) (*chart.Chart, error) { - return LoadChart(chart, chartDirName) -} diff --git a/cli/helm/chart.go b/cli/helm/chart.go index f679ca591d..1a91ee19d5 100644 --- a/cli/helm/chart.go +++ b/cli/helm/chart.go @@ -29,7 +29,7 @@ func LoadChart(chart embed.FS, chartDirName string) (*chart.Chart, error) { // FetchChartValues will attempt to fetch the values from the currently // installed Helm chart.
-func FetchChartValues(actionRunner HelmActionsRunner, namespace, name string, settings *helmCLI.EnvSettings, uiLogger action.DebugLog) (map[string]interface{}, error) { +func FetchChartValues(namespace, name string, settings *helmCLI.EnvSettings, uiLogger action.DebugLog) (map[string]interface{}, error) { cfg := new(action.Configuration) cfg, err := InitActionConfig(cfg, namespace, settings, uiLogger) if err != nil { @@ -37,7 +37,7 @@ func FetchChartValues(actionRunner HelmActionsRunner, namespace, name string, se } status := action.NewStatus(cfg) - release, err := actionRunner.GetStatus(status, name) + release, err := status.Run(name) if err != nil { return nil, err } diff --git a/cli/helm/install.go b/cli/helm/install.go deleted file mode 100644 index 1bb5f3c886..0000000000 --- a/cli/helm/install.go +++ /dev/null @@ -1,140 +0,0 @@ -package helm - -import ( - "embed" - "fmt" - "time" - - "github.com/hashicorp/consul-k8s/cli/common" - "github.com/hashicorp/consul-k8s/cli/common/terminal" - "golang.org/x/text/cases" - "golang.org/x/text/language" - "helm.sh/helm/v3/pkg/action" - helmCLI "helm.sh/helm/v3/pkg/cli" -) - -// InstallOptions is used when calling InstallHelmRelease. -type InstallOptions struct { - // ReleaseName is the name of the Helm release to be installed. - ReleaseName string - // ReleaseType is the Helm release type - consul vs consul-demo. - ReleaseType string - // Namespace is the Kubernetes namespace where the release is to be - // installed. - Namespace string - // Values are the Helm chart values in map form. - Values map[string]interface{} - // Settings is the Helm CLI environment settings. - Settings *helmCLI.EnvSettings - // EmbeddedChart specifies the Consul or Consul Demo Helm chart that has - // been embedded into the consul-k8s CLI. - EmbeddedChart embed.FS - // ChartDirName is the top level directory name of the EmbeddedChart. - ChartDirName string - // UILogger is a DebugLog used to return messages from Helm to the UI. - UILogger action.DebugLog - // DryRun specifies whether the install/upgrade should actually modify the - // Kubernetes cluster. - DryRun bool - // AutoApprove will bypass any terminal prompts with an automatic yes. - AutoApprove bool - // Wait specifies whether the Helm install should wait until all pods - // are ready. - Wait bool - // Timeout is the duration that Helm will wait for the command to complete - // before it throws an error. - Timeout time.Duration - // UI is the terminal output representation that is used to prompt the user - // and output messages. - UI terminal.UI - // HelmActionsRunner is a thin interface around Helm actions for install, - // upgrade, and uninstall. - HelmActionsRunner HelmActionsRunner -} - -// InstallDemoApp will perform the following actions: -// - Print out the installation summary. -// - Setup action configuration for Helm Go SDK function calls. -// - Setup the installation action. -// - Load the Helm chart. -// - Run the install.
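// Illustrative usage, not part of this patch: wiring up InstallOptions for the demo install with the real ActionRunner. The chart FS variable, directory name, and the settings/ui values are assumptions for the sketch.
//
//	err := InstallDemoApp(&InstallOptions{
//		ReleaseName:       common.ConsulDemoAppReleaseName,
//		ReleaseType:       common.ReleaseTypeConsulDemo,
//		Namespace:         settings.Namespace(),
//		Values:            map[string]interface{}{},
//		Settings:          settings,      // *helmCLI.EnvSettings, e.g. helmCLI.New()
//		EmbeddedChart:     demoChartFS,   // hypothetical embed.FS holding the demo chart
//		ChartDirName:      "consul-demo", // hypothetical top-level directory name
//		UILogger:          func(format string, v ...interface{}) {},
//		AutoApprove:       true,
//		Timeout:           5 * time.Minute,
//		UI:                ui,            // terminal.UI
//		HelmActionsRunner: &ActionRunner{},
//	})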
-func InstallDemoApp(options *InstallOptions) error { - options.UI.Output(fmt.Sprintf("%s Installation Summary", - cases.Title(language.English).String(common.ReleaseTypeConsulDemo)), - terminal.WithHeaderStyle()) - options.UI.Output("Name: %s", common.ConsulDemoAppReleaseName, terminal.WithInfoStyle()) - options.UI.Output("Namespace: %s", options.Settings.Namespace(), terminal.WithInfoStyle()) - options.UI.Output("\n", terminal.WithInfoStyle()) - - err := InstallHelmRelease(options) - if err != nil { - return err - } - - options.UI.Output("Accessing %s UI", cases.Title(language.English).String(common.ReleaseTypeConsulDemo), terminal.WithHeaderStyle()) - port := "8080" - portForwardCmd := fmt.Sprintf("kubectl port-forward deploy/frontend %s:80", port) - if options.Settings.Namespace() != "default" { - portForwardCmd += fmt.Sprintf(" --namespace %s", options.Settings.Namespace()) - } - options.UI.Output(portForwardCmd, terminal.WithInfoStyle()) - options.UI.Output("Browse to http://localhost:%s.", port, terminal.WithInfoStyle()) - return nil -} - -// InstallHelmRelease handles downloading the embedded helm chart, loading the -// values, and running the Helm install command. -func InstallHelmRelease(options *InstallOptions) error { - if options.DryRun { - return nil - } - - if !options.AutoApprove { - confirmation, err := options.UI.Input(&terminal.Input{ - Prompt: "Proceed with installation? (y/N)", - Style: terminal.InfoStyle, - Secret: false, - }) - - if err != nil { - return err - } - if common.Abort(confirmation) { - options.UI.Output("Install aborted. Use the command `consul-k8s install -help` to learn how to customize your installation.", - terminal.WithInfoStyle()) - return nil - } - } - - options.UI.Output("Installing %s", options.ReleaseType, terminal.WithHeaderStyle()) - - // Setup action configuration for Helm Go SDK function calls. - actionConfig := new(action.Configuration) - actionConfig, err := InitActionConfig(actionConfig, options.Namespace, options.Settings, options.UILogger) - if err != nil { - return err - } - - // Setup the installation action. - install := action.NewInstall(actionConfig) - install.ReleaseName = options.ReleaseName - install.Namespace = options.Namespace - install.CreateNamespace = true - install.Wait = options.Wait - install.Timeout = options.Timeout - - // Load the Helm chart. - chart, err := options.HelmActionsRunner.LoadChart(options.EmbeddedChart, options.ChartDirName) - if err != nil { - return err - } - options.UI.Output("Downloaded charts.", terminal.WithSuccessStyle()) - - // Run the install.
- if _, err = options.HelmActionsRunner.Install(install, chart, options.Values); err != nil { - return err - } - - options.UI.Output("%s installed in namespace %q.", options.ReleaseType, options.Namespace, terminal.WithSuccessStyle()) - return nil -} diff --git a/cli/helm/install_test.go b/cli/helm/install_test.go deleted file mode 100644 index 2cd98ca5a8..0000000000 --- a/cli/helm/install_test.go +++ /dev/null @@ -1,82 +0,0 @@ -package helm - -import ( - "bytes" - "context" - "embed" - "errors" - "testing" - - "github.com/hashicorp/consul-k8s/cli/common" - "github.com/hashicorp/consul-k8s/cli/common/terminal" - "github.com/stretchr/testify/require" - "helm.sh/helm/v3/pkg/action" - "helm.sh/helm/v3/pkg/chart" - helmCLI "helm.sh/helm/v3/pkg/cli" - "helm.sh/helm/v3/pkg/release" -) - -func TestInstallDemoApp(t *testing.T) { - cases := map[string]struct { - messages []string - helmActionsRunner *MockActionRunner - expectError bool - }{ - "basic success": { - messages: []string{ - "\n==> Consul Demo Application Installation Summary\n Name: consul-demo\n Namespace: default\n \n \n", - "\n==> Installing Consul\n ✓ Downloaded charts.\n ✓ Consul installed in namespace \"consul-namespace\".\n", - "\n==> Accessing Consul Demo Application UI\n kubectl port-forward deploy/frontend 8080:80 --namespace consul-namespace\n Browse to http://localhost:8080.\n", - }, - helmActionsRunner: &MockActionRunner{}, - }, - "failure because LoadChart returns failure": { - messages: []string{ - "\n==> Consul Demo Application Installation Summary\n Name: consul-demo\n Namespace: default\n \n \n\n==> Installing Consul\n", - }, - helmActionsRunner: &MockActionRunner{ - LoadChartFunc: func(chrt embed.FS, chartDirName string) (*chart.Chart, error) { - return nil, errors.New("sad trombone!") - }, - }, - expectError: true, - }, - "failure because Install returns failure": { - messages: []string{ - "\n==> Consul Demo Application Installation Summary\n Name: consul-demo\n Namespace: default\n \n \n\n==> Installing Consul\n", - }, - helmActionsRunner: &MockActionRunner{ - InstallFunc: func(install *action.Install, chrt *chart.Chart, vals map[string]interface{}) (*release.Release, error) { - return nil, errors.New("sad trombone!") - }, - }, - expectError: true, - }, - } - for name, tc := range cases { - t.Run(name, func(t *testing.T) { - buf := new(bytes.Buffer) - mock := tc.helmActionsRunner - options := &InstallOptions{ - HelmActionsRunner: mock, - UI: terminal.NewUI(context.Background(), buf), - UILogger: func(format string, v ...interface{}) {}, - ReleaseName: "consul-release", - ReleaseType: common.ReleaseTypeConsul, - Namespace: "consul-namespace", - Settings: helmCLI.New(), - AutoApprove: true, - } - err := InstallDemoApp(options) - if tc.expectError { - require.Error(t, err) - } else { - require.NoError(t, err) - } - output := buf.String() - for _, msg := range tc.messages { - require.Contains(t, output, msg) - } - }) - } -} diff --git a/cli/helm/mock.go b/cli/helm/mock.go deleted file mode 100644 index 05d3b6edb4..0000000000 --- a/cli/helm/mock.go +++ /dev/null @@ -1,136 +0,0 @@ -package helm - -import ( - "embed" - - "github.com/hashicorp/consul-k8s/cli/common" - - "helm.sh/helm/v3/pkg/action" - "helm.sh/helm/v3/pkg/chart" - "helm.sh/helm/v3/pkg/release" -) - -type MockActionRunner struct { - CheckForInstallationsFunc func(options *CheckForInstallationsOptions) (bool, string, string, error) - GetStatusFunc func(status *action.Status, name string) (*release.Release, error) - InstallFunc func(install *action.Install, chrt 
*chart.Chart, vals map[string]interface{}) (*release.Release, error) - LoadChartFunc func(chrt embed.FS, chartDirName string) (*chart.Chart, error) - UninstallFunc func(uninstall *action.Uninstall, name string) (*release.UninstallReleaseResponse, error) - UpgradeFunc func(upgrade *action.Upgrade, name string, chart *chart.Chart, vals map[string]interface{}) (*release.Release, error) - CheckedForConsulInstallations bool - CheckedForConsulDemoInstallations bool - GotStatusConsulRelease bool - GotStatusConsulDemoRelease bool - ConsulInstalled bool - ConsulUninstalled bool - ConsulUpgraded bool - ConsulDemoInstalled bool - ConsulDemoUninstalled bool - ConsulDemoUpgraded bool -} - -func (m *MockActionRunner) Install(install *action.Install, chrt *chart.Chart, vals map[string]interface{}) (*release.Release, error) { - var installFunc func(install *action.Install, chrt *chart.Chart, vals map[string]interface{}) (*release.Release, error) - if m.InstallFunc == nil { - installFunc = func(install *action.Install, chrt *chart.Chart, vals map[string]interface{}) (*release.Release, error) { - return &release.Release{}, nil - } - } else { - installFunc = m.InstallFunc - } - - release, err := installFunc(install, chrt, vals) - if err == nil { - if install.ReleaseName == common.DefaultReleaseName { - m.ConsulInstalled = true - } else if install.ReleaseName == common.ConsulDemoAppReleaseName { - m.ConsulDemoInstalled = true - } - } - return release, err -} - -func (m *MockActionRunner) Uninstall(uninstall *action.Uninstall, name string) (*release.UninstallReleaseResponse, error) { - var uninstallFunc func(uninstall *action.Uninstall, name string) (*release.UninstallReleaseResponse, error) - - if m.UninstallFunc == nil { - uninstallFunc = func(uninstall *action.Uninstall, name string) (*release.UninstallReleaseResponse, error) { - return &release.UninstallReleaseResponse{}, nil - } - } else { - uninstallFunc = m.UninstallFunc - } - - release, err := uninstallFunc(uninstall, name) - if err == nil { - if name == common.DefaultReleaseName { - m.ConsulUninstalled = true - } else if name == common.ConsulDemoAppReleaseName { - m.ConsulDemoUninstalled = true - } - } - return release, err -} - -func (m *MockActionRunner) CheckForInstallations(options *CheckForInstallationsOptions) (bool, string, string, error) { - if options.ReleaseName == common.DefaultReleaseName { - m.CheckedForConsulInstallations = true - } else if options.ReleaseName == common.ConsulDemoAppReleaseName { - m.CheckedForConsulDemoInstallations = true - } - - if m.CheckForInstallationsFunc == nil { - return false, "", "", nil - } - return m.CheckForInstallationsFunc(options) -} - -func (m *MockActionRunner) GetStatus(status *action.Status, name string) (*release.Release, error) { - if name == common.DefaultReleaseName { - m.GotStatusConsulRelease = true - } else if name == common.ConsulDemoAppReleaseName { - m.GotStatusConsulDemoRelease = true - } - - if m.GetStatusFunc == nil { - return &release.Release{}, nil - } - return m.GetStatusFunc(status, name) -} - -func (m *MockActionRunner) Upgrade(upgrade *action.Upgrade, name string, chrt *chart.Chart, vals map[string]interface{}) (*release.Release, error) { - var upgradeFunc func(upgrade *action.Upgrade, name string, chrt *chart.Chart, vals map[string]interface{}) (*release.Release, error) - - if m.UpgradeFunc == nil { - upgradeFunc = func(upgrade *action.Upgrade, name string, chrt *chart.Chart, vals map[string]interface{}) (*release.Release, error) { - return &release.Release{}, nil - } - } else { - 
upgradeFunc = m.UpgradeFunc - } - - release, err := upgradeFunc(upgrade, name, chrt, vals) - if err == nil { - if name == common.DefaultReleaseName { - m.ConsulUpgraded = true - } else if name == common.ConsulDemoAppReleaseName { - m.ConsulDemoUpgraded = true - } - } - return release, err -} - -func (m *MockActionRunner) LoadChart(chrt embed.FS, chartDirName string) (*chart.Chart, error) { - var loadChartFunc func(chrt embed.FS, chartDirName string) (*chart.Chart, error) - - if m.LoadChartFunc == nil { - loadChartFunc = func(chrt embed.FS, chartDirName string) (*chart.Chart, error) { - return &chart.Chart{}, nil - } - } else { - loadChartFunc = m.LoadChartFunc - } - - release, err := loadChartFunc(chrt, chartDirName) - return release, err -} diff --git a/cli/helm/upgrade.go b/cli/helm/upgrade.go deleted file mode 100644 index d2b8523c5f..0000000000 --- a/cli/helm/upgrade.go +++ /dev/null @@ -1,149 +0,0 @@ -package helm - -import ( - "embed" - "strings" - "time" - - "github.com/hashicorp/consul-k8s/cli/common" - "github.com/hashicorp/consul-k8s/cli/common/terminal" - "golang.org/x/text/cases" - "golang.org/x/text/language" - "helm.sh/helm/v3/pkg/action" - helmCLI "helm.sh/helm/v3/pkg/cli" -) - -// UpgradeOptions is used when calling UpgradeHelmRelease. -type UpgradeOptions struct { - // ReleaseName is the name of the installed Helm release to upgrade. - ReleaseName string - // ReleaseType is the helm upgrade type - consul vs consul-demo. - ReleaseType string - // ReleaseTypeName is a user friendly version of ReleaseType. The values - // are consul and consul demo application. - ReleaseTypeName string - // Namespace is the Kubernetes namespace where the release is installed. - Namespace string - // Values are the Helm chart values in map form. - Values map[string]interface{} - // Settings is the Helm CLI environment settings. - Settings *helmCLI.EnvSettings - // EmbeddedChart specifies the Consul or Consul Demo Helm chart that has - // been embedded into the consul-k8s CLI. - EmbeddedChart embed.FS - // ChartDirName is the top level directory name of the EmbeddedChart. - ChartDirName string - // UILogger is a DebugLog used to return messages from Helm to the UI. - UILogger action.DebugLog - // DryRun specifies whether the upgrade should actually modify the - // Kubernetes cluster. - DryRun bool - // AutoApprove will bypass any terminal prompts with an automatic yes. - AutoApprove bool - // Wait specifies whether the Helm upgrade should wait until all pods - // are ready. - Wait bool - // Timeout is the duration that Helm will wait for the command to complete - // before it throws an error. - Timeout time.Duration - // UI is the terminal output representation that is used to prompt the user - // and output messages. - UI terminal.UI - // HelmActionsRunner is a thin interface around Helm actions for install, - // upgrade, and uninstall. - HelmActionsRunner HelmActionsRunner -} - -// UpgradeHelmRelease handles downloading the embedded helm chart, loading the -// values, showing the diff between new and installed values, and running the -// Helm upgrade command.
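// Illustrative usage, not part of this patch: a dry-run upgrade prints the values diff and returns before any Helm SDK call is made (note the early return on DryRun in the function below). The variable names here are assumptions.
//
//	err := UpgradeHelmRelease(&UpgradeOptions{
//		ReleaseName:       common.DefaultReleaseName,
//		ReleaseType:       common.ReleaseTypeConsul,
//		ReleaseTypeName:   common.ReleaseTypeConsul,
//		Namespace:         "consul",      // assumed namespace
//		Values:            overrides,     // hypothetical map of user value overrides
//		Settings:          helmCLI.New(),
//		EmbeddedChart:     consulChartFS, // hypothetical embed.FS holding the Consul chart
//		ChartDirName:      "consul",
//		UILogger:          uiLogger,      // hypothetical action.DebugLog
//		DryRun:            true,
//		UI:                ui,            // terminal.UI
//		HelmActionsRunner: &ActionRunner{},
//	})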
-func UpgradeHelmRelease(options *UpgradeOptions) error { - options.UI.Output("%s Upgrade Summary", cases.Title(language.English).String(options.ReleaseTypeName), terminal.WithHeaderStyle()) - - chart, err := options.HelmActionsRunner.LoadChart(options.EmbeddedChart, options.ChartDirName) - if err != nil { - return err - } - options.UI.Output("Downloaded charts.", terminal.WithSuccessStyle()) - - currentChartValues, err := FetchChartValues(options.HelmActionsRunner, - options.Namespace, options.ReleaseName, options.Settings, options.UILogger) - if err != nil { - return err - } - - // Print out the upgrade summary. - if err = printDiff(currentChartValues, options.Values, options.UI); err != nil { - options.UI.Output("Could not print the difference between current and upgraded charts: %v", err, terminal.WithErrorStyle()) - return err - } - - // Check if the user is OK with the upgrade unless the auto approve or dry run flags are true. - if !options.AutoApprove && !options.DryRun { - confirmation, err := options.UI.Input(&terminal.Input{ - Prompt: "Proceed with upgrade? (y/N)", - Style: terminal.InfoStyle, - Secret: false, - }) - - if err != nil { - return err - } - if common.Abort(confirmation) { - options.UI.Output("Upgrade aborted. Use the command `consul-k8s upgrade -help` to learn how to customize your upgrade.", - terminal.WithInfoStyle()) - return nil - } - } - - if !options.DryRun { - options.UI.Output("Upgrading %s", options.ReleaseTypeName, terminal.WithHeaderStyle()) - } else { - options.UI.Output("Performing Dry Run Upgrade", terminal.WithHeaderStyle()) - return nil - } - - // Setup action configuration for Helm Go SDK function calls. - actionConfig := new(action.Configuration) - actionConfig, err = InitActionConfig(actionConfig, options.Namespace, options.Settings, options.UILogger) - if err != nil { - return err - } - - // Setup the upgrade action. - upgrade := action.NewUpgrade(actionConfig) - upgrade.Namespace = options.Namespace - upgrade.DryRun = options.DryRun - upgrade.Wait = options.Wait - upgrade.Timeout = options.Timeout - - // Run the upgrade. Dry runs returned early above, so this call is only reached for a real upgrade. - _, err = options.HelmActionsRunner.Upgrade(upgrade, options.ReleaseName, chart, options.Values) - if err != nil { - return err - } - options.UI.Output("%s upgraded in namespace %q.", cases.Title(language.English).String(options.ReleaseTypeName), options.Namespace, terminal.WithSuccessStyle()) - return nil -} - -// printDiff marshals both maps to YAML and prints the diff between the two.
-func printDiff(old, new map[string]interface{}, ui terminal.UI) error { - diff, err := common.Diff(old, new) - if err != nil { - return err - } - - ui.Output("\nDifference between user overrides for current and upgraded charts"+ - "\n--------------------------------------------------------------", terminal.WithInfoStyle()) - for _, line := range strings.Split(diff, "\n") { - if strings.HasPrefix(line, "+") { - ui.Output(line, terminal.WithDiffAddedStyle()) - } else if strings.HasPrefix(line, "-") { - ui.Output(line, terminal.WithDiffRemovedStyle()) - } else { - ui.Output(line, terminal.WithDiffUnchangedStyle()) - } - } - - return nil -} diff --git a/cli/helm/upgrade_test.go b/cli/helm/upgrade_test.go deleted file mode 100644 index 9ffb7dc201..0000000000 --- a/cli/helm/upgrade_test.go +++ /dev/null @@ -1,117 +0,0 @@ -package helm - -import ( - "bytes" - "context" - "embed" - "errors" - "testing" - - "github.com/hashicorp/consul-k8s/cli/common" - "github.com/hashicorp/consul-k8s/cli/common/terminal" - "github.com/stretchr/testify/require" - "helm.sh/helm/v3/pkg/action" - "helm.sh/helm/v3/pkg/chart" - helmCLI "helm.sh/helm/v3/pkg/cli" - "helm.sh/helm/v3/pkg/release" -) - -func TestUpgrade(t *testing.T) { - buf := new(bytes.Buffer) - mock := &MockActionRunner{ - CheckForInstallationsFunc: func(options *CheckForInstallationsOptions) (bool, string, string, error) { - if options.ReleaseName == "consul" { - return false, "", "", nil - } else { - return true, "consul-demo", "consul-demo", nil - } - }, - } - - options := &UpgradeOptions{ - HelmActionsRunner: mock, - UI: terminal.NewUI(context.Background(), buf), - UILogger: func(format string, v ...interface{}) {}, - ReleaseName: "consul-release", - ReleaseType: common.ReleaseTypeConsul, - Namespace: "consul-namespace", - Settings: helmCLI.New(), - AutoApprove: true, - } - - expectedMessages := []string{ - "\n==> Upgrade Summary\n ✓ Downloaded charts.\n \n Difference between user overrides for current and upgraded charts\n --------------------------------------------------------------\n \n", - "\n==> Upgrading \n ✓ upgraded in namespace \"consul-namespace\".\n", - } - err := UpgradeHelmRelease(options) - require.NoError(t, err) - output := buf.String() - for _, msg := range expectedMessages { - require.Contains(t, output, msg) - } -} - -func TestUpgradeHelmRelease(t *testing.T) { - cases := map[string]struct { - messages []string - helmActionsRunner *MockActionRunner - expectError bool - }{ - "basic success": { - messages: []string{ - "\n==> Consul Upgrade Summary\n ✓ Downloaded charts.\n \n Difference between user overrides for current and upgraded charts\n --------------------------------------------------------------\n \n", - "\n==> Upgrading Consul\n ✓ Consul upgraded in namespace \"consul-namespace\".\n", - }, - helmActionsRunner: &MockActionRunner{}, - }, - "failure because LoadChart returns failure": { - messages: []string{ - "\n==> Consul Upgrade Summary\n", - }, - helmActionsRunner: &MockActionRunner{ - LoadChartFunc: func(chrt embed.FS, chartDirName string) (*chart.Chart, error) { - return nil, errors.New("sad trombone!") - }, - }, - expectError: true, - }, - "failure because Upgrade returns failure": { - messages: []string{ - "\n==> Consul Upgrade Summary\n", - }, - helmActionsRunner: &MockActionRunner{ - UpgradeFunc: func(upgrade *action.Upgrade, name string, chart *chart.Chart, vals map[string]interface{}) (*release.Release, error) { - return nil, errors.New("sad trombone!") - }, - }, - expectError: true, - }, - } - for name, tc := 
range cases { - t.Run(name, func(t *testing.T) { - buf := new(bytes.Buffer) - mock := tc.helmActionsRunner - options := &UpgradeOptions{ - HelmActionsRunner: mock, - UI: terminal.NewUI(context.Background(), buf), - UILogger: func(format string, v ...interface{}) {}, - ReleaseName: "consul-release", - ReleaseType: common.ReleaseTypeConsul, - ReleaseTypeName: common.ReleaseTypeConsul, - Namespace: "consul-namespace", - Settings: helmCLI.New(), - AutoApprove: true, - } - err := UpgradeHelmRelease(options) - if tc.expectError { - require.Error(t, err) - } else { - require.NoError(t, err) - } - output := buf.String() - for _, msg := range tc.messages { - require.Contains(t, output, msg) - } - }) - } -} diff --git a/cli/preset/cloud_preset.go b/cli/preset/cloud_preset.go deleted file mode 100644 index 95219cb378..0000000000 --- a/cli/preset/cloud_preset.go +++ /dev/null @@ -1,431 +0,0 @@ -package preset - -import ( - "context" - "encoding/json" - "fmt" - "net/http" - - "github.com/hashicorp/consul-k8s/cli/common" - "github.com/hashicorp/consul-k8s/cli/common/terminal" - "github.com/hashicorp/consul-k8s/cli/config" - "github.com/hashicorp/hcp-sdk-go/clients/cloud-global-network-manager-service/preview/2022-02-15/models" - "github.com/hashicorp/hcp-sdk-go/httpclient" - "github.com/hashicorp/hcp-sdk-go/resource" - - hcpgnm "github.com/hashicorp/hcp-sdk-go/clients/cloud-global-network-manager-service/preview/2022-02-15/client/global_network_manager_service" - corev1 "k8s.io/api/core/v1" - k8serrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/kubernetes" -) - -const ( - secretNameHCPClientID = "consul-hcp-client-id" - secretKeyHCPClientID = "client-id" - secretNameHCPClientSecret = "consul-hcp-client-secret" - secretKeyHCPClientSecret = "client-secret" - secretNameHCPResourceID = "consul-hcp-resource-id" - secretKeyHCPResourceID = "resource-id" - secretNameHCPAPIHostname = "consul-hcp-api-host" - secretKeyHCPAPIHostname = "api-hostname" - secretNameHCPAuthURL = "consul-hcp-auth-url" - secretKeyHCPAuthURL = "auth-url" - secretNameHCPScadaAddress = "consul-hcp-scada-address" - secretKeyHCPScadaAddress = "scada-address" - secretNameGossipKey = "consul-gossip-key" - secretKeyGossipKey = "key" - secretNameBootstrapToken = "consul-bootstrap-token" - secretKeyBootstrapToken = "token" - secretNameServerCA = "consul-server-ca" - secretNameServerCert = "consul-server-cert" -) - -// CloudBootstrapConfig represents the response fetched from the agent -// bootstrap config endpoint in HCP. -type CloudBootstrapConfig struct { - BootstrapResponse *models.HashicorpCloudGlobalNetworkManager20220215AgentBootstrapResponse - ConsulConfig ConsulConfig - HCPConfig HCPConfig -} - -// HCPConfig represents the resource-id, client-id, and client-secret -// provided by the user in order to make a call to fetch the agent bootstrap -// config data from the endpoint in HCP. -type HCPConfig struct { - ResourceID string - ClientID string - ClientSecret string - AuthURL string - APIHostname string - ScadaAddress string -} - -// ConsulConfig represents 'cluster.consul_config' in the response -// fetched from the agent bootstrap config endpoint in HCP. -type ConsulConfig struct { - ACL ACL `json:"acl"` -} - -// ACL represents 'cluster.consul_config.acl' in the response -// fetched from the agent bootstrap config endpoint in HCP. 
-type ACL struct { - Tokens Tokens `json:"tokens"` -} - -// Tokens represents 'cluster.consul_config.acl.tokens' in the -// response fetched from the agent bootstrap config endpoint in HCP. -type Tokens struct { - Agent string `json:"agent"` - InitialManagement string `json:"initial_management"` -} - -// CloudPreset struct is an implementation of the Preset interface that is used -// to fetch agent bootstrap config from HCP, save it to secrets, and provide a -// Helm values map that is used during installation. -type CloudPreset struct { - HCPConfig *HCPConfig - KubernetesClient kubernetes.Interface - KubernetesNamespace string - UI terminal.UI - SkipSavingSecrets bool - Context context.Context - HTTPClient *http.Client -} - -// GetValueMap must fetch configuration from HCP, save various secrets from -// the response, and map the secret names into the returned value map. -func (c *CloudPreset) GetValueMap() (map[string]interface{}, error) { - bootstrapConfig, err := c.fetchAgentBootstrapConfig() - if err != nil { - return nil, err - } - - if !c.SkipSavingSecrets { - err = c.saveSecretsFromBootstrapConfig(bootstrapConfig) - if err != nil { - return nil, err - } - } - - return c.getHelmConfigWithMapSecretNames(bootstrapConfig), nil -} - -// fetchAgentBootstrapConfig uses the resource-id, client-id, and client-secret -// to call the agent bootstrap config endpoint and parse the response into a -// CloudBootstrapConfig struct. -func (c *CloudPreset) fetchAgentBootstrapConfig() (*CloudBootstrapConfig, error) { - c.UI.Output("Fetching Consul cluster configuration from HCP", terminal.WithHeaderStyle()) - httpClientCfg := httpclient.Config{} - clientRuntime, err := httpclient.New(httpClientCfg) - if err != nil { - return nil, err - } - - hcpgnmClient := hcpgnm.New(clientRuntime, nil) - clusterResource, err := resource.FromString(c.HCPConfig.ResourceID) - if err != nil { - return nil, err - } - - params := hcpgnm.NewAgentBootstrapConfigParamsWithContext(c.Context). - WithID(clusterResource.ID). - WithLocationOrganizationID(clusterResource.Organization). - WithLocationProjectID(clusterResource.Project). - WithHTTPClient(c.HTTPClient) - - resp, err := hcpgnmClient.AgentBootstrapConfig(params, nil) - if err != nil { - return nil, err - } - - bootstrapConfig := resp.GetPayload() - c.UI.Output("HCP configuration successfully fetched.", terminal.WithSuccessStyle()) - - return c.parseBootstrapConfigResponse(bootstrapConfig) -} - -// parseBootstrapConfigResponse unmarshals the bootstrap response and also sets -// the HCPConfig values to return a CloudBootstrapConfig struct.
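// Illustrative sketch of the decode step below, not part of this patch: the HCP bootstrap response embeds Consul's configuration as a JSON string inside the JSON payload, so reaching the ACL tokens takes a second json.Unmarshal into the ConsulConfig/ACL/Tokens types defined above.
//
//	raw := `{"acl":{"tokens":{"agent":"a-token","initial_management":"m-token"}}}`
//	var cc ConsulConfig
//	if err := json.Unmarshal([]byte(raw), &cc); err != nil {
//		return nil, err
//	}
//	_ = cc.ACL.Tokens.InitialManagement // "m-token"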
-func (c *CloudPreset) parseBootstrapConfigResponse(bootstrapResponse *models.HashicorpCloudGlobalNetworkManager20220215AgentBootstrapResponse) (*CloudBootstrapConfig, error) { - var cbc CloudBootstrapConfig - var consulConfig ConsulConfig - err := json.Unmarshal([]byte(bootstrapResponse.Bootstrap.ConsulConfig), &consulConfig) - if err != nil { - return nil, err - } - cbc.ConsulConfig = consulConfig - cbc.HCPConfig = *c.HCPConfig - cbc.BootstrapResponse = bootstrapResponse - - return &cbc, nil -} - -func getOptionalSecretFromHCPConfig(hcpConfigValue, valuesConfigKey, secretName, secretKey string) string { - if hcpConfigValue != "" { - // Need to make sure the below has strict spaces and no tabs - return fmt.Sprintf(`%s: - secretName: %s - secretKey: %s - `, valuesConfigKey, secretName, secretKey) - } - return "" -} - -// getHelmConfigWithMapSecretNames maps the secret names where agent bootstrap -// config values have been saved into the Helm values template for -// the cloud preset, and returns the value map. -func (c *CloudPreset) getHelmConfigWithMapSecretNames(cfg *CloudBootstrapConfig) map[string]interface{} { - apiHostCfg := getOptionalSecretFromHCPConfig(cfg.HCPConfig.APIHostname, "apiHost", secretNameHCPAPIHostname, secretKeyHCPAPIHostname) - authURLCfg := getOptionalSecretFromHCPConfig(cfg.HCPConfig.AuthURL, "authUrl", secretNameHCPAuthURL, secretKeyHCPAuthURL) - scadaAddressCfg := getOptionalSecretFromHCPConfig(cfg.HCPConfig.ScadaAddress, "scadaAddress", secretNameHCPScadaAddress, secretKeyHCPScadaAddress) - - // Need to make sure the below has strict spaces and no tabs - values := fmt.Sprintf(` -global: - datacenter: %s - tls: - enabled: true - enableAutoEncrypt: true - caCert: - secretName: %s - secretKey: %s - gossipEncryption: - secretName: %s - secretKey: %s - acls: - manageSystemACLs: true - bootstrapToken: - secretName: %s - secretKey: %s - cloud: - enabled: true - resourceId: - secretName: %s - secretKey: %s - clientId: - secretName: %s - secretKey: %s - clientSecret: - secretName: %s - secretKey: %s - %s - %s - %s -server: - replicas: %d - affinity: null - serverCert: - secretName: %s -connectInject: - enabled: true -controller: - enabled: true -`, cfg.BootstrapResponse.Cluster.ID, secretNameServerCA, corev1.TLSCertKey, - secretNameGossipKey, secretKeyGossipKey, secretNameBootstrapToken, - secretKeyBootstrapToken, - secretNameHCPResourceID, secretKeyHCPResourceID, - secretNameHCPClientID, secretKeyHCPClientID, - secretNameHCPClientSecret, secretKeyHCPClientSecret, - apiHostCfg, authURLCfg, scadaAddressCfg, - cfg.BootstrapResponse.Cluster.BootstrapExpect, secretNameServerCert) - valuesMap := config.ConvertToMap(values) - return valuesMap -} - -// saveSecretsFromBootstrapConfig takes the following items from the -// agent bootstrap config from HCP and saves them into known secret names and -// keys: -// - HCP config resource-id. -// - HCP client-id. -// - HCP client-secret. -// - HCP auth URL (optional) -// - HCP api hostname (optional) -// - HCP scada address (optional) -// - ACL bootstrap token. -// - gossip encryption key. -// - server tls cert and key. -// - server CA cert.
-func (c *CloudPreset) saveSecretsFromBootstrapConfig(config *CloudBootstrapConfig) error { - // create namespace - if err := c.createNamespaceIfNotExists(); err != nil { - return err - } - - // HCP resource id - if config.HCPConfig.ResourceID != "" { - data := map[string][]byte{ - secretKeyHCPResourceID: []byte(config.HCPConfig.ResourceID), - } - if err := c.saveSecret(secretNameHCPResourceID, data, corev1.SecretTypeOpaque); err != nil { - return err - } - c.UI.Output(fmt.Sprintf("HCP resource id saved in '%s' secret in namespace '%s'.", - secretNameHCPResourceID, c.KubernetesNamespace), terminal.WithSuccessStyle()) - } - - // HCP client id - if config.HCPConfig.ClientID != "" { - data := map[string][]byte{ - secretKeyHCPClientID: []byte(config.HCPConfig.ClientID), - } - if err := c.saveSecret(secretNameHCPClientID, data, corev1.SecretTypeOpaque); err != nil { - return err - } - c.UI.Output(fmt.Sprintf("HCP client id saved in '%s' secret in namespace '%s'.", - secretNameHCPClientID, c.KubernetesNamespace), terminal.WithSuccessStyle()) - } - - // HCP client secret - if config.HCPConfig.ClientSecret != "" { - data := map[string][]byte{ - secretKeyHCPClientSecret: []byte(config.HCPConfig.ClientSecret), - } - if err := c.saveSecret(secretNameHCPClientSecret, data, corev1.SecretTypeOpaque); err != nil { - return err - } - c.UI.Output(fmt.Sprintf("HCP client secret saved in '%s' secret in namespace '%s'.", - secretNameHCPClientSecret, c.KubernetesNamespace), terminal.WithSuccessStyle()) - } - - // bootstrap token - if config.ConsulConfig.ACL.Tokens.InitialManagement != "" { - data := map[string][]byte{ - secretKeyBootstrapToken: []byte(config.ConsulConfig.ACL.Tokens.InitialManagement), - } - if err := c.saveSecret(secretNameBootstrapToken, data, corev1.SecretTypeOpaque); err != nil { - return err - } - c.UI.Output(fmt.Sprintf("ACL bootstrap token saved as '%s' key in '%s' secret in namespace '%s'.", - secretKeyBootstrapToken, secretNameBootstrapToken, c.KubernetesNamespace), terminal.WithSuccessStyle()) - } - - // gossip key - if config.BootstrapResponse.Bootstrap.GossipKey != "" { - data := map[string][]byte{ - secretKeyGossipKey: []byte(config.BootstrapResponse.Bootstrap.GossipKey), - } - if err := c.saveSecret(secretNameGossipKey, data, corev1.SecretTypeOpaque); err != nil { - return err - } - c.UI.Output(fmt.Sprintf("Gossip encryption key saved as '%s' key in '%s' secret in namespace '%s'.", - secretKeyGossipKey, secretNameGossipKey, c.KubernetesNamespace), terminal.WithSuccessStyle()) - } - - // server cert secret - if config.BootstrapResponse.Bootstrap.ServerTLS.Cert != "" { - data := map[string][]byte{ - corev1.TLSCertKey: []byte(config.BootstrapResponse.Bootstrap.ServerTLS.Cert), - corev1.TLSPrivateKeyKey: []byte(config.BootstrapResponse.Bootstrap.ServerTLS.PrivateKey), - } - if err := c.saveSecret(secretNameServerCert, data, corev1.SecretTypeTLS); err != nil { - return err - } - c.UI.Output(fmt.Sprintf("Server TLS cert and key saved as '%s' and '%s' keys in '%s' secret in namespace '%s'.", - corev1.TLSCertKey, corev1.TLSPrivateKeyKey, secretNameServerCert, c.KubernetesNamespace), terminal.WithSuccessStyle()) - } - - // server CA - if len(config.BootstrapResponse.Bootstrap.ServerTLS.CertificateAuthorities) > 0 && - config.BootstrapResponse.Bootstrap.ServerTLS.CertificateAuthorities[0] != "" { - data := map[string][]byte{ - corev1.TLSCertKey: []byte(config.BootstrapResponse.Bootstrap.ServerTLS.CertificateAuthorities[0]), - } - if err := c.saveSecret(secretNameServerCA, data,
corev1.SecretTypeOpaque); err != nil { - return err - } - c.UI.Output(fmt.Sprintf("Server TLS CA saved as '%s' key in '%s' secret in namespace '%s'.", - corev1.TLSCertKey, secretNameServerCA, c.KubernetesNamespace), terminal.WithSuccessStyle()) - } - // Optional secrets - // HCP auth url - if config.HCPConfig.AuthURL != "" { - data := map[string][]byte{ - secretKeyHCPAuthURL: []byte(config.HCPConfig.AuthURL), - } - if err := c.saveSecret(secretNameHCPAuthURL, data, corev1.SecretTypeOpaque); err != nil { - return err - } - c.UI.Output(fmt.Sprintf("HCP auth url saved as '%s' key in '%s' secret in namespace '%s'.", - secretKeyHCPAuthURL, secretNameHCPAuthURL, c.KubernetesNamespace), terminal.WithSuccessStyle()) - } - - // HCP api hostname - if config.HCPConfig.APIHostname != "" { - data := map[string][]byte{ - secretKeyHCPAPIHostname: []byte(config.HCPConfig.APIHostname), - } - if err := c.saveSecret(secretNameHCPAPIHostname, data, corev1.SecretTypeOpaque); err != nil { - return err - } - c.UI.Output(fmt.Sprintf("HCP api hostname saved as '%s' key in '%s' secret in namespace '%s'.", - secretKeyHCPAPIHostname, secretNameHCPAPIHostname, c.KubernetesNamespace), terminal.WithSuccessStyle()) - } - - // HCP scada address - if config.HCPConfig.ScadaAddress != "" { - data := map[string][]byte{ - secretKeyHCPScadaAddress: []byte(config.HCPConfig.ScadaAddress), - } - if err := c.saveSecret(secretNameHCPScadaAddress, data, corev1.SecretTypeOpaque); err != nil { - return err - } - c.UI.Output(fmt.Sprintf("HCP scada address saved as '%s' key in '%s' secret in namespace '%s'.", - secretKeyHCPScadaAddress, secretNameHCPScadaAddress, c.KubernetesNamespace), terminal.WithSuccessStyle()) - } - - return nil -} - -// createNamespaceIfNotExists checks to see if a given namespace exists and -// creates it if it does not. This function is needed to ensure a namespace -// exists before HCP config secrets are saved. -func (c *CloudPreset) createNamespaceIfNotExists() error { - c.UI.Output(fmt.Sprintf("Checking if %s namespace needs to be created", c.KubernetesNamespace), terminal.WithHeaderStyle()) - // Create k8s namespace if it doesn't exist. - _, err := c.KubernetesClient.CoreV1().Namespaces().Get(context.Background(), c.KubernetesNamespace, metav1.GetOptions{}) - if k8serrors.IsNotFound(err) { - _, err = c.KubernetesClient.CoreV1().Namespaces().Create(context.Background(), &corev1.Namespace{ - ObjectMeta: metav1.ObjectMeta{ - Name: c.KubernetesNamespace, - }, - }, metav1.CreateOptions{}) - if err != nil { - return err - } - c.UI.Output(fmt.Sprintf("Namespace '%s' has been created.", c.KubernetesNamespace), terminal.WithSuccessStyle()) - - } else if err != nil { - return err - } else { - c.UI.Output(fmt.Sprintf("Namespace '%s' already exists.", c.KubernetesNamespace), terminal.WithSuccessStyle()) - } - return nil -} - -// saveSecret saves given key value pairs into a given secret in a given -// namespace. It is the generic function that helps save all of the specific -// cloud preset secrets.
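// Illustrative usage, not part of this patch: persisting the gossip encryption key through the generic helper below, mirroring the calls in saveSecretsFromBootstrapConfig above. gossipKey is a hypothetical variable in scope on a CloudPreset method.
//
//	data := map[string][]byte{secretKeyGossipKey: []byte(gossipKey)}
//	if err := c.saveSecret(secretNameGossipKey, data, corev1.SecretTypeOpaque); err != nil {
//		return err // the helper returns an error if the secret already exists
//	}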
-func (c *CloudPreset) saveSecret(secretName string, kvps map[string][]byte, secretType corev1.SecretType) error { - _, err := c.KubernetesClient.CoreV1().Secrets(c.KubernetesNamespace).Get(context.Background(), secretName, metav1.GetOptions{}) - secret := &corev1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: secretName, - Namespace: c.KubernetesNamespace, - Labels: map[string]string{common.CLILabelKey: common.CLILabelValue}, - }, - Data: kvps, - Type: secretType, - } - if k8serrors.IsNotFound(err) { - _, err = c.KubernetesClient.CoreV1().Secrets(c.KubernetesNamespace).Create(context.Background(), secret, metav1.CreateOptions{}) - if err != nil { - return err - } - } else if err != nil { - return err - } else { - return fmt.Errorf("'%s' secret in '%s' namespace already exists", secretName, c.KubernetesNamespace) - } - return nil -} diff --git a/cli/preset/cloud_preset_test.go b/cli/preset/cloud_preset_test.go deleted file mode 100644 index 946e1ca158..0000000000 --- a/cli/preset/cloud_preset_test.go +++ /dev/null @@ -1,701 +0,0 @@ -package preset - -import ( - "context" - "encoding/json" - "fmt" - "net/http" - "net/http/httptest" - "net/url" - "os" - "testing" - - "github.com/hashicorp/consul-k8s/cli/common" - "github.com/hashicorp/consul-k8s/cli/common/terminal" - "github.com/hashicorp/hcp-sdk-go/clients/cloud-global-network-manager-service/preview/2022-02-15/models" - "github.com/stretchr/testify/require" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/kubernetes" - "k8s.io/client-go/kubernetes/fake" - "sigs.k8s.io/yaml" -) - -const ( - hcpClientID = "RAxJflDbxDXw8kLY6jWmwqMz3kVe7NnL" - hcpClientSecret = "1fNzurLatQPLPwf7jnD4fRtU9f5nH31RKBHayy08uQ6P-6nwI1rFZjMXb4m3cCKH" - hcpResourceID = "organization/ccbdd191-5dc3-4a73-9e05-6ac30ca67992/project/36019e0d-ed59-4df6-9990-05bb7fc793b6/hashicorp.consul.global-network-manager.cluster/prod-on-prem" - expectedSecretNameHCPClientId = "consul-hcp-client-id" - expectedSecretNameHCPClientSecret = "consul-hcp-client-secret" - expectedSecretNameHCPResourceId = "consul-hcp-resource-id" - expectedSecretNameHCPAuthURL = "consul-hcp-auth-url" - expectedSecretNameHCPApiHostname = "consul-hcp-api-host" - expectedSecretNameHCPScadaAddress = "consul-hcp-scada-address" - expectedSecretNameGossipKey = "consul-gossip-key" - expectedSecretNameBootstrap = "consul-bootstrap-token" - expectedSecretNameServerCA = "consul-server-ca" - expectedSecretNameServerCert = "consul-server-cert" - namespace = "consul" - validResponse = ` -{ - "cluster": - { - "id": "dc1", - "bootstrap_expect" : 3 - }, - "bootstrap": - { - "gossip_key": "Wa6/XFAnYy0f9iqVH2iiG+yore3CqHSemUy4AIVTa/w=", - "server_tls": { - "certificate_authorities": [ - "-----BEGIN 
CERTIFICATE-----\nMIIC6TCCAo+gAwIBAgIQA3pUmJcy9uw8MNIDZPiaZjAKBggqhkjOPQQDAjCBtzEL\nMAkGA1UEBhMCVVMxCzAJBgNVBAgTAkNBMRYwFAYDVQQHEw1TYW4gRnJhbmNpc2Nv\nMRowGAYDVQQJExExMDEgU2Vjb25kIFN0cmVldDEOMAwGA1UEERMFOTQxMDUxFzAV\nBgNVBAoTDkhhc2hpQ29ycCBJbmMuMT4wPAYDVQQDEzVDb25zdWwgQWdlbnQgQ0Eg\nNDYyMjg2MDAxNTk3NzI1NDMzMTgxNDQ4OTAzODMyNjg5NzI1NDAeFw0yMjAzMjkx\nMTEyNDNaFw0yNzAzMjgxMTEyNDNaMIG3MQswCQYDVQQGEwJVUzELMAkGA1UECBMC\nQ0ExFjAUBgNVBAcTDVNhbiBGcmFuY2lzY28xGjAYBgNVBAkTETEwMSBTZWNvbmQg\nU3RyZWV0MQ4wDAYDVQQREwU5NDEwNTEXMBUGA1UEChMOSGFzaGlDb3JwIEluYy4x\nPjA8BgNVBAMTNUNvbnN1bCBBZ2VudCBDQSA0NjIyODYwMDE1OTc3MjU0MzMxODE0\nNDg5MDM4MzI2ODk3MjU0MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAERs73JA+K\n9xMorTz6fA5x8Dmin6l8pNgka3/Ye3SFWJD/0lKFTXEX7Li8+hXG31WMLdXgoWHS\nkL1HoLboV8hEAKN7MHkwDgYDVR0PAQH/BAQDAgGGMA8GA1UdEwEB/wQFMAMBAf8w\nKQYDVR0OBCIEICst9kpfDK0LtEbUghWf4ahjpzd7Mlh07OLT/e38PKDmMCsGA1Ud\nIwQkMCKAICst9kpfDK0LtEbUghWf4ahjpzd7Mlh07OLT/e38PKDmMAoGCCqGSM49\nBAMCA0gAMEUCIQCuk/n49np4m76jTFLk2zeiSi7UfubMeS2BD4bkMt6v/wIgbO0R\npTqCOYQr3cji1EpEQca95VCZ26lBEjqLQF3osGc=\n-----END CERTIFICATE-----\n" - ], - "private_key": "-----BEGIN EC PRIVATE KEY-----\nMHcCAQEEIA+DFWCFz+SujFCuWM3GpoTLPX8igerwMw+8efNbx7a+oAoGCCqGSM49\nAwEHoUQDQgAE7LdWJpna88mohlnuTyGJ+WZ3P6BCxGqBRWNJn3+JEoHhmaifx7Sq\nWLMCEB1UNbH5Z1esaS4h33Gb0pyyiCy19A==\n-----END EC PRIVATE KEY-----\n", - "cert": "-----BEGIN CERTIFICATE-----\nMIICmzCCAkGgAwIBAgIRAKZ77a2h+plK2yXFsW0kfgAwCgYIKoZIzj0EAwIwgbcx\nCzAJBgNVBAYTAlVTMQswCQYDVQQIEwJDQTEWMBQGA1UEBxMNU2FuIEZyYW5jaXNj\nbzEaMBgGA1UECRMRMTAxIFNlY29uZCBTdHJlZXQxDjAMBgNVBBETBTk0MTA1MRcw\nFQYDVQQKEw5IYXNoaUNvcnAgSW5jLjE+MDwGA1UEAxM1Q29uc3VsIEFnZW50IENB\nIDQ2MjI4NjAwMTU5NzcyNTQzMzE4MTQ0ODkwMzgzMjY4OTcyNTQwHhcNMjIwMzI5\nMTExMjUwWhcNMjMwMzI5MTExMjUwWjAcMRowGAYDVQQDExFzZXJ2ZXIuZGMxLmNv\nbnN1bDBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABOy3ViaZ2vPJqIZZ7k8hiflm\ndz+gQsRqgUVjSZ9/iRKB4Zmon8e0qlizAhAdVDWx+WdXrGkuId9xm9KcsogstfSj\ngccwgcQwDgYDVR0PAQH/BAQDAgWgMB0GA1UdJQQWMBQGCCsGAQUFBwMBBggrBgEF\nBQcDAjAMBgNVHRMBAf8EAjAAMCkGA1UdDgQiBCDaH9x1CRRqM5BYCMKBnAFyZjQq\nSY9IcJnhZUZIIJHU4jArBgNVHSMEJDAigCArLfZKXwytC7RG1IIVn+GoY6c3ezJY\ndOzi0/3t/Dyg5jAtBgNVHREEJjAkghFzZXJ2ZXIuZGMxLmNvbnN1bIIJbG9jYWxo\nb3N0hwR/AAABMAoGCCqGSM49BAMCA0gAMEUCIQCOxQHGF2483Cdd9nXcqAoOcxYP\nIqNP/WM03qyERyYNNQIgbtFBLIAgrhdXdjEvHMjU5ceHSwle/K0p0OTSIwSk8xI=\n-----END CERTIFICATE-----\n" - }, - "consul_config": "{\"acl\":{\"default_policy\":\"deny\",\"enable_token_persistence\":true,\"enabled\":true,\"tokens\":{\"agent\":\"74044c72-03c8-42b0-b57f-728bb22ca7fb\",\"initial_management\":\"74044c72-03c8-42b0-b57f-728bb22ca7fb\"}},\"auto_encrypt\":{\"allow_tls\":true},\"bootstrap_expect\":1,\"encrypt\":\"yUPhgtteok1/bHoVIoRnJMfOrKrb1TDDyWJRh9rlUjg=\",\"encrypt_verify_incoming\":true,\"encrypt_verify_outgoing\":true,\"ports\":{\"http\":-1,\"https\":8501},\"retry_join\":[],\"verify_incoming\":true,\"verify_outgoing\":true,\"verify_server_hostname\":true}" - } -}` -) - -var validBootstrapReponse *models.HashicorpCloudGlobalNetworkManager20220215AgentBootstrapResponse = &models.HashicorpCloudGlobalNetworkManager20220215AgentBootstrapResponse{ - Bootstrap: &models.HashicorpCloudGlobalNetworkManager20220215ClusterBootstrap{ - ID: "dc1", - GossipKey: "Wa6/XFAnYy0f9iqVH2iiG+yore3CqHSemUy4AIVTa/w=", - BootstrapExpect: 3, - ServerTLS: &models.HashicorpCloudGlobalNetworkManager20220215ServerTLS{ - CertificateAuthorities: []string{"-----BEGIN 
CERTIFICATE-----\nMIIC6TCCAo+gAwIBAgIQA3pUmJcy9uw8MNIDZPiaZjAKBggqhkjOPQQDAjCBtzEL\nMAkGA1UEBhMCVVMxCzAJBgNVBAgTAkNBMRYwFAYDVQQHEw1TYW4gRnJhbmNpc2Nv\nMRowGAYDVQQJExExMDEgU2Vjb25kIFN0cmVldDEOMAwGA1UEERMFOTQxMDUxFzAV\nBgNVBAoTDkhhc2hpQ29ycCBJbmMuMT4wPAYDVQQDEzVDb25zdWwgQWdlbnQgQ0Eg\nNDYyMjg2MDAxNTk3NzI1NDMzMTgxNDQ4OTAzODMyNjg5NzI1NDAeFw0yMjAzMjkx\nMTEyNDNaFw0yNzAzMjgxMTEyNDNaMIG3MQswCQYDVQQGEwJVUzELMAkGA1UECBMC\nQ0ExFjAUBgNVBAcTDVNhbiBGcmFuY2lzY28xGjAYBgNVBAkTETEwMSBTZWNvbmQg\nU3RyZWV0MQ4wDAYDVQQREwU5NDEwNTEXMBUGA1UEChMOSGFzaGlDb3JwIEluYy4x\nPjA8BgNVBAMTNUNvbnN1bCBBZ2VudCBDQSA0NjIyODYwMDE1OTc3MjU0MzMxODE0\nNDg5MDM4MzI2ODk3MjU0MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAERs73JA+K\n9xMorTz6fA5x8Dmin6l8pNgka3/Ye3SFWJD/0lKFTXEX7Li8+hXG31WMLdXgoWHS\nkL1HoLboV8hEAKN7MHkwDgYDVR0PAQH/BAQDAgGGMA8GA1UdEwEB/wQFMAMBAf8w\nKQYDVR0OBCIEICst9kpfDK0LtEbUghWf4ahjpzd7Mlh07OLT/e38PKDmMCsGA1Ud\nIwQkMCKAICst9kpfDK0LtEbUghWf4ahjpzd7Mlh07OLT/e38PKDmMAoGCCqGSM49\nBAMCA0gAMEUCIQCuk/n49np4m76jTFLk2zeiSi7UfubMeS2BD4bkMt6v/wIgbO0R\npTqCOYQr3cji1EpEQca95VCZ26lBEjqLQF3osGc=\n-----END CERTIFICATE-----\n"}, - PrivateKey: "-----BEGIN EC PRIVATE KEY-----\nMHcCAQEEIA+DFWCFz+SujFCuWM3GpoTLPX8igerwMw+8efNbx7a+oAoGCCqGSM49\nAwEHoUQDQgAE7LdWJpna88mohlnuTyGJ+WZ3P6BCxGqBRWNJn3+JEoHhmaifx7Sq\nWLMCEB1UNbH5Z1esaS4h33Gb0pyyiCy19A==\n-----END EC PRIVATE KEY-----\n", - Cert: "-----BEGIN CERTIFICATE-----\nMIICmzCCAkGgAwIBAgIRAKZ77a2h+plK2yXFsW0kfgAwCgYIKoZIzj0EAwIwgbcx\nCzAJBgNVBAYTAlVTMQswCQYDVQQIEwJDQTEWMBQGA1UEBxMNU2FuIEZyYW5jaXNj\nbzEaMBgGA1UECRMRMTAxIFNlY29uZCBTdHJlZXQxDjAMBgNVBBETBTk0MTA1MRcw\nFQYDVQQKEw5IYXNoaUNvcnAgSW5jLjE+MDwGA1UEAxM1Q29uc3VsIEFnZW50IENB\nIDQ2MjI4NjAwMTU5NzcyNTQzMzE4MTQ0ODkwMzgzMjY4OTcyNTQwHhcNMjIwMzI5\nMTExMjUwWhcNMjMwMzI5MTExMjUwWjAcMRowGAYDVQQDExFzZXJ2ZXIuZGMxLmNv\nbnN1bDBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABOy3ViaZ2vPJqIZZ7k8hiflm\ndz+gQsRqgUVjSZ9/iRKB4Zmon8e0qlizAhAdVDWx+WdXrGkuId9xm9KcsogstfSj\ngccwgcQwDgYDVR0PAQH/BAQDAgWgMB0GA1UdJQQWMBQGCCsGAQUFBwMBBggrBgEF\nBQcDAjAMBgNVHRMBAf8EAjAAMCkGA1UdDgQiBCDaH9x1CRRqM5BYCMKBnAFyZjQq\nSY9IcJnhZUZIIJHU4jArBgNVHSMEJDAigCArLfZKXwytC7RG1IIVn+GoY6c3ezJY\ndOzi0/3t/Dyg5jAtBgNVHREEJjAkghFzZXJ2ZXIuZGMxLmNvbnN1bIIJbG9jYWxo\nb3N0hwR/AAABMAoGCCqGSM49BAMCA0gAMEUCIQCOxQHGF2483Cdd9nXcqAoOcxYP\nIqNP/WM03qyERyYNNQIgbtFBLIAgrhdXdjEvHMjU5ceHSwle/K0p0OTSIwSk8xI=\n-----END CERTIFICATE-----\n"}, - ConsulConfig: "{\"acl\":{\"default_policy\":\"deny\",\"enable_token_persistence\":true,\"enabled\":true,\"tokens\":{\"agent\":\"74044c72-03c8-42b0-b57f-728bb22ca7fb\",\"initial_management\":\"74044c72-03c8-42b0-b57f-728bb22ca7fb\"}},\"auto_encrypt\":{\"allow_tls\":true},\"bootstrap_expect\":1,\"encrypt\":\"yUPhgtteok1/bHoVIoRnJMfOrKrb1TDDyWJRh9rlUjg=\",\"encrypt_verify_incoming\":true,\"encrypt_verify_outgoing\":true,\"ports\":{\"http\":-1,\"https\":8501},\"retry_join\":[],\"verify_incoming\":true,\"verify_outgoing\":true,\"verify_server_hostname\":true}", - }, - Cluster: &models.HashicorpCloudGlobalNetworkManager20220215Cluster{ - ID: "dc1", - BootstrapExpect: 3, - }, -} - -var hcpConfig *HCPConfig = &HCPConfig{ - ResourceID: hcpResourceID, - ClientID: hcpClientID, - ClientSecret: hcpClientSecret, - AuthURL: "https://foobar", - APIHostname: "https://foo.bar", - ScadaAddress: "10.10.10.10", -} - -var validBootstrapConfig *CloudBootstrapConfig = &CloudBootstrapConfig{ - HCPConfig: *hcpConfig, - ConsulConfig: ConsulConfig{ - ACL: ACL{ - Tokens: Tokens{ - Agent: "74044c72-03c8-42b0-b57f-728bb22ca7fb", - InitialManagement: "74044c72-03c8-42b0-b57f-728bb22ca7fb", - }, - }, - }, - BootstrapResponse: 
validBootstrapReponse, -} - -func TestGetValueMap(t *testing.T) { - // Create fake k8s. - k8s := fake.NewSimpleClientset() - namespace := "consul" - - // Start the mock HCP server. - hcpMockServer := httptest.NewUnstartedServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.Header().Set("content-type", "application/json") - if r != nil && r.URL.Path == "/global-network-manager/2022-02-15/organizations/ccbdd191-5dc3-4a73-9e05-6ac30ca67992/projects/36019e0d-ed59-4df6-9990-05bb7fc793b6/clusters/prod-on-prem/agent/bootstrap_config" && - r.Method == "GET" { - w.Write([]byte(validResponse)) - } else { - w.Write([]byte(` - { - "access_token": "dummy-token" - } - `)) - } - })) - hcpMockServer.StartTLS() - t.Cleanup(hcpMockServer.Close) - mockServerURL, err := url.Parse(hcpMockServer.URL) - require.NoError(t, err) - os.Setenv("HCP_AUTH_URL", hcpMockServer.URL) - os.Setenv("HCP_API_HOST", mockServerURL.Host) - os.Setenv("HCP_CLIENT_ID", "fGY34fkOxcQmpkcygQmGHQZkEcLDhBde") - os.Setenv("HCP_CLIENT_SECRET", "8EWngREObMe90HNDN6oQv3YKQlRtVkg-28AgZylz1en0DHwyiE2pYCbwi61oF8dr") - bsConfig := getDeepCopyOfValidBootstrapConfig() - bsConfig.HCPConfig.APIHostname = mockServerURL.Host - bsConfig.HCPConfig.AuthURL = hcpMockServer.URL - - testCases := []struct { - description string - installer *CloudPreset - postProcessingFunc func() - requireCheck func() - }{ - { - "Should save secrets when SkipSavingSecrets is false.", - &CloudPreset{ - HCPConfig: &bsConfig.HCPConfig, - KubernetesClient: k8s, - KubernetesNamespace: namespace, - UI: terminal.NewBasicUI(context.Background()), - HTTPClient: hcpMockServer.Client(), - Context: context.Background(), - }, - func() { - deleteSecrets(k8s) - }, - func() { - checkAllSecretsWereSaved(t, k8s, bsConfig) - }, - }, - { - "Should not save secrets when SkipSavingSecrets is true.", - &CloudPreset{ - HCPConfig: &bsConfig.HCPConfig, - KubernetesClient: k8s, - KubernetesNamespace: namespace, - UI: terminal.NewBasicUI(context.Background()), - SkipSavingSecrets: true, - HTTPClient: hcpMockServer.Client(), - Context: context.Background(), - }, - func() { - deleteSecrets(k8s) - }, - func() { - checkAllSecretsWereSaved(t, k8s, bsConfig) - }, - }, - { - "Should not save api-hostname, scada-address, or auth-url keys as empty strings if they are not configured.", - &CloudPreset{ - HCPConfig: &HCPConfig{ - ResourceID: hcpResourceID, - ClientID: hcpClientID, - ClientSecret: hcpClientSecret, - }, - KubernetesClient: k8s, - KubernetesNamespace: namespace, - UI: terminal.NewBasicUI(context.Background()), - SkipSavingSecrets: false, - HTTPClient: hcpMockServer.Client(), - Context: context.Background(), - }, - func() { - deleteSecrets(k8s) - }, - func() { - // Check the hcp resource id secret is as expected. - ensureSecretKeyValueMatchesExpected(t, k8s, secretNameHCPResourceID, secretKeyHCPResourceID, - bsConfig.HCPConfig.ResourceID, corev1.SecretTypeOpaque) - - // Check the hcp client id secret is as expected. - ensureSecretKeyValueMatchesExpected(t, k8s, secretNameHCPClientID, secretKeyHCPClientID, - bsConfig.HCPConfig.ClientID, corev1.SecretTypeOpaque) - - // Check the hcp client secret secret is as expected. - ensureSecretKeyValueMatchesExpected(t, k8s, secretNameHCPClientSecret, secretKeyHCPClientSecret, - bsConfig.HCPConfig.ClientSecret, corev1.SecretTypeOpaque) - - // Check the bootstrap token secret is as expected. 
- ensureSecretKeyValueMatchesExpected(t, k8s, secretNameBootstrapToken, secretKeyBootstrapToken, - bsConfig.ConsulConfig.ACL.Tokens.InitialManagement, corev1.SecretTypeOpaque) - - // Check the gossip key secret is as expected. - ensureSecretKeyValueMatchesExpected(t, k8s, secretNameGossipKey, secretKeyGossipKey, - bsConfig.BootstrapResponse.Bootstrap.GossipKey, corev1.SecretTypeOpaque) - - // Check the server cert secret is as expected. - ensureSecretKeyValueMatchesExpected(t, k8s, secretNameServerCert, corev1.TLSCertKey, - bsConfig.BootstrapResponse.Bootstrap.ServerTLS.Cert, corev1.SecretTypeTLS) - ensureSecretKeyValueMatchesExpected(t, k8s, secretNameServerCert, corev1.TLSPrivateKeyKey, - bsConfig.BootstrapResponse.Bootstrap.ServerTLS.PrivateKey, corev1.SecretTypeTLS) - - // Check the server CA secret is as expected. - ensureSecretKeyValueMatchesExpected(t, k8s, secretNameServerCA, corev1.TLSCertKey, - bsConfig.BootstrapResponse.Bootstrap.ServerTLS.CertificateAuthorities[0], corev1.SecretTypeOpaque) - - // Check that the HCP scada address, auth url, and api hostname are not saved. - hcpAuthURLSecret, _ := k8s.CoreV1().Secrets(namespace).Get(context.Background(), secretNameHCPAuthURL, metav1.GetOptions{}) - require.Nil(t, hcpAuthURLSecret) - hcpApiHostnameSecret, _ := k8s.CoreV1().Secrets(namespace).Get(context.Background(), secretNameHCPAPIHostname, metav1.GetOptions{}) - require.Nil(t, hcpApiHostnameSecret) - hcpScadaAddress, _ := k8s.CoreV1().Secrets(namespace).Get(context.Background(), secretNameHCPScadaAddress, metav1.GetOptions{}) - require.Nil(t, hcpScadaAddress) - }, - }, - } - - for _, tc := range testCases { - t.Run(tc.description, func(t *testing.T) { - config, err := tc.installer.GetValueMap() - require.NoError(t, err) - require.NotNil(t, config) - if tc.installer.SkipSavingSecrets { - require.True(t, checkSecretsWereNotSaved(k8s)) - } else { - tc.requireCheck() - } - tc.postProcessingFunc() - }) - } - os.Unsetenv("HCP_AUTH_URL") - os.Unsetenv("HCP_API_HOST") - os.Unsetenv("HCP_CLIENT_ID") - os.Unsetenv("HCP_CLIENT_SECRET") -} - -// TestParseBootstrapConfigResponse tests that the response from the agent bootstrap -// config endpoint can be converted into a CloudBootstrapConfig object. -func TestParseBootstrapConfigResponse(t *testing.T) { - testCases := []struct { - description string - input string - expectedConfig *CloudBootstrapConfig - }{ - { - "Should properly parse a valid response.", - validResponse, - validBootstrapConfig, - }, - } - - cloudPreset := &CloudPreset{ - HCPConfig: hcpConfig, - KubernetesNamespace: namespace, - UI: terminal.NewBasicUI(context.Background()), - } - for _, tc := range testCases { - t.Run(tc.description, func(t *testing.T) { - config, err := cloudPreset.parseBootstrapConfigResponse(validBootstrapReponse) - require.NoError(t, err) - require.Equal(t, tc.expectedConfig, config) - }) - } -} - -func TestSaveSecretsFromBootstrapConfig(t *testing.T) { - t.Parallel() - - // Create fake k8s. 
- k8s := fake.NewSimpleClientset() - - testCases := []struct { - description string - expectsError bool - expectedErrorMessage string - preProcessingFunc func() - postProcessingFunc func() - }{ - { - "Properly saves secrets with a full bootstrapConfig.", - false, - "", - func() {}, - func() { - deleteSecrets(k8s) - }, - }, - { - "Errors when hcp client id secret already exists", - true, - fmt.Sprintf("'%s' secret in '%s' namespace already exists", expectedSecretNameHCPClientId, namespace), - func() { - savePlaceholderSecret(expectedSecretNameHCPClientId, k8s) - }, - func() { - deleteSecrets(k8s) - }, - }, - { - "Errors when hcp client secret secret already exists", - true, - fmt.Sprintf("'%s' secret in '%s' namespace already exists", expectedSecretNameHCPClientSecret, namespace), - func() { - savePlaceholderSecret(expectedSecretNameHCPClientSecret, k8s) - }, - func() { - deleteSecrets(k8s) - }, - }, - { - "Errors when hcp resource id secret already exists", - true, - fmt.Sprintf("'%s' secret in '%s' namespace already exists", expectedSecretNameHCPResourceId, namespace), - func() { - savePlaceholderSecret(expectedSecretNameHCPResourceId, k8s) - }, - func() { - deleteSecrets(k8s) - }, - }, - { - "Errors when hcp auth url secret already exists", - true, - fmt.Sprintf("'%s' secret in '%s' namespace already exists", expectedSecretNameHCPAuthURL, namespace), - func() { - savePlaceholderSecret(expectedSecretNameHCPAuthURL, k8s) - }, - func() { - deleteSecrets(k8s) - }, - }, - { - "Errors when hcp api hostname secret already exists", - true, - fmt.Sprintf("'%s' secret in '%s' namespace already exists", expectedSecretNameHCPApiHostname, namespace), - func() { - savePlaceholderSecret(expectedSecretNameHCPApiHostname, k8s) - }, - func() { - deleteSecrets(k8s) - }, - }, - { - "Errors when hcp scada address secret already exists", - true, - fmt.Sprintf("'%s' secret in '%s' namespace already exists", expectedSecretNameHCPScadaAddress, namespace), - func() { - savePlaceholderSecret(expectedSecretNameHCPScadaAddress, k8s) - }, - func() { - deleteSecrets(k8s) - }, - }, - { - "Errors when bootstrap token secret already exists", - true, - fmt.Sprintf("'%s' secret in '%s' namespace already exists", expectedSecretNameBootstrap, namespace), - func() { - savePlaceholderSecret(expectedSecretNameBootstrap, k8s) - }, - func() { - deleteSecrets(k8s) - }, - }, - { - "Errors when gossip key secret already exists", - true, - fmt.Sprintf("'%s' secret in '%s' namespace already exists", expectedSecretNameGossipKey, namespace), - func() { - savePlaceholderSecret(expectedSecretNameGossipKey, k8s) - }, - func() { - deleteSecrets(k8s) - }, - }, - { - "Errors when server cert secret already exists", - true, - fmt.Sprintf("'%s' secret in '%s' namespace already exists", expectedSecretNameServerCert, namespace), - func() { - savePlaceholderSecret(expectedSecretNameServerCert, k8s) - }, - func() { - deleteSecrets(k8s) - }, - }, - { - "Errors when server CA secret already exists", - true, - fmt.Sprintf("'%s' secret in '%s' namespace already exists", expectedSecretNameServerCA, namespace), - func() { - savePlaceholderSecret(expectedSecretNameServerCA, k8s) - }, - func() { - deleteSecrets(k8s) - }, - }, - } - cloudPreset := &CloudPreset{ - HCPConfig: hcpConfig, - KubernetesClient: k8s, - KubernetesNamespace: namespace, - UI: terminal.NewBasicUI(context.Background()), - } - - for _, tc := range testCases { - t.Run(tc.description, func(t *testing.T) { - tc.preProcessingFunc() - err := 
cloudPreset.saveSecretsFromBootstrapConfig(validBootstrapConfig) - if tc.expectsError { - require.Error(t, err) - require.Equal(t, tc.expectedErrorMessage, err.Error()) - } else { - require.NoError(t, err) - require.Equal(t, expectedSecretNameBootstrap, secretNameBootstrapToken) - require.Equal(t, expectedSecretNameGossipKey, secretNameGossipKey) - require.Equal(t, expectedSecretNameHCPClientId, secretNameHCPClientID) - require.Equal(t, expectedSecretNameHCPClientSecret, secretNameHCPClientSecret) - require.Equal(t, expectedSecretNameHCPResourceId, secretNameHCPResourceID) - require.Equal(t, expectedSecretNameServerCA, secretNameServerCA) - require.Equal(t, expectedSecretNameServerCert, secretNameServerCert) - - checkAllSecretsWereSaved(t, k8s, validBootstrapConfig) - - } - tc.postProcessingFunc() - }) - } - -} - -func TestGetHelmConfigWithMapSecretNames(t *testing.T) { - t.Parallel() - - const expectedFull = `connectInject: - enabled: true -controller: - enabled: true -global: - acls: - bootstrapToken: - secretKey: token - secretName: consul-bootstrap-token - manageSystemACLs: true - cloud: - apiHost: - secretKey: api-hostname - secretName: consul-hcp-api-host - authUrl: - secretKey: auth-url - secretName: consul-hcp-auth-url - clientId: - secretKey: client-id - secretName: consul-hcp-client-id - clientSecret: - secretKey: client-secret - secretName: consul-hcp-client-secret - enabled: true - resourceId: - secretKey: resource-id - secretName: consul-hcp-resource-id - scadaAddress: - secretKey: scada-address - secretName: consul-hcp-scada-address - datacenter: dc1 - gossipEncryption: - secretKey: key - secretName: consul-gossip-key - tls: - caCert: - secretKey: tls.crt - secretName: consul-server-ca - enableAutoEncrypt: true - enabled: true -server: - affinity: null - replicas: 3 - serverCert: - secretName: consul-server-cert -` - - const expectedWithoutOptional = `connectInject: - enabled: true -controller: - enabled: true -global: - acls: - bootstrapToken: - secretKey: token - secretName: consul-bootstrap-token - manageSystemACLs: true - cloud: - clientId: - secretKey: client-id - secretName: consul-hcp-client-id - clientSecret: - secretKey: client-secret - secretName: consul-hcp-client-secret - enabled: true - resourceId: - secretKey: resource-id - secretName: consul-hcp-resource-id - datacenter: dc1 - gossipEncryption: - secretKey: key - secretName: consul-gossip-key - tls: - caCert: - secretKey: tls.crt - secretName: consul-server-ca - enableAutoEncrypt: true - enabled: true -server: - affinity: null - replicas: 3 - serverCert: - secretName: consul-server-cert -` - - cloudPreset := &CloudPreset{} - - testCases := []struct { - description string - config *CloudBootstrapConfig - expectedYaml string - }{ - {"Config including optional parameters", - &CloudBootstrapConfig{ - BootstrapResponse: &models.HashicorpCloudGlobalNetworkManager20220215AgentBootstrapResponse{ - Cluster: &models.HashicorpCloudGlobalNetworkManager20220215Cluster{ - BootstrapExpect: 3, - ID: "dc1", - }, - }, - HCPConfig: HCPConfig{ - ResourceID: "consul-hcp-resource-id", - ClientID: "consul-hcp-client-id", - ClientSecret: "consul-hcp-client-secret", - AuthURL: "consul-hcp-auth-url", - APIHostname: "consul-hcp-api-host", - ScadaAddress: "consul-hcp-scada-address", - }, - }, - expectedFull, - }, - {"Config without optional parameters", - &CloudBootstrapConfig{ - BootstrapResponse: &models.HashicorpCloudGlobalNetworkManager20220215AgentBootstrapResponse{ - Cluster: &models.HashicorpCloudGlobalNetworkManager20220215Cluster{ - 
BootstrapExpect: 3, - ID: "dc1", - }, - }, - HCPConfig: HCPConfig{ - ResourceID: "consul-hcp-resource-id", - ClientID: "consul-hcp-client-id", - ClientSecret: "consul-hcp-client-secret", - }, - }, - expectedWithoutOptional, - }, - } - for _, tc := range testCases { - t.Run(tc.description, func(t *testing.T) { - cloudHelmValues := cloudPreset.getHelmConfigWithMapSecretNames(tc.config) - require.NotNil(t, cloudHelmValues) - valuesYaml, err := yaml.Marshal(cloudHelmValues) - yml := string(valuesYaml) - require.NoError(t, err) - require.Equal(t, tc.expectedYaml, yml) - }) - } - -} - -func savePlaceholderSecret(secretName string, k8sClient kubernetes.Interface) { - data := map[string][]byte{} - secret := &corev1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: secretName, - Namespace: namespace, - Labels: map[string]string{common.CLILabelKey: common.CLILabelValue}, - }, - Data: data, - Type: corev1.SecretTypeOpaque, - } - k8sClient.CoreV1().Secrets(namespace).Create(context.Background(), secret, metav1.CreateOptions{}) -} - -func deleteSecrets(k8sClient kubernetes.Interface) { - k8sClient.CoreV1().Secrets(namespace).Delete(context.Background(), expectedSecretNameHCPClientId, metav1.DeleteOptions{}) - k8sClient.CoreV1().Secrets(namespace).Delete(context.Background(), expectedSecretNameHCPClientSecret, metav1.DeleteOptions{}) - k8sClient.CoreV1().Secrets(namespace).Delete(context.Background(), expectedSecretNameHCPResourceId, metav1.DeleteOptions{}) - k8sClient.CoreV1().Secrets(namespace).Delete(context.Background(), expectedSecretNameHCPAuthURL, metav1.DeleteOptions{}) - k8sClient.CoreV1().Secrets(namespace).Delete(context.Background(), expectedSecretNameHCPApiHostname, metav1.DeleteOptions{}) - k8sClient.CoreV1().Secrets(namespace).Delete(context.Background(), expectedSecretNameHCPScadaAddress, metav1.DeleteOptions{}) - k8sClient.CoreV1().Secrets(namespace).Delete(context.Background(), expectedSecretNameBootstrap, metav1.DeleteOptions{}) - k8sClient.CoreV1().Secrets(namespace).Delete(context.Background(), expectedSecretNameGossipKey, metav1.DeleteOptions{}) - k8sClient.CoreV1().Secrets(namespace).Delete(context.Background(), expectedSecretNameServerCert, metav1.DeleteOptions{}) - k8sClient.CoreV1().Secrets(namespace).Delete(context.Background(), expectedSecretNameServerCA, metav1.DeleteOptions{}) -} - -func checkAllSecretsWereSaved(t require.TestingT, k8s kubernetes.Interface, expectedConfig *CloudBootstrapConfig) { - - // Check that namespace is created - _, err := k8s.CoreV1().Namespaces().Get(context.Background(), namespace, metav1.GetOptions{}) - require.NoError(t, err) - - // Check the hcp resource id secret is as expected. - ensureSecretKeyValueMatchesExpected(t, k8s, secretNameHCPResourceID, secretKeyHCPResourceID, - expectedConfig.HCPConfig.ResourceID, corev1.SecretTypeOpaque) - - // Check the hcp client id secret is as expected. - ensureSecretKeyValueMatchesExpected(t, k8s, secretNameHCPClientID, secretKeyHCPClientID, - expectedConfig.HCPConfig.ClientID, corev1.SecretTypeOpaque) - - // Check the hcp client secret secret is as expected. - ensureSecretKeyValueMatchesExpected(t, k8s, secretNameHCPClientSecret, secretKeyHCPClientSecret, - expectedConfig.HCPConfig.ClientSecret, corev1.SecretTypeOpaque) - - // Check the hcp auth URL secret is as expected. - ensureSecretKeyValueMatchesExpected(t, k8s, secretNameHCPAuthURL, secretKeyHCPAuthURL, - expectedConfig.HCPConfig.AuthURL, corev1.SecretTypeOpaque) - - // Check the hcp api hostname secret is as expected. 
- ensureSecretKeyValueMatchesExpected(t, k8s, secretNameHCPAPIHostname, secretKeyHCPAPIHostname, - expectedConfig.HCPConfig.APIHostname, corev1.SecretTypeOpaque) - - // Check the hcp scada address secret is as expected. - ensureSecretKeyValueMatchesExpected(t, k8s, secretNameHCPScadaAddress, secretKeyHCPScadaAddress, - expectedConfig.HCPConfig.ScadaAddress, corev1.SecretTypeOpaque) - - // Check the bootstrap token secret is as expected. - ensureSecretKeyValueMatchesExpected(t, k8s, secretNameBootstrapToken, secretKeyBootstrapToken, - expectedConfig.ConsulConfig.ACL.Tokens.InitialManagement, corev1.SecretTypeOpaque) - - // Check the gossip key secret is as expected. - ensureSecretKeyValueMatchesExpected(t, k8s, secretNameGossipKey, secretKeyGossipKey, - expectedConfig.BootstrapResponse.Bootstrap.GossipKey, corev1.SecretTypeOpaque) - - // Check the server cert secret is as expected. - ensureSecretKeyValueMatchesExpected(t, k8s, secretNameServerCert, corev1.TLSCertKey, - expectedConfig.BootstrapResponse.Bootstrap.ServerTLS.Cert, corev1.SecretTypeTLS) - ensureSecretKeyValueMatchesExpected(t, k8s, secretNameServerCert, corev1.TLSPrivateKeyKey, - expectedConfig.BootstrapResponse.Bootstrap.ServerTLS.PrivateKey, corev1.SecretTypeTLS) - - // Check the server CA secret is as expected. - ensureSecretKeyValueMatchesExpected(t, k8s, secretNameServerCA, corev1.TLSCertKey, - expectedConfig.BootstrapResponse.Bootstrap.ServerTLS.CertificateAuthorities[0], corev1.SecretTypeOpaque) -} - -func ensureSecretKeyValueMatchesExpected(t require.TestingT, k8s kubernetes.Interface, - secretName, secretKey, - expectedValue string, expectedSecretType corev1.SecretType) { - secret, err := k8s.CoreV1().Secrets(namespace).Get(context.Background(), secretName, metav1.GetOptions{}) - require.NoError(t, err) - require.Equal(t, expectedValue, string(secret.Data[secretKey])) - require.Equal(t, expectedSecretType, secret.Type) - require.Equal(t, common.CLILabelValue, secret.Labels[common.CLILabelKey]) -} - -func checkSecretsWereNotSaved(k8s kubernetes.Interface) bool { - ns, _ := k8s.CoreV1().Namespaces().Get(context.Background(), namespace, metav1.GetOptions{}) - hcpClientIdSecret, _ := k8s.CoreV1().Secrets(namespace).Get(context.Background(), secretNameHCPClientID, metav1.GetOptions{}) - hcpClientSecretSecret, _ := k8s.CoreV1().Secrets(namespace).Get(context.Background(), secretNameHCPClientSecret, metav1.GetOptions{}) - hcpResourceIdSecret, _ := k8s.CoreV1().Secrets(namespace).Get(context.Background(), secretNameHCPResourceID, metav1.GetOptions{}) - bootstrapSecret, _ := k8s.CoreV1().Secrets(namespace).Get(context.Background(), secretNameBootstrapToken, metav1.GetOptions{}) - gossipKeySecret, _ := k8s.CoreV1().Secrets(namespace).Get(context.Background(), secretNameGossipKey, metav1.GetOptions{}) - serverCertSecret, _ := k8s.CoreV1().Secrets(namespace).Get(context.Background(), secretNameServerCert, metav1.GetOptions{}) - serverCASecret, _ := k8s.CoreV1().Secrets(namespace).Get(context.Background(), secretNameServerCA, metav1.GetOptions{}) - return ns == nil && hcpClientIdSecret == nil && hcpClientSecretSecret == nil && - hcpResourceIdSecret == nil && bootstrapSecret == nil && - gossipKeySecret == nil && serverCASecret == nil && serverCertSecret == nil -} - -func getDeepCopyOfValidBootstrapConfig() *CloudBootstrapConfig { - data, err := json.Marshal(validBootstrapConfig) - if err != nil { - panic(err) - } - - var copy *CloudBootstrapConfig - if err := json.Unmarshal(data, &copy); err != nil { - panic(err) - } - return copy -} 
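The getDeepCopyOfValidBootstrapConfig helper above clones the shared fixture through a JSON marshal/unmarshal round trip so each test case can mutate its own copy without contaminating the others. A minimal, self-contained sketch of the same pattern follows; the Config type and its fields are hypothetical stand-ins, not types from this repo, and note that a JSON round trip only copies exported, JSON-visible fields.

    package main

    import (
        "encoding/json"
        "fmt"
    )

    // Config stands in for any JSON-serializable fixture type.
    type Config struct {
        Name   string            `json:"name"`
        Labels map[string]string `json:"labels"`
    }

    // deepCopy clones src by marshaling it to JSON and unmarshaling
    // the bytes into a fresh value.
    func deepCopy(src *Config) (*Config, error) {
        data, err := json.Marshal(src)
        if err != nil {
            return nil, err
        }
        var dst Config
        if err := json.Unmarshal(data, &dst); err != nil {
            return nil, err
        }
        return &dst, nil
    }

    func main() {
        orig := &Config{Name: "fixture", Labels: map[string]string{"env": "test"}}
        cp, err := deepCopy(orig)
        if err != nil {
            panic(err)
        }
        cp.Labels["env"] = "changed" // mutating the copy leaves orig untouched
        fmt.Println(orig.Labels["env"], cp.Labels["env"]) // prints: test changed
    }

The deleted helper panics rather than returning an error because a failure there indicates a broken test fixture, not a failed assertion.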
diff --git a/cli/preset/demo.go b/cli/preset/demo.go deleted file mode 100644 index bf6c0bb122..0000000000 --- a/cli/preset/demo.go +++ /dev/null @@ -1,43 +0,0 @@ -package preset - -import "github.com/hashicorp/consul-k8s/cli/config" - -// DemoPreset struct is an implementation of the Preset interface that provides -// a Helm values map that is used during installation and represents -// the quickstart configuration for Consul on Kubernetes. -type DemoPreset struct{} - -// GetValueMap returns the Helm value map representing the quickstart -// configuration for Consul on Kubernetes. It does the following: -// - sets server replicas to 1. -// - enables the service mesh. -// - enables the ui. -// - enables metrics. -// - enables Prometheus. -func (i *DemoPreset) GetValueMap() (map[string]interface{}, error) { - values := ` -global: - name: consul - metrics: - enabled: true - enableAgentMetrics: true -connectInject: - enabled: true - metrics: - defaultEnabled: true - defaultEnableMerging: true - enableGatewayMetrics: true -server: - replicas: 1 -controller: - enabled: true -ui: - enabled: true - service: - enabled: true -prometheus: - enabled: true -` - - return config.ConvertToMap(values), nil -} diff --git a/cli/preset/preset.go b/cli/preset/preset.go deleted file mode 100644 index 2eb2c94bc4..0000000000 --- a/cli/preset/preset.go +++ /dev/null @@ -1,84 +0,0 @@ -package preset - -import ( - "fmt" - "os" ) - -const ( - PresetSecure = "secure" - PresetQuickstart = "quickstart" - PresetCloud = "cloud" - - EnvHCPClientID = "HCP_CLIENT_ID" - EnvHCPClientSecret = "HCP_CLIENT_SECRET" - EnvHCPAuthURL = "HCP_AUTH_URL" - EnvHCPAPIHost = "HCP_API_HOST" - EnvHCPScadaAddress = "HCP_SCADA_ADDRESS" -) - -// Presets is a list of all the available presets for use with the CLI's install -// and uninstall commands. -var Presets = []string{PresetCloud, PresetQuickstart, PresetSecure} - -// Preset is the interface that each instance must implement. The demo and -// secure presets merely return a pre-configured value map. The cloud preset -// must fetch configuration from HCP, save various secrets from the response, -// and map the secret names into the value map. -type Preset interface { - GetValueMap() (map[string]interface{}, error) -} - -type GetPresetConfig struct { - Name string - CloudPreset *CloudPreset -} - -// GetPreset is a factory function that, given a configuration, produces a -// struct that implements the Preset interface based on the name in the -// configuration. If the string is not recognized, an error is returned. This -// helper function is used by both the CLI install and upgrade commands. 
-func GetPreset(config *GetPresetConfig) (Preset, error) { - switch config.Name { - case PresetCloud: - return config.CloudPreset, nil - case PresetQuickstart: - return &QuickstartPreset{}, nil - case PresetSecure: - return &SecurePreset{}, nil - } - return nil, fmt.Errorf("'%s' is not a valid preset", config.Name) -} - -func GetHCPPresetFromEnv(resourceID string) *HCPConfig { - hcpConfig := &HCPConfig{ - ResourceID: resourceID, - } - - // Read clientID from environment - if clientID, ok := os.LookupEnv(EnvHCPClientID); ok { - hcpConfig.ClientID = clientID - } - - // Read clientSecret from environment - if clientSecret, ok := os.LookupEnv(EnvHCPClientSecret); ok { - hcpConfig.ClientSecret = clientSecret - } - - // Read authURL from environment - if authURL, ok := os.LookupEnv(EnvHCPAuthURL); ok { - hcpConfig.AuthURL = authURL - } - - // Read apiHost from environment - if apiHost, ok := os.LookupEnv(EnvHCPAPIHost); ok { - hcpConfig.APIHostname = apiHost - } - - // Read scadaAddress from environment - if scadaAddress, ok := os.LookupEnv(EnvHCPScadaAddress); ok { - hcpConfig.ScadaAddress = scadaAddress - } - - return hcpConfig -} diff --git a/cli/preset/preset_test.go b/cli/preset/preset_test.go deleted file mode 100644 index c39c11e80f..0000000000 --- a/cli/preset/preset_test.go +++ /dev/null @@ -1,78 +0,0 @@ -package preset - -import ( - "os" - "testing" - - "github.com/stretchr/testify/require" -) - -func TestGetHCPPresetFromEnv(t *testing.T) { - const ( - scadaAddress = "scada-address" - clientID = "client-id" - clientSecret = "client-secret" - apiHost = "api-host" - authURL = "auth-url" - resourceID = "resource-id" - ) - - testCases := []struct { - description string - resourceID string - preProcessingFunc func() - postProcessingFunc func() - expectedPreset *HCPConfig - }{ - { - "Should populate properties in addition to resourceID on HCPConfig when environment variables are set.", - resourceID, - func() { - os.Setenv(EnvHCPClientID, clientID) - os.Setenv(EnvHCPClientSecret, clientSecret) - os.Setenv(EnvHCPAPIHost, apiHost) - os.Setenv(EnvHCPAuthURL, authURL) - os.Setenv(EnvHCPScadaAddress, scadaAddress) - }, - func() { - os.Unsetenv(EnvHCPClientID) - os.Unsetenv(EnvHCPClientSecret) - os.Unsetenv(EnvHCPAPIHost) - os.Unsetenv(EnvHCPAuthURL) - os.Unsetenv(EnvHCPScadaAddress) - }, - &HCPConfig{ - ResourceID: resourceID, - ClientID: clientID, - ClientSecret: clientSecret, - AuthURL: authURL, - APIHostname: apiHost, - ScadaAddress: scadaAddress, - }, - }, - { - "Should only populate resourceID on HCPConfig when environment variables are not set.", - resourceID, - func() { - os.Unsetenv(EnvHCPClientID) - os.Unsetenv(EnvHCPClientSecret) - os.Unsetenv(EnvHCPAPIHost) - os.Unsetenv(EnvHCPAuthURL) - os.Unsetenv(EnvHCPScadaAddress) - }, - func() {}, - &HCPConfig{ - ResourceID: resourceID, - }, - }, - } - - for _, testCase := range testCases { - testCase.preProcessingFunc() - defer testCase.postProcessingFunc() - t.Run(testCase.description, func(t *testing.T) { - hcpPreset := GetHCPPresetFromEnv(testCase.resourceID) - require.Equal(t, testCase.expectedPreset, hcpPreset) - }) - } -} diff --git a/cli/preset/quickstart.go b/cli/preset/quickstart.go deleted file mode 100644 index 52b3f000b1..0000000000 --- a/cli/preset/quickstart.go +++ /dev/null @@ -1,43 +0,0 @@ -package preset - -import "github.com/hashicorp/consul-k8s/cli/config" - -// QuickstartPreset struct is an implementation of the Preset interface that provides -// a Helm values map that is used during installation and represents -// the 
quickstart configuration for Consul on Kubernetes. -type QuickstartPreset struct{} - -// GetValueMap returns the Helm value map representing the quickstart -// configuration for Consul on Kubernetes. It does the following: -// - sets server replicas to 1. -// - enables the service mesh. -// - enables the ui. -// - enables metrics. -// - enables Prometheus. -func (i *QuickstartPreset) GetValueMap() (map[string]interface{}, error) { - values := ` -global: - name: consul - metrics: - enabled: true - enableAgentMetrics: true -connectInject: - enabled: true - metrics: - defaultEnabled: true - defaultEnableMerging: true - enableGatewayMetrics: true -server: - replicas: 1 -controller: - enabled: true -ui: - enabled: true - service: - enabled: true -prometheus: - enabled: true -` - - return config.ConvertToMap(values), nil -} diff --git a/cli/preset/secure.go b/cli/preset/secure.go deleted file mode 100644 index ded436804c..0000000000 --- a/cli/preset/secure.go +++ /dev/null @@ -1,37 +0,0 @@ -package preset - -import "github.com/hashicorp/consul-k8s/cli/config" - -// SecurePreset struct is an implementation of the Preset interface that provides -// a Helm values map that is used during installation and represents -// the secure configuration for Consul on Kubernetes. -type SecurePreset struct{} - -// GetValueMap returns the Helm value map representing the secure -// configuration for Consul on Kubernetes. It does the following: -// - sets server replicas to 1. -// - enables the service mesh. -// - enables tls. -// - enables gossip encryption. -// - enables ACLs. -func (i *SecurePreset) GetValueMap() (map[string]interface{}, error) { - values := ` -global: - name: consul - gossipEncryption: - autoGenerate: true - tls: - enabled: true - enableAutoEncrypt: true - acls: - manageSystemACLs: true -server: - replicas: 1 -connectInject: - enabled: true -controller: - enabled: true -` - - return config.ConvertToMap(values), nil -} diff --git a/cli/version/version.go b/cli/version/version.go index 35c4d195a9..082ffeb1f3 100644 --- a/cli/version/version.go +++ b/cli/version/version.go @@ -14,12 +14,12 @@ var ( // // Version must conform to the format expected by // github.com/hashicorp/go-version for tests to work. - Version = "1.0.0" + Version = "0.49.0" // A pre-release marker for the version. If this is "" (empty string) // then it means that it is a final release. Otherwise, this is a pre-release // such as "dev" (in development), "beta", "rc1", etc. - VersionPrerelease = "dev" + VersionPrerelease = "" ) // GetHumanVersion composes the parts of the version in a way that's suitable diff --git a/control-plane/Dockerfile b/control-plane/Dockerfile index 33e42966bb..802ec4fab2 100644 --- a/control-plane/Dockerfile +++ b/control-plane/Dockerfile @@ -37,7 +37,7 @@ LABEL name=${BIN_NAME} \ ENV BIN_NAME=${BIN_NAME} ENV VERSION=${VERSION} -RUN apk add --no-cache ca-certificates gnupg libcap openssl su-exec iputils libc6-compat iptables +RUN apk add --no-cache ca-certificates curl gnupg libcap openssl su-exec iputils libc6-compat iptables # Create a non-root user to run the software. RUN addgroup ${BIN_NAME} && \ @@ -84,7 +84,7 @@ LABEL name=${BIN_NAME} \ ENV BIN_NAME=${BIN_NAME} ENV VERSION=${PRODUCT_VERSION} -RUN apk add --no-cache ca-certificates gnupg libcap openssl su-exec iputils libc6-compat iptables +RUN apk add --no-cache ca-certificates curl gnupg libcap openssl su-exec iputils libc6-compat iptables # TARGETOS and TARGETARCH are set automatically when --platform is provided. 
ARG TARGETOS @@ -144,7 +144,7 @@ ARG TARGETOS ARG TARGETARCH # Copy license for Red Hat certification. -COPY LICENSE /licenses/mozilla.txt +COPY LICENSE.md /licenses/mozilla.txt RUN microdnf install -y ca-certificates gnupg libcap openssl shadow-utils iptables diff --git a/control-plane/Makefile b/control-plane/Makefile index 4c3cbac971..b6d6594c3d 100644 --- a/control-plane/Makefile +++ b/control-plane/Makefile @@ -22,8 +22,29 @@ CI_DEV_DOCKER_NAMESPACE?=hashicorpdev CI_DEV_DOCKER_IMAGE_NAME?=consul-k8s-control-plane CI_DEV_DOCKER_WORKDIR?=. CONSUL_K8S_IMAGE_VERSION?=latest + +# Helm Test Image +CI_DEV_HELM_TEST_IMAGE?=consul-helm-test +# Represents the latest supported version for this branch +# Increment this when building a new version of the container +TEST_IMAGE_VERSION=0.12.3 +HELM_TEST_WORKDIR=../charts/consul/test/docker + ################ +# Make target for building and pushing the helm test container +# used to run various pipeline tests (including GKE/AKS/EKS). This container +# provides the necessary dependencies for running on our cloud targets. +ci.dev-helm-test-docker: + @echo "Building helm test Development container - $(CI_DEV_HELM_TEST_IMAGE)" + @echo $(DOCKER_PASS) | docker login -u="$(DOCKER_USER)" --password-stdin + @docker buildx create --use && docker buildx build -t '$(CI_DEV_DOCKER_NAMESPACE)/$(CI_DEV_HELM_TEST_IMAGE):$(TEST_IMAGE_VERSION)' \ --platform linux/amd64,linux/arm64 \ --push \ --label COMMIT_SHA=$(GIT_COMMIT) \ $(HELM_TEST_WORKDIR) -f $(HELM_TEST_WORKDIR)/Test.dockerfile + @echo "Pushed dev image to: $(CI_DEV_DOCKER_NAMESPACE)/$(CI_DEV_HELM_TEST_IMAGE):$(TEST_IMAGE_VERSION)" + # TODO: Remove this ci.dev-docker target once we move the acceptance tests to GitHub Actions. # In CircleCI, the linux binary will be attached from a previous step at pkg/bin/linux_amd64/. This make target # should only run in CI and not locally. 
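For context on the new ci.dev-helm-test-docker target above, a rough sketch of how it would be invoked from CI (not part of this patch; DOCKER_USER and DOCKER_PASS are assumed to be injected by the pipeline, matching the docker login line in the recipe):

    DOCKER_USER=<user> DOCKER_PASS=<access-token> make ci.dev-helm-test-docker

Run from the control-plane directory. Because the buildx invocation combines --platform linux/amd64,linux/arm64 with --push, it cross-builds both architectures and publishes them under a single multi-arch manifest tag in one step, rather than building and pushing per-architecture images separately.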
diff --git a/control-plane/api/v1alpha1/exportedservices_types.go b/control-plane/api/v1alpha1/exportedservices_types.go index e05f17a177..3087379c62 100644 --- a/control-plane/api/v1alpha1/exportedservices_types.go +++ b/control-plane/api/v1alpha1/exportedservices_types.go @@ -167,7 +167,7 @@ func (in *ExportedService) toConsul() capi.ExportedService { for _, consumer := range in.Consumers { consumers = append(consumers, capi.ServiceConsumer{ Partition: consumer.Partition, - Peer: consumer.Peer, + PeerName: consumer.Peer, }) } return capi.ExportedService{ diff --git a/control-plane/api/v1alpha1/exportedservices_types_test.go b/control-plane/api/v1alpha1/exportedservices_types_test.go index 8826166a76..0810f8edaf 100644 --- a/control-plane/api/v1alpha1/exportedservices_types_test.go +++ b/control-plane/api/v1alpha1/exportedservices_types_test.go @@ -90,7 +90,7 @@ func TestExportedServices_MatchesConsul(t *testing.T) { Partition: "third", }, { - Peer: "second-peer", + PeerName: "second-peer", }, }, }, @@ -105,7 +105,7 @@ func TestExportedServices_MatchesConsul(t *testing.T) { Partition: "fifth", }, { - Peer: "third-peer", + PeerName: "third-peer", }, }, }, @@ -214,7 +214,7 @@ func TestExportedServices_ToConsul(t *testing.T) { Partition: "third", }, { - Peer: "second-peer", + PeerName: "second-peer", }, }, }, @@ -229,7 +229,7 @@ func TestExportedServices_ToConsul(t *testing.T) { Partition: "fifth", }, { - Peer: "third-peer", + PeerName: "third-peer", }, }, }, diff --git a/control-plane/api/v1alpha1/exportedservices_webhook.go b/control-plane/api/v1alpha1/exportedservices_webhook.go index 5a3d2cb2f1..d80062e958 100644 --- a/control-plane/api/v1alpha1/exportedservices_webhook.go +++ b/control-plane/api/v1alpha1/exportedservices_webhook.go @@ -7,6 +7,7 @@ import ( "github.com/go-logr/logr" "github.com/hashicorp/consul-k8s/control-plane/api/common" + capi "github.com/hashicorp/consul/api" admissionv1 "k8s.io/api/admission/v1" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/webhook/admission" @@ -16,9 +17,10 @@ import ( type ExportedServicesWebhook struct { client.Client - Logger logr.Logger - decoder *admission.Decoder - ConsulMeta common.ConsulMeta + ConsulClient *capi.Client + Logger logr.Logger + decoder *admission.Decoder + ConsulMeta common.ConsulMeta } // NOTE: The path value in the below line is the path to the webhook. 
diff --git a/control-plane/api/v1alpha1/exportedservices_webhook_test.go b/control-plane/api/v1alpha1/exportedservices_webhook_test.go index 6548c131f7..a1af17f7e8 100644 --- a/control-plane/api/v1alpha1/exportedservices_webhook_test.go +++ b/control-plane/api/v1alpha1/exportedservices_webhook_test.go @@ -176,10 +176,11 @@ func TestValidateExportedServices(t *testing.T) { require.NoError(t, err) validator := &ExportedServicesWebhook{ - Client: client, - Logger: logrtest.TestLogger{T: t}, - decoder: decoder, - ConsulMeta: c.consulMeta, + Client: client, + ConsulClient: nil, + Logger: logrtest.TestLogger{T: t}, + decoder: decoder, + ConsulMeta: c.consulMeta, } response := validator.Handle(ctx, admission.Request{ AdmissionRequest: admissionv1.AdmissionRequest{ diff --git a/control-plane/api/v1alpha1/ingressgateway_webhook.go b/control-plane/api/v1alpha1/ingressgateway_webhook.go index 7f8ba37558..8dcc2fa9ee 100644 --- a/control-plane/api/v1alpha1/ingressgateway_webhook.go +++ b/control-plane/api/v1alpha1/ingressgateway_webhook.go @@ -6,6 +6,7 @@ import ( "github.com/go-logr/logr" "github.com/hashicorp/consul-k8s/control-plane/api/common" + capi "github.com/hashicorp/consul/api" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/webhook/admission" ) @@ -13,7 +14,8 @@ import ( // +kubebuilder:object:generate=false type IngressGatewayWebhook struct { - Logger logr.Logger + ConsulClient *capi.Client + Logger logr.Logger // ConsulMeta contains metadata specific to the Consul installation. ConsulMeta common.ConsulMeta diff --git a/control-plane/api/v1alpha1/mesh_types.go b/control-plane/api/v1alpha1/mesh_types.go index 502e567829..7053db67bc 100644 --- a/control-plane/api/v1alpha1/mesh_types.go +++ b/control-plane/api/v1alpha1/mesh_types.go @@ -52,8 +52,6 @@ type MeshSpec struct { TLS *MeshTLSConfig `json:"tls,omitempty"` // HTTP defines the HTTP configuration for the service mesh. HTTP *MeshHTTPConfig `json:"http,omitempty"` - // Peering defines the peering configuration for the service mesh. - Peering *PeeringMeshConfig `json:"peering,omitempty"` } // TransparentProxyMeshConfig controls configuration specific to proxies in "transparent" mode. Added in v1.10.0. @@ -77,15 +75,6 @@ type MeshHTTPConfig struct { SanitizeXForwardedClientCert bool `json:"sanitizeXForwardedClientCert"` } -type PeeringMeshConfig struct { - // PeerThroughMeshGateways determines whether peering traffic between - // control planes should flow through mesh gateways. If enabled, - // Consul servers will advertise mesh gateway addresses as their own. - // Additionally, mesh gateways will configure themselves to expose - // the local servers using a peering-specific SNI. - PeerThroughMeshGateways bool `json:"peerThroughMeshGateways,omitempty"` -} - type MeshDirectionalTLSConfig struct { // TLSMinVersion sets the default minimum TLS version supported. // One of `TLS_AUTO`, `TLSv1_0`, `TLSv1_1`, `TLSv1_2`, or `TLSv1_3`. 
@@ -192,7 +181,6 @@ func (in *Mesh) ToConsul(datacenter string) capi.ConfigEntry { TransparentProxy: in.Spec.TransparentProxy.toConsul(), TLS: in.Spec.TLS.toConsul(), HTTP: in.Spec.HTTP.toConsul(), - Peering: in.Spec.Peering.toConsul(), Meta: meta(datacenter), } } @@ -206,12 +194,11 @@ func (in *Mesh) MatchesConsul(candidate capi.ConfigEntry) bool { return cmp.Equal(in.ToConsul(""), configEntry, cmpopts.IgnoreFields(capi.MeshConfigEntry{}, "Partition", "Namespace", "Meta", "ModifyIndex", "CreateIndex"), cmpopts.IgnoreUnexported(), cmpopts.EquateEmpty()) } -func (in *Mesh) Validate(consulMeta common.ConsulMeta) error { +func (in *Mesh) Validate(_ common.ConsulMeta) error { var errs field.ErrorList path := field.NewPath("spec") errs = append(errs, in.Spec.TLS.validate(path.Child("tls"))...) - errs = append(errs, in.Spec.Peering.validate(path.Child("peering"), consulMeta.PartitionsEnabled, consulMeta.Partition)...) if len(errs) > 0 { return apierrors.NewInvalid( @@ -279,28 +266,6 @@ func (in *MeshDirectionalTLSConfig) toConsul() *capi.MeshDirectionalTLSConfig { } } -func (in *PeeringMeshConfig) toConsul() *capi.PeeringMeshConfig { - if in == nil { - return nil - } - return &capi.PeeringMeshConfig{PeerThroughMeshGateways: in.PeerThroughMeshGateways} -} - -func (in *PeeringMeshConfig) validate(path *field.Path, partitionsEnabled bool, partition string) field.ErrorList { - if in == nil { - return nil - } - - var errs field.ErrorList - - if partitionsEnabled && in.PeerThroughMeshGateways && partition != common.DefaultConsulPartition { - errs = append(errs, field.Forbidden(path.Child("peerThroughMeshGateways"), - "\"peerThroughMeshGateways\" is only valid in the \"default\" partition")) - } - - return errs -} - // DefaultNamespaceFields has no behaviour here as meshes have no namespace specific fields. 
func (in *Mesh) DefaultNamespaceFields(_ common.ConsulMeta) { } diff --git a/control-plane/api/v1alpha1/mesh_types_test.go b/control-plane/api/v1alpha1/mesh_types_test.go index 392c38d354..99de86d1fd 100644 --- a/control-plane/api/v1alpha1/mesh_types_test.go +++ b/control-plane/api/v1alpha1/mesh_types_test.go @@ -60,9 +60,6 @@ func TestMesh_MatchesConsul(t *testing.T) { HTTP: &MeshHTTPConfig{ SanitizeXForwardedClientCert: true, }, - Peering: &PeeringMeshConfig{ - PeerThroughMeshGateways: true, - }, }, }, Theirs: &capi.MeshConfigEntry{ @@ -84,9 +81,6 @@ func TestMesh_MatchesConsul(t *testing.T) { HTTP: &capi.MeshHTTPConfig{ SanitizeXForwardedClientCert: true, }, - Peering: &capi.PeeringMeshConfig{ - PeerThroughMeshGateways: true, - }, CreateIndex: 1, ModifyIndex: 2, Meta: map[string]string{ @@ -160,9 +154,6 @@ func TestMesh_ToConsul(t *testing.T) { HTTP: &MeshHTTPConfig{ SanitizeXForwardedClientCert: true, }, - Peering: &PeeringMeshConfig{ - PeerThroughMeshGateways: true, - }, }, }, Exp: &capi.MeshConfigEntry{ @@ -184,9 +175,6 @@ func TestMesh_ToConsul(t *testing.T) { HTTP: &capi.MeshHTTPConfig{ SanitizeXForwardedClientCert: true, }, - Peering: &capi.PeeringMeshConfig{ - PeerThroughMeshGateways: true, - }, Namespace: "", Meta: map[string]string{ common.SourceKey: common.SourceValue, @@ -209,7 +197,6 @@ func TestMesh_Validate(t *testing.T) { cases := map[string]struct { input *Mesh expectedErrMsgs []string - consulMeta common.ConsulMeta }{ "tls.incoming.minTLSVersion invalid": { input: &Mesh{ @@ -309,53 +296,6 @@ func TestMesh_Validate(t *testing.T) { }, }, }, - "peering.peerThroughMeshGateways in invalid partition": { - input: &Mesh{ - ObjectMeta: metav1.ObjectMeta{ - Name: "name", - }, - Spec: MeshSpec{ - Peering: &PeeringMeshConfig{ - PeerThroughMeshGateways: true, - }, - }, - }, - consulMeta: common.ConsulMeta{ - Partition: "blurg", - PartitionsEnabled: true, - }, - expectedErrMsgs: []string{ - `spec.peering.peerThroughMeshGateways: Forbidden: "peerThroughMeshGateways" is only valid in the "default" partition`, - }, - }, - "peering.peerThroughMeshGateways valid partition": { - input: &Mesh{ - ObjectMeta: metav1.ObjectMeta{ - Name: "name", - }, - Spec: MeshSpec{ - Peering: &PeeringMeshConfig{ - PeerThroughMeshGateways: true, - }, - }, - }, - consulMeta: common.ConsulMeta{ - Partition: "default", - PartitionsEnabled: true, - }, - }, - "peering.peerThroughMeshGateways valid with no partitions": { - input: &Mesh{ - ObjectMeta: metav1.ObjectMeta{ - Name: "name", - }, - Spec: MeshSpec{ - Peering: &PeeringMeshConfig{ - PeerThroughMeshGateways: true, - }, - }, - }, - }, "multiple errors": { input: &Mesh{ ObjectMeta: metav1.ObjectMeta{ @@ -372,28 +312,20 @@ func TestMesh_Validate(t *testing.T) { TLSMaxVersion: "bar", }, }, - Peering: &PeeringMeshConfig{ - PeerThroughMeshGateways: true, - }, }, }, - consulMeta: common.ConsulMeta{ - Partition: "blurg", - PartitionsEnabled: true, - }, expectedErrMsgs: []string{ `spec.tls.incoming.tlsMinVersion: Invalid value: "foo": must be one of "TLS_AUTO", "TLSv1_0", "TLSv1_1", "TLSv1_2", "TLSv1_3", ""`, `spec.tls.incoming.tlsMaxVersion: Invalid value: "bar": must be one of "TLS_AUTO", "TLSv1_0", "TLSv1_1", "TLSv1_2", "TLSv1_3", ""`, `spec.tls.outgoing.tlsMinVersion: Invalid value: "foo": must be one of "TLS_AUTO", "TLSv1_0", "TLSv1_1", "TLSv1_2", "TLSv1_3", ""`, `spec.tls.outgoing.tlsMaxVersion: Invalid value: "bar": must be one of "TLS_AUTO", "TLSv1_0", "TLSv1_1", "TLSv1_2", "TLSv1_3", ""`, - `spec.peering.peerThroughMeshGateways: Forbidden: 
"peerThroughMeshGateways" is only valid in the "default" partition`, }, }, } for name, testCase := range cases { t.Run(name, func(t *testing.T) { - err := testCase.input.Validate(testCase.consulMeta) + err := testCase.input.Validate(common.ConsulMeta{}) if len(testCase.expectedErrMsgs) != 0 { require.Error(t, err) for _, s := range testCase.expectedErrMsgs { diff --git a/control-plane/api/v1alpha1/mesh_webhook.go b/control-plane/api/v1alpha1/mesh_webhook.go index 5c714c4e5f..d28cfc193c 100644 --- a/control-plane/api/v1alpha1/mesh_webhook.go +++ b/control-plane/api/v1alpha1/mesh_webhook.go @@ -7,6 +7,7 @@ import ( "github.com/go-logr/logr" "github.com/hashicorp/consul-k8s/control-plane/api/common" + capi "github.com/hashicorp/consul/api" admissionv1 "k8s.io/api/admission/v1" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/webhook/admission" @@ -16,12 +17,9 @@ import ( type MeshWebhook struct { client.Client - Logger logr.Logger - - // ConsulMeta contains metadata specific to the Consul installation. - ConsulMeta common.ConsulMeta - - decoder *admission.Decoder + ConsulClient *capi.Client + Logger logr.Logger + decoder *admission.Decoder } // NOTE: The path value in the below line is the path to the webhook. @@ -61,19 +59,7 @@ func (v *MeshWebhook) Handle(ctx context.Context, req admission.Request) admissi } } - return common.ValidateConfigEntry(ctx, req, v.Logger, v, &mesh, v.ConsulMeta) -} - -func (v *MeshWebhook) List(ctx context.Context) ([]common.ConfigEntryResource, error) { - var meshList MeshList - if err := v.Client.List(ctx, &meshList); err != nil { - return nil, err - } - var entries []common.ConfigEntryResource - for _, item := range meshList.Items { - entries = append(entries, common.ConfigEntryResource(&item)) - } - return entries, nil + return admission.Allowed(fmt.Sprintf("valid %s request", mesh.KubeKind())) } func (v *MeshWebhook) InjectDecoder(d *admission.Decoder) error { diff --git a/control-plane/api/v1alpha1/mesh_webhook_test.go b/control-plane/api/v1alpha1/mesh_webhook_test.go index 55b0c3a77d..633ec55497 100644 --- a/control-plane/api/v1alpha1/mesh_webhook_test.go +++ b/control-plane/api/v1alpha1/mesh_webhook_test.go @@ -63,23 +63,6 @@ func TestValidateMesh(t *testing.T) { expAllow: false, expErrMessage: "mesh resource name must be \"mesh\"", }, - "validation rejects": { - existingResources: nil, - newResource: &Mesh{ - ObjectMeta: metav1.ObjectMeta{ - Name: common.Mesh, - }, - Spec: MeshSpec{ - TLS: &MeshTLSConfig{ - Incoming: &MeshDirectionalTLSConfig{ - TLSMinVersion: "foo", - }, - }, - }, - }, - expAllow: false, - expErrMessage: "mesh.consul.hashicorp.com \"mesh\" is invalid: spec.tls.incoming.tlsMinVersion: Invalid value: \"foo\": must be one of \"TLS_AUTO\", \"TLSv1_0\", \"TLSv1_1\", \"TLSv1_2\", \"TLSv1_3\", \"\"", - }, } for name, c := range cases { t.Run(name, func(t *testing.T) { @@ -93,9 +76,10 @@ func TestValidateMesh(t *testing.T) { require.NoError(t, err) validator := &MeshWebhook{ - Client: client, - Logger: logrtest.TestLogger{T: t}, - decoder: decoder, + Client: client, + ConsulClient: nil, + Logger: logrtest.TestLogger{T: t}, + decoder: decoder, } response := validator.Handle(ctx, admission.Request{ AdmissionRequest: admissionv1.AdmissionRequest{ diff --git a/control-plane/api/v1alpha1/peeringacceptor_webhook.go b/control-plane/api/v1alpha1/peeringacceptor_webhook.go index 60367c1384..728bd205ee 100644 --- a/control-plane/api/v1alpha1/peeringacceptor_webhook.go +++ 
b/control-plane/api/v1alpha1/peeringacceptor_webhook.go @@ -6,6 +6,7 @@ import ( "net/http" "github.com/go-logr/logr" + capi "github.com/hashicorp/consul/api" admissionv1 "k8s.io/api/admission/v1" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/webhook/admission" @@ -15,8 +16,10 @@ import ( type PeeringAcceptorWebhook struct { client.Client - Logger logr.Logger - decoder *admission.Decoder + ConsulClient *capi.Client + Logger logr.Logger + decoder *admission.Decoder + //ConsulMeta common.ConsulMeta } // NOTE: The path value in the below line is the path to the webhook. diff --git a/control-plane/api/v1alpha1/peeringacceptor_webhook_test.go b/control-plane/api/v1alpha1/peeringacceptor_webhook_test.go index a65966881a..26ed3e2150 100644 --- a/control-plane/api/v1alpha1/peeringacceptor_webhook_test.go +++ b/control-plane/api/v1alpha1/peeringacceptor_webhook_test.go @@ -134,9 +134,10 @@ func TestValidatePeeringAcceptor(t *testing.T) { require.NoError(t, err) validator := &PeeringAcceptorWebhook{ - Client: client, - Logger: logrtest.TestLogger{T: t}, - decoder: decoder, + Client: client, + ConsulClient: nil, + Logger: logrtest.TestLogger{T: t}, + decoder: decoder, } response := validator.Handle(ctx, admission.Request{ AdmissionRequest: admissionv1.AdmissionRequest{ diff --git a/control-plane/api/v1alpha1/peeringdialer_webhook.go b/control-plane/api/v1alpha1/peeringdialer_webhook.go index fc0b1c38f6..587f998155 100644 --- a/control-plane/api/v1alpha1/peeringdialer_webhook.go +++ b/control-plane/api/v1alpha1/peeringdialer_webhook.go @@ -6,6 +6,7 @@ import ( "net/http" "github.com/go-logr/logr" + capi "github.com/hashicorp/consul/api" admissionv1 "k8s.io/api/admission/v1" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/webhook/admission" @@ -15,8 +16,9 @@ import ( type PeeringDialerWebhook struct { client.Client - Logger logr.Logger - decoder *admission.Decoder + ConsulClient *capi.Client + Logger logr.Logger + decoder *admission.Decoder } // NOTE: The path value in the below line is the path to the webhook. 
diff --git a/control-plane/api/v1alpha1/peeringdialer_webhook_test.go b/control-plane/api/v1alpha1/peeringdialer_webhook_test.go index e8b206e3e6..abdca4f417 100644 --- a/control-plane/api/v1alpha1/peeringdialer_webhook_test.go +++ b/control-plane/api/v1alpha1/peeringdialer_webhook_test.go @@ -134,9 +134,10 @@ func TestValidatePeeringDialer(t *testing.T) { require.NoError(t, err) validator := &PeeringDialerWebhook{ - Client: client, - Logger: logrtest.TestLogger{T: t}, - decoder: decoder, + Client: client, + ConsulClient: nil, + Logger: logrtest.TestLogger{T: t}, + decoder: decoder, } response := validator.Handle(ctx, admission.Request{ AdmissionRequest: admissionv1.AdmissionRequest{ diff --git a/control-plane/api/v1alpha1/proxydefaults_webhook.go b/control-plane/api/v1alpha1/proxydefaults_webhook.go index 3873516074..4e221e0130 100644 --- a/control-plane/api/v1alpha1/proxydefaults_webhook.go +++ b/control-plane/api/v1alpha1/proxydefaults_webhook.go @@ -7,6 +7,7 @@ import ( "github.com/go-logr/logr" "github.com/hashicorp/consul-k8s/control-plane/api/common" + capi "github.com/hashicorp/consul/api" admissionv1 "k8s.io/api/admission/v1" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/webhook/admission" @@ -16,9 +17,10 @@ import ( type ProxyDefaultsWebhook struct { client.Client - Logger logr.Logger - decoder *admission.Decoder - ConsulMeta common.ConsulMeta + ConsulClient *capi.Client + Logger logr.Logger + decoder *admission.Decoder + ConsulMeta common.ConsulMeta } // NOTE: The path value in the below line is the path to the webhook. diff --git a/control-plane/api/v1alpha1/proxydefaults_webhook_test.go b/control-plane/api/v1alpha1/proxydefaults_webhook_test.go index cc36d994dd..a728ddc130 100644 --- a/control-plane/api/v1alpha1/proxydefaults_webhook_test.go +++ b/control-plane/api/v1alpha1/proxydefaults_webhook_test.go @@ -118,9 +118,10 @@ func TestValidateProxyDefault(t *testing.T) { require.NoError(t, err) validator := &ProxyDefaultsWebhook{ - Client: client, - Logger: logrtest.TestLogger{T: t}, - decoder: decoder, + Client: client, + ConsulClient: nil, + Logger: logrtest.TestLogger{T: t}, + decoder: decoder, } response := validator.Handle(ctx, admission.Request{ AdmissionRequest: admissionv1.AdmissionRequest{ diff --git a/control-plane/api/v1alpha1/servicedefaults_types.go b/control-plane/api/v1alpha1/servicedefaults_types.go index 0a4020e010..b3902c4118 100644 --- a/control-plane/api/v1alpha1/servicedefaults_types.go +++ b/control-plane/api/v1alpha1/servicedefaults_types.go @@ -2,9 +2,6 @@ package v1alpha1 import ( "fmt" - "net" - "strings" - "github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp/cmpopts" "github.com/hashicorp/consul-k8s/control-plane/api/common" @@ -15,6 +12,8 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/util/validation/field" + "net" + "strings" ) const ( @@ -88,13 +87,6 @@ type ServiceDefaultsSpec struct { // MaxInboundConnections is the maximum number of concurrent inbound connections to // each service instance. Defaults to 0 (using consul's default) if not set. MaxInboundConnections int `json:"maxInboundConnections,omitempty"` - // The number of milliseconds allowed to make connections to the local application - // instance before timing out. Defaults to 5000. - LocalConnectTimeoutMs int `json:"localConnectTimeoutMs,omitempty"` - // In milliseconds, the timeout for HTTP requests to the local application instance. 
- // Applies to HTTP-based protocols only. If not specified, inherits the Envoy default for - // route timeouts (15s). - LocalRequestTimeoutMs int `json:"localRequestTimeoutMs,omitempty"` } type Upstreams struct { @@ -271,8 +263,6 @@ func (in *ServiceDefaults) ToConsul(datacenter string) capi.ConfigEntry { Destination: in.Spec.Destination.toConsul(), Meta: meta(datacenter), MaxInboundConnections: in.Spec.MaxInboundConnections, - LocalConnectTimeoutMs: in.Spec.LocalConnectTimeoutMs, - LocalRequestTimeoutMs: in.Spec.LocalRequestTimeoutMs, } } @@ -303,14 +293,6 @@ func (in *ServiceDefaults) Validate(consulMeta common.ConsulMeta) error { allErrs = append(allErrs, field.Invalid(path.Child("maxinboundconnections"), in.Spec.MaxInboundConnections, "MaxInboundConnections must be > 0")) } - if in.Spec.LocalConnectTimeoutMs < 0 { - allErrs = append(allErrs, field.Invalid(path.Child("localConnectTimeoutMs"), in.Spec.LocalConnectTimeoutMs, "LocalConnectTimeoutMs must be > 0")) - } - - if in.Spec.LocalRequestTimeoutMs < 0 { - allErrs = append(allErrs, field.Invalid(path.Child("localRequestTimeoutMs"), in.Spec.LocalRequestTimeoutMs, "LocalRequestTimeoutMs must be > 0")) - } - allErrs = append(allErrs, in.Spec.UpstreamConfig.validate(path.Child("upstreamConfig"), consulMeta.PartitionsEnabled)...) allErrs = append(allErrs, in.Spec.Expose.validate(path.Child("expose"))...) diff --git a/control-plane/api/v1alpha1/servicedefaults_types_test.go b/control-plane/api/v1alpha1/servicedefaults_types_test.go index e7fdae2575..c70c8a6408 100644 --- a/control-plane/api/v1alpha1/servicedefaults_types_test.go +++ b/control-plane/api/v1alpha1/servicedefaults_types_test.go @@ -146,8 +146,6 @@ func TestServiceDefaults_ToConsul(t *testing.T) { Port: 443, }, MaxInboundConnections: 20, - LocalConnectTimeoutMs: 5000, - LocalRequestTimeoutMs: 15000, }, }, &capi.ServiceConfigEntry{ @@ -254,8 +252,6 @@ func TestServiceDefaults_ToConsul(t *testing.T) { Port: 443, }, MaxInboundConnections: 20, - LocalConnectTimeoutMs: 5000, - LocalRequestTimeoutMs: 15000, Meta: map[string]string{ common.SourceKey: common.SourceValue, common.DatacenterKey: "datacenter", diff --git a/control-plane/api/v1alpha1/servicedefaults_webhook.go b/control-plane/api/v1alpha1/servicedefaults_webhook.go index f79e68bcde..a196a6d941 100644 --- a/control-plane/api/v1alpha1/servicedefaults_webhook.go +++ b/control-plane/api/v1alpha1/servicedefaults_webhook.go @@ -6,6 +6,7 @@ import ( "github.com/go-logr/logr" "github.com/hashicorp/consul-k8s/control-plane/api/common" + capi "github.com/hashicorp/consul/api" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/webhook/admission" ) @@ -13,7 +14,8 @@ import ( // +kubebuilder:object:generate=false type ServiceDefaultsWebhook struct { - Logger logr.Logger + ConsulClient *capi.Client + Logger logr.Logger // ConsulMeta contains metadata specific to the Consul installation. 
ConsulMeta common.ConsulMeta diff --git a/control-plane/api/v1alpha1/serviceintentions_webhook.go b/control-plane/api/v1alpha1/serviceintentions_webhook.go index ddc6488690..0287ddfeb8 100644 --- a/control-plane/api/v1alpha1/serviceintentions_webhook.go +++ b/control-plane/api/v1alpha1/serviceintentions_webhook.go @@ -8,6 +8,7 @@ import ( "github.com/go-logr/logr" "github.com/hashicorp/consul-k8s/control-plane/api/common" + capi "github.com/hashicorp/consul/api" admissionv1 "k8s.io/api/admission/v1" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/webhook/admission" @@ -17,9 +18,10 @@ import ( type ServiceIntentionsWebhook struct { client.Client - Logger logr.Logger - decoder *admission.Decoder - ConsulMeta common.ConsulMeta + ConsulClient *capi.Client + Logger logr.Logger + decoder *admission.Decoder + ConsulMeta common.ConsulMeta } // NOTE: The path value in the below line is the path to the webhook. diff --git a/control-plane/api/v1alpha1/serviceintentions_webhook_test.go b/control-plane/api/v1alpha1/serviceintentions_webhook_test.go index e6095e8351..17b1881c5a 100644 --- a/control-plane/api/v1alpha1/serviceintentions_webhook_test.go +++ b/control-plane/api/v1alpha1/serviceintentions_webhook_test.go @@ -249,9 +249,10 @@ func TestHandle_ServiceIntentions_Create(t *testing.T) { require.NoError(t, err) validator := &ServiceIntentionsWebhook{ - Client: client, - Logger: logrtest.TestLogger{T: t}, - decoder: decoder, + Client: client, + ConsulClient: nil, + Logger: logrtest.TestLogger{T: t}, + decoder: decoder, ConsulMeta: common.ConsulMeta{ NamespacesEnabled: true, Mirroring: c.mirror, @@ -438,9 +439,10 @@ func TestHandle_ServiceIntentions_Update(t *testing.T) { require.NoError(t, err) validator := &ServiceIntentionsWebhook{ - Client: client, - Logger: logrtest.TestLogger{T: t}, - decoder: decoder, + Client: client, + ConsulClient: nil, + Logger: logrtest.TestLogger{T: t}, + decoder: decoder, ConsulMeta: common.ConsulMeta{ NamespacesEnabled: true, Mirroring: c.mirror, @@ -598,9 +600,10 @@ func TestHandle_ServiceIntentions_Patches(t *testing.T) { require.NoError(t, err) validator := &ServiceIntentionsWebhook{ - Client: client, - Logger: logrtest.TestLogger{T: t}, - decoder: decoder, + Client: client, + ConsulClient: nil, + Logger: logrtest.TestLogger{T: t}, + decoder: decoder, ConsulMeta: common.ConsulMeta{ NamespacesEnabled: namespacesEnabled, Mirroring: true, diff --git a/control-plane/api/v1alpha1/serviceresolver_types.go b/control-plane/api/v1alpha1/serviceresolver_types.go index 4fc637b35f..2d5fc1e8c8 100644 --- a/control-plane/api/v1alpha1/serviceresolver_types.go +++ b/control-plane/api/v1alpha1/serviceresolver_types.go @@ -91,9 +91,6 @@ type ServiceResolverRedirect struct { // Datacenter is the datacenter to resolve the service from instead of the // current one. Datacenter string `json:"datacenter,omitempty"` - // Peer is the name of the cluster peer to resolve the service from instead - // of the current one. - Peer string `json:"peer,omitempty"` } type ServiceResolverSubsetMap map[string]ServiceResolverSubset @@ -126,23 +123,6 @@ type ServiceResolverFailover struct { Namespace string `json:"namespace,omitempty"` // Datacenters is a fixed list of datacenters to try during failover. Datacenters []string `json:"datacenters,omitempty"` - // Targets specifies a fixed list of failover targets to try during failover. 
- Targets []ServiceResolverFailoverTarget `json:"targets,omitempty"` -} - -type ServiceResolverFailoverTarget struct { - // Service specifies the name of the service to try during failover. - Service string `json:"service,omitempty"` - // ServiceSubset specifies the service subset to try during failover. - ServiceSubset string `json:"serviceSubset,omitempty"` - // Partition specifies the partition to try during failover. - Partition string `json:"partition,omitempty"` - // Namespace specifies the namespace to try during failover. - Namespace string `json:"namespace,omitempty"` - // Datacenter specifies the datacenter to try during failover. - Datacenter string `json:"datacenter,omitempty"` - // Peer specifies the name of the cluster peer to try during failover. - Peer string `json:"peer,omitempty"` } type LoadBalancer struct { @@ -367,8 +347,6 @@ func (in *ServiceResolverRedirect) toConsul() *capi.ServiceResolverRedirect { ServiceSubset: in.ServiceSubset, Namespace: in.Namespace, Datacenter: in.Datacenter, - Partition: in.Partition, - Peer: in.Peer, } } @@ -384,28 +362,11 @@ func (in ServiceResolverFailoverMap) toConsul() map[string]capi.ServiceResolverF } func (in ServiceResolverFailover) toConsul() capi.ServiceResolverFailover { - var targets []capi.ServiceResolverFailoverTarget - for _, target := range in.Targets { - targets = append(targets, target.toConsul()) - } - return capi.ServiceResolverFailover{ Service: in.Service, ServiceSubset: in.ServiceSubset, Namespace: in.Namespace, Datacenters: in.Datacenters, - Targets: targets, - } -} - -func (in ServiceResolverFailoverTarget) toConsul() capi.ServiceResolverFailoverTarget { - return capi.ServiceResolverFailoverTarget{ - Service: in.Service, - ServiceSubset: in.ServiceSubset, - Namespace: in.Namespace, - Partition: in.Partition, - Datacenter: in.Datacenter, - Peer: in.Peer, } } @@ -503,16 +464,12 @@ func (in *ServiceResolver) validateEnterprise(consulMeta common.ConsulMeta) fiel return errs } -func (in *ServiceResolverFailover) isEmpty() bool { - return in.Service == "" && in.ServiceSubset == "" && in.Namespace == "" && len(in.Datacenters) == 0 && len(in.Targets) == 0 -} - func (in *ServiceResolverFailover) validate(path *field.Path) *field.Error { - if in.isEmpty() { + if in.Service == "" && in.ServiceSubset == "" && in.Namespace == "" && len(in.Datacenters) == 0 { // NOTE: We're passing "{}" here as our value because we know that the // error is we have an empty object. 
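In effect, with the Targets field gone, the inlined emptiness check above reduces to the four remaining failover fields. A small standalone sketch of the post-revert semantics, with the struct trimmed to just those fields:

    package main

    import "fmt"

    // serviceResolverFailover mirrors the four fields that survive this revert.
    type serviceResolverFailover struct {
        Service       string
        ServiceSubset string
        Namespace     string
        Datacenters   []string
    }

    // isEmpty reproduces the inlined check in validate.
    func (f serviceResolverFailover) isEmpty() bool {
        return f.Service == "" && f.ServiceSubset == "" && f.Namespace == "" && len(f.Datacenters) == 0
    }

    func main() {
        fmt.Println(serviceResolverFailover{}.isEmpty())                             // true: rejected by validate
        fmt.Println(serviceResolverFailover{Datacenters: []string{"dc2"}}.isEmpty()) // false: accepted
    }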
return field.Invalid(path, "{}", - "service, serviceSubset, namespace, datacenters, and targets cannot all be empty at once") + "service, serviceSubset, namespace and datacenters cannot all be empty at once") } return nil } diff --git a/control-plane/api/v1alpha1/serviceresolver_types_test.go b/control-plane/api/v1alpha1/serviceresolver_types_test.go index fd4fc25a60..44b838cc50 100644 --- a/control-plane/api/v1alpha1/serviceresolver_types_test.go +++ b/control-plane/api/v1alpha1/serviceresolver_types_test.go @@ -59,7 +59,6 @@ func TestServiceResolver_MatchesConsul(t *testing.T) { ServiceSubset: "redirect_subset", Namespace: "redirect_namespace", Datacenter: "redirect_datacenter", - Peer: "redirect_peer", }, Failover: map[string]ServiceResolverFailover{ "failover1": { @@ -74,12 +73,6 @@ func TestServiceResolver_MatchesConsul(t *testing.T) { Namespace: "failover_namespace2", Datacenters: []string{"failover2_dc1", "failover2_dc2"}, }, - "failover3": { - Targets: []ServiceResolverFailoverTarget{ - {Peer: "failover_peer3"}, - {Partition: "failover_partition3", Namespace: "failover_namespace3"}, - }, - }, }, ConnectTimeout: metav1.Duration{Duration: 1 * time.Second}, LoadBalancer: &LoadBalancer{ @@ -126,7 +119,6 @@ func TestServiceResolver_MatchesConsul(t *testing.T) { ServiceSubset: "redirect_subset", Namespace: "redirect_namespace", Datacenter: "redirect_datacenter", - Peer: "redirect_peer", }, Failover: map[string]capi.ServiceResolverFailover{ "failover1": { @@ -141,12 +133,6 @@ func TestServiceResolver_MatchesConsul(t *testing.T) { Namespace: "failover_namespace2", Datacenters: []string{"failover2_dc1", "failover2_dc2"}, }, - "failover3": { - Targets: []capi.ServiceResolverFailoverTarget{ - {Peer: "failover_peer3"}, - {Partition: "failover_partition3", Namespace: "failover_namespace3"}, - }, - }, }, ConnectTimeout: 1 * time.Second, LoadBalancer: &capi.LoadBalancer{ @@ -242,7 +228,6 @@ func TestServiceResolver_ToConsul(t *testing.T) { ServiceSubset: "redirect_subset", Namespace: "redirect_namespace", Datacenter: "redirect_datacenter", - Partition: "redirect_partition", }, Failover: map[string]ServiceResolverFailover{ "failover1": { @@ -257,12 +242,6 @@ func TestServiceResolver_ToConsul(t *testing.T) { Namespace: "failover_namespace2", Datacenters: []string{"failover2_dc1", "failover2_dc2"}, }, - "failover3": { - Targets: []ServiceResolverFailoverTarget{ - {Peer: "failover_peer3"}, - {Partition: "failover_partition3", Namespace: "failover_namespace3"}, - }, - }, }, ConnectTimeout: metav1.Duration{Duration: 1 * time.Second}, LoadBalancer: &LoadBalancer{ @@ -309,7 +288,6 @@ func TestServiceResolver_ToConsul(t *testing.T) { ServiceSubset: "redirect_subset", Namespace: "redirect_namespace", Datacenter: "redirect_datacenter", - Partition: "redirect_partition", }, Failover: map[string]capi.ServiceResolverFailover{ "failover1": { @@ -324,12 +302,6 @@ func TestServiceResolver_ToConsul(t *testing.T) { Namespace: "failover_namespace2", Datacenters: []string{"failover2_dc1", "failover2_dc2"}, }, - "failover3": { - Targets: []capi.ServiceResolverFailoverTarget{ - {Peer: "failover_peer3"}, - {Partition: "failover_partition3", Namespace: "failover_namespace3"}, - }, - }, }, ConnectTimeout: 1 * time.Second, LoadBalancer: &capi.LoadBalancer{ @@ -595,8 +567,8 @@ func TestServiceResolver_Validate(t *testing.T) { }, namespacesEnabled: false, expectedErrMsgs: []string{ - "spec.failover[failA]: Invalid value: \"{}\": service, serviceSubset, namespace, datacenters, and targets cannot all be empty at once", - 
"spec.failover[failB]: Invalid value: \"{}\": service, serviceSubset, namespace, datacenters, and targets cannot all be empty at once", + "spec.failover[failA]: Invalid value: \"{}\": service, serviceSubset, namespace and datacenters cannot all be empty at once", + "spec.failover[failB]: Invalid value: \"{}\": service, serviceSubset, namespace and datacenters cannot all be empty at once", }, }, "hashPolicy.field invalid": { diff --git a/control-plane/api/v1alpha1/serviceresolver_webhook.go b/control-plane/api/v1alpha1/serviceresolver_webhook.go index ca5f9d9482..1af2fa0383 100644 --- a/control-plane/api/v1alpha1/serviceresolver_webhook.go +++ b/control-plane/api/v1alpha1/serviceresolver_webhook.go @@ -6,6 +6,7 @@ import ( "github.com/go-logr/logr" "github.com/hashicorp/consul-k8s/control-plane/api/common" + capi "github.com/hashicorp/consul/api" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/webhook/admission" ) @@ -13,7 +14,8 @@ import ( // +kubebuilder:object:generate=false type ServiceResolverWebhook struct { - Logger logr.Logger + ConsulClient *capi.Client + Logger logr.Logger // ConsulMeta contains metadata specific to the Consul installation. ConsulMeta common.ConsulMeta diff --git a/control-plane/api/v1alpha1/servicerouter_webhook.go b/control-plane/api/v1alpha1/servicerouter_webhook.go index f6837fcf7b..03644432e6 100644 --- a/control-plane/api/v1alpha1/servicerouter_webhook.go +++ b/control-plane/api/v1alpha1/servicerouter_webhook.go @@ -6,6 +6,7 @@ import ( "github.com/go-logr/logr" "github.com/hashicorp/consul-k8s/control-plane/api/common" + capi "github.com/hashicorp/consul/api" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/webhook/admission" ) @@ -13,7 +14,8 @@ import ( // +kubebuilder:object:generate=false type ServiceRouterWebhook struct { - Logger logr.Logger + ConsulClient *capi.Client + Logger logr.Logger // ConsulMeta contains metadata specific to the Consul installation. ConsulMeta common.ConsulMeta diff --git a/control-plane/api/v1alpha1/servicesplitter_webhook.go b/control-plane/api/v1alpha1/servicesplitter_webhook.go index c0020c88b8..f90c49f45a 100644 --- a/control-plane/api/v1alpha1/servicesplitter_webhook.go +++ b/control-plane/api/v1alpha1/servicesplitter_webhook.go @@ -6,6 +6,7 @@ import ( "github.com/go-logr/logr" "github.com/hashicorp/consul-k8s/control-plane/api/common" + capi "github.com/hashicorp/consul/api" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/webhook/admission" ) @@ -13,7 +14,8 @@ import ( // +kubebuilder:object:generate=false type ServiceSplitterWebhook struct { - Logger logr.Logger + ConsulClient *capi.Client + Logger logr.Logger // ConsulMeta contains metadata specific to the Consul installation. 
ConsulMeta common.ConsulMeta diff --git a/control-plane/api/v1alpha1/terminatinggateway_webhook.go b/control-plane/api/v1alpha1/terminatinggateway_webhook.go index b0427b87ca..2d3367fcaa 100644 --- a/control-plane/api/v1alpha1/terminatinggateway_webhook.go +++ b/control-plane/api/v1alpha1/terminatinggateway_webhook.go @@ -6,6 +6,7 @@ import ( "github.com/go-logr/logr" "github.com/hashicorp/consul-k8s/control-plane/api/common" + capi "github.com/hashicorp/consul/api" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/webhook/admission" ) @@ -13,7 +14,8 @@ import ( // +kubebuilder:object:generate=false type TerminatingGatewayWebhook struct { - Logger logr.Logger + ConsulClient *capi.Client + Logger logr.Logger // ConsulMeta contains metadata specific to the Consul installation. ConsulMeta common.ConsulMeta diff --git a/control-plane/api/v1alpha1/zz_generated.deepcopy.go b/control-plane/api/v1alpha1/zz_generated.deepcopy.go index 37cab374e5..af7ec840a1 100644 --- a/control-plane/api/v1alpha1/zz_generated.deepcopy.go +++ b/control-plane/api/v1alpha1/zz_generated.deepcopy.go @@ -811,11 +811,6 @@ func (in *MeshSpec) DeepCopyInto(out *MeshSpec) { *out = new(MeshHTTPConfig) **out = **in } - if in.Peering != nil { - in, out := &in.Peering, &out.Peering - *out = new(PeeringMeshConfig) - **out = **in - } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MeshSpec. @@ -857,11 +852,6 @@ func (in *MeshTLSConfig) DeepCopy() *MeshTLSConfig { func (in *PassiveHealthCheck) DeepCopyInto(out *PassiveHealthCheck) { *out = *in out.Interval = in.Interval - if in.EnforcingConsecutive5xx != nil { - in, out := &in.EnforcingConsecutive5xx, &out.EnforcingConsecutive5xx - *out = new(uint32) - **out = **in - } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PassiveHealthCheck. @@ -1124,21 +1114,6 @@ func (in *PeeringDialerStatus) DeepCopy() *PeeringDialerStatus { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PeeringMeshConfig) DeepCopyInto(out *PeeringMeshConfig) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PeeringMeshConfig. -func (in *PeeringMeshConfig) DeepCopy() *PeeringMeshConfig { - if in == nil { - return nil - } - out := new(PeeringMeshConfig) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ProxyDefaults) DeepCopyInto(out *ProxyDefaults) { *out = *in @@ -1528,11 +1503,6 @@ func (in *ServiceResolverFailover) DeepCopyInto(out *ServiceResolverFailover) { *out = make([]string, len(*in)) copy(*out, *in) } - if in.Targets != nil { - in, out := &in.Targets, &out.Targets - *out = make([]ServiceResolverFailoverTarget, len(*in)) - copy(*out, *in) - } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceResolverFailover. @@ -1566,21 +1536,6 @@ func (in ServiceResolverFailoverMap) DeepCopy() ServiceResolverFailoverMap { return *out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ServiceResolverFailoverTarget) DeepCopyInto(out *ServiceResolverFailoverTarget) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceResolverFailoverTarget. 
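A side effect visible in the generated code around this hunk: once PassiveHealthCheck loses its EnforcingConsecutive5xx pointer, a plain dereferenced assignment is already a deep copy, which is why the Upstream hunk further down can replace the DeepCopyInto call with **out = **in. A self-contained illustration of why the pointer field forced the extra allocation (struct trimmed, names local to this example):

    package main

    import "fmt"

    type passiveHealthCheck struct {
        MaxFailures             uint32
        EnforcingConsecutive5xx *uint32 // pointer field: a plain struct copy would alias it
    }

    func (in *passiveHealthCheck) deepCopyInto(out *passiveHealthCheck) {
        *out = *in // copies MaxFailures; copies the pointer, not the pointee
        if in.EnforcingConsecutive5xx != nil {
            out.EnforcingConsecutive5xx = new(uint32) // so clone the pointee
            *out.EnforcingConsecutive5xx = *in.EnforcingConsecutive5xx
        }
    }

    func main() {
        v := uint32(100)
        a := passiveHealthCheck{MaxFailures: 3, EnforcingConsecutive5xx: &v}
        var b passiveHealthCheck
        a.deepCopyInto(&b)
        *b.EnforcingConsecutive5xx = 50
        fmt.Println(*a.EnforcingConsecutive5xx) // 100: the copy does not alias the original
    }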
-func (in *ServiceResolverFailoverTarget) DeepCopy() *ServiceResolverFailoverTarget { - if in == nil { - return nil - } - out := new(ServiceResolverFailoverTarget) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ServiceResolverList) DeepCopyInto(out *ServiceResolverList) { *out = *in @@ -2245,7 +2200,7 @@ func (in *Upstream) DeepCopyInto(out *Upstream) { if in.PassiveHealthCheck != nil { in, out := &in.PassiveHealthCheck, &out.PassiveHealthCheck *out = new(PassiveHealthCheck) - (*in).DeepCopyInto(*out) + **out = **in } out.MeshGateway = in.MeshGateway } diff --git a/control-plane/build-support/functions/10-util.sh b/control-plane/build-support/functions/10-util.sh index b807d35397..90cfd9660e 100644 --- a/control-plane/build-support/functions/10-util.sh +++ b/control-plane/build-support/functions/10-util.sh @@ -643,7 +643,6 @@ function update_version_helm { # $1 - Path to the directory where the root of the Helm chart is # $2 - Version string # $3 - PreRelease version (if unset will become an empty string) - # $4 - Image base path # # Returns: # 0 - success @@ -670,10 +669,11 @@ function update_version_helm { then full_version="$2-$3" fi + local image_k8s="hashicorp\/consul-k8s-control-plane:$full_version" - sed_i ${SED_EXT} -e "s/(imageK8S:.*\/consul-k8s-control-plane:)[^\"]*/imageK8S: $4${full_version}/g" "${vfile}" + sed_i ${SED_EXT} -e "s/(imageK8S:[[:space:]]*hashicorp\/consul-k8s-control-plane:)[^\"]*/\1${full_version}/g" "${vfile}" sed_i ${SED_EXT} -e "s/(version:[[:space:]]*)[^\"]*/\1${full_version}/g" "${cfile}" - sed_i ${SED_EXT} -e "s/(image:.*\/consul-k8s-control-plane:)[^\"]*/image: $4${full_version}/g" "${cfile}" + sed_i ${SED_EXT} -e "s/(image:[[:space:]]*hashicorp\/consul-k8s-control-plane:)[^\"]*/\1${full_version}/g" "${cfile}" if test -z "$3" then @@ -778,7 +778,6 @@ function set_version { # $2 - The version of the release # $3 - The release date # $4 - The pre-release version - # $5 - The helm docker image base path # # # Returns: @@ -815,7 +814,7 @@ function set_version { fi status_stage "==> Updating Helm chart versions with version info: ${vers} "$4"" - if ! update_version_helm "${sdir}/charts/consul" "${vers}" "$4" "$5" + if ! update_version_helm "${sdir}/charts/consul" "${vers}" "$4" then unset_changelog_version "${sdir}" return 1 @@ -864,7 +863,7 @@ function prepare_release { # 0 - success # * - error echo "release version: " $1 $2 $3 $4 - set_version "$1" "$2" "$3" "$4" "hashicorp\/consul-k8s-control-plane:" + set_version "$1" "$2" "$3" "$4" set_changelog "$1" "$2" "$3" "$4" } @@ -885,7 +884,7 @@ function prepare_dev { local sdir="$1" set_changelog "$1" "$2" "$3" "$5" - set_version "$1" "$4" "$3" "dev" "docker.mirror.hashicorp.services\/hashicorppreview\/consul-k8s-control-plane:" + set_version "$1" "$4" "$3" "dev" status_stage "==> Adding new UNRELEASED label in CHANGELOG.md" add_unreleased_to_changelog "${sdir}" || return 1 diff --git a/control-plane/catalog/to-consul/consul_node_services_client.go b/control-plane/catalog/to-consul/consul_node_services_client.go new file mode 100644 index 0000000000..1e3d01003f --- /dev/null +++ b/control-plane/catalog/to-consul/consul_node_services_client.go @@ -0,0 +1,114 @@ +package catalog + +import ( + "fmt" + + "github.com/hashicorp/consul/api" +) + +// ConsulService is service registered in Consul. +type ConsulService struct { + // Namespace is the Consul namespace the service is registered in. 
+ // If namespaces are disabled this will always be the empty string even + // though the namespace is technically "default". + Namespace string + // Name is the name of the service in Consul. + Name string +} + +// ConsulNodeServicesClient is used to query for node services. +type ConsulNodeServicesClient interface { + // NodeServices returns consul services with the corresponding tag + // registered to the Consul node with nodeName. opts is used as the + // query options in the API call to consul. It returns the list of services + // (not service instances) and the query meta from the API call. + NodeServices(tag string, nodeName string, opts api.QueryOptions) ([]ConsulService, *api.QueryMeta, error) +} + +// PreNamespacesNodeServicesClient implements ConsulNodeServicesClient +// for Consul < 1.7 which does not support namespaces. +type PreNamespacesNodeServicesClient struct { + Client *api.Client +} + +// NodeServices returns Consul services tagged with +// tag registered on nodeName using a Consul API that is supported in +// Consul versions before 1.7. Consul versions after 1.7 still support +// this API but the API is not namespace-aware. +func (s *PreNamespacesNodeServicesClient) NodeServices( + tag string, + nodeName string, + opts api.QueryOptions) ([]ConsulService, *api.QueryMeta, error) { + // NOTE: We're not using tag filtering here so we can support Consul + // < 1.5. + node, meta, err := s.Client.Catalog().Node(nodeName, &opts) + if err != nil { + return nil, nil, err + } + if node == nil { + return nil, meta, nil + } + + var svcs []ConsulService + // seenServices is used to ensure the svcs list is unique. + seenServices := make(map[string]bool) + for _, svcInstance := range node.Services { + svcName := svcInstance.Service + if _, ok := seenServices[svcName]; ok { + continue + } + for _, svcTag := range svcInstance.Tags { + if svcTag == tag { + if _, ok := seenServices[svcName]; !ok { + svcs = append(svcs, ConsulService{ + // If namespaces are not enabled we use empty + // string. + Namespace: "", + Name: svcName, + }) + seenServices[svcName] = true + } + break + } + } + } + return svcs, meta, nil +} + +// NamespacesNodeServicesClient implements ConsulNodeServicesClient +// for Consul >= 1.7 which supports namespaces. +type NamespacesNodeServicesClient struct { + Client *api.Client +} + +// NodeServices returns Consul services tagged with +// tag registered on nodeName using a Consul API that is supported in +// Consul versions >= 1.7. If opts.Namespace is set to +// "*", services from all namespaces will be returned. +func (s *NamespacesNodeServicesClient) NodeServices( + tag string, + nodeName string, + opts api.QueryOptions) ([]ConsulService, *api.QueryMeta, error) { + opts.Filter = fmt.Sprintf("\"%s\" in Tags", tag) + nodeCatalog, meta, err := s.Client.Catalog().NodeServiceList(nodeName, &opts) + if err != nil { + return nil, nil, err + } + + var svcs []ConsulService + // seenServices is used to ensure the svcs list is unique. Its keys are + // /. 
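(In the NodeServices body that follows, the seenSvcs dedup map is keyed by namespace/service-name, as the fmt.Sprintf shows.) For the two implementations as a whole, a sketch of how a caller might pick between them; the helper name and flag are assumptions, though in consul-k8s the choice is effectively keyed off whether Consul namespaces are enabled:

    package wiring

    import (
        catalog "github.com/hashicorp/consul-k8s/control-plane/catalog/to-consul"
        "github.com/hashicorp/consul/api"
    )

    // newNodeServicesClient is a hypothetical helper: use the namespace-aware
    // implementation only when namespaces (Consul Enterprise 1.7+) are on.
    func newNodeServicesClient(client *api.Client, enableNamespaces bool) catalog.ConsulNodeServicesClient {
        if enableNamespaces {
            return &catalog.NamespacesNodeServicesClient{Client: client}
        }
        return &catalog.PreNamespacesNodeServicesClient{Client: client}
    }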
+ seenSvcs := make(map[string]bool) + for _, svcInstance := range nodeCatalog.Services { + svcName := svcInstance.Service + key := fmt.Sprintf("%s/%s", svcInstance.Namespace, svcName) + if _, ok := seenSvcs[key]; !ok { + svcs = append(svcs, ConsulService{ + Namespace: svcInstance.Namespace, + Name: svcName, + }) + seenSvcs[key] = true + } + } + return svcs, meta, nil +} diff --git a/control-plane/catalog/to-consul/consul_node_services_client_ent_test.go b/control-plane/catalog/to-consul/consul_node_services_client_ent_test.go new file mode 100644 index 0000000000..ac570948f5 --- /dev/null +++ b/control-plane/catalog/to-consul/consul_node_services_client_ent_test.go @@ -0,0 +1,362 @@ +//go:build enterprise + +package catalog + +import ( + "testing" + + "github.com/hashicorp/consul/api" + "github.com/hashicorp/consul/sdk/testutil" + "github.com/stretchr/testify/require" +) + +// Test the Consul 1.7 client against Consul Enterprise. +func TestNamespacesNodeServicesClient_NodeServices(t *testing.T) { + t.Parallel() + cases := map[string]struct { + ConsulServices []api.CatalogRegistration + Exp []ConsulService + }{ + "no services": { + ConsulServices: nil, + Exp: nil, + }, + "no services on k8s node": { + ConsulServices: []api.CatalogRegistration{ + { + Node: "not-k8s", + Address: "127.0.0.1", + Service: &api.AgentService{ + ID: "svc-id", + Service: "svc", + }, + }, + }, + Exp: nil, + }, + "service with k8s tag on different node": { + ConsulServices: []api.CatalogRegistration{ + { + Node: "not-k8s", + Address: "127.0.0.1", + Service: &api.AgentService{ + ID: "svc-id", + Service: "svc", + Tags: []string{"k8s"}, + }, + }, + }, + Exp: nil, + }, + "service on k8s node without any tags": { + ConsulServices: []api.CatalogRegistration{ + { + Node: ConsulSyncNodeName, + Address: "127.0.0.1", + Service: &api.AgentService{ + ID: "svc-id", + Service: "svc", + Tags: nil, + }, + }, + }, + Exp: nil, + }, + "service on k8s node without k8s tag": { + ConsulServices: []api.CatalogRegistration{ + { + Node: ConsulSyncNodeName, + Address: "127.0.0.1", + Service: &api.AgentService{ + ID: "svc-id", + Service: "svc", + Tags: []string{"not-k8s", "foo"}, + }, + }, + }, + Exp: nil, + }, + "service on k8s node with k8s tag": { + ConsulServices: []api.CatalogRegistration{ + { + Node: ConsulSyncNodeName, + Address: "127.0.0.1", + Service: &api.AgentService{ + ID: "svc-id", + Service: "svc", + Tags: []string{"k8s"}, + }, + }, + }, + Exp: []ConsulService{ + { + Namespace: "default", + Name: "svc", + }, + }, + }, + "multiple services": { + ConsulServices: []api.CatalogRegistration{ + { + Node: ConsulSyncNodeName, + Address: "127.0.0.1", + Service: &api.AgentService{ + ID: "svc1-id", + Service: "svc1", + Tags: []string{"k8s"}, + }, + }, + { + Node: ConsulSyncNodeName, + Address: "127.0.0.1", + Service: &api.AgentService{ + ID: "svc2-id2", + Service: "svc2", + Tags: []string{"k8s"}, + }, + }, + }, + Exp: []ConsulService{ + { + Namespace: "default", + Name: "svc1", + }, + { + Namespace: "default", + Name: "svc2", + }, + }, + }, + "multiple service instances": { + ConsulServices: []api.CatalogRegistration{ + { + Node: ConsulSyncNodeName, + Address: "127.0.0.1", + Service: &api.AgentService{ + ID: "svc-id1", + Service: "svc", + Tags: []string{"k8s"}, + }, + }, + { + Node: ConsulSyncNodeName, + Address: "127.0.0.1", + Service: &api.AgentService{ + ID: "svc-id2", + Service: "svc", + Tags: []string{"k8s"}, + }, + }, + }, + Exp: []ConsulService{ + { + Namespace: "default", + Name: "svc", + }, + }, + }, + "services across multiple 
namespaces": { + ConsulServices: []api.CatalogRegistration{ + { + Node: ConsulSyncNodeName, + Address: "127.0.0.1", + Service: &api.AgentService{ + ID: "svc-id1", + Service: "svc", + Tags: []string{"k8s"}, + }, + }, + { + Node: ConsulSyncNodeName, + Address: "127.0.0.1", + Service: &api.AgentService{ + ID: "svc-ns-id", + Service: "svc-ns", + Tags: []string{"k8s"}, + Namespace: "ns", + }, + }, + }, + Exp: []ConsulService{ + { + Namespace: "default", + Name: "svc", + }, + { + Namespace: "ns", + Name: "svc-ns", + }, + }, + }, + "services with same name across multiple namespaces": { + ConsulServices: []api.CatalogRegistration{ + { + Node: ConsulSyncNodeName, + Address: "127.0.0.1", + Service: &api.AgentService{ + ID: "svc-id", + Service: "svc", + Tags: []string{"k8s"}, + }, + }, + { + Node: ConsulSyncNodeName, + Address: "127.0.0.1", + Service: &api.AgentService{ + ID: "svc-id", + Service: "svc", + Tags: []string{"k8s"}, + Namespace: "ns", + }, + }, + }, + Exp: []ConsulService{ + { + Namespace: "default", + Name: "svc", + }, + { + Namespace: "ns", + Name: "svc", + }, + }, + }, + "multiple services across multiple namespaces": { + ConsulServices: []api.CatalogRegistration{ + { + Node: ConsulSyncNodeName, + Address: "127.0.0.1", + Service: &api.AgentService{ + ID: "svc-id1", + Service: "svc", + Tags: []string{"k8s"}, + }, + }, + { + Node: ConsulSyncNodeName, + Address: "127.0.0.1", + Service: &api.AgentService{ + ID: "svc-id2", + Service: "svc", + Tags: []string{"k8s"}, + }, + }, + { + Node: ConsulSyncNodeName, + Address: "127.0.0.1", + Service: &api.AgentService{ + ID: "svc-id1", + Service: "svc", + Tags: []string{"k8s"}, + Namespace: "ns", + }, + }, + { + Node: ConsulSyncNodeName, + Address: "127.0.0.1", + Service: &api.AgentService{ + ID: "svc-id2", + Service: "svc", + Tags: []string{"k8s"}, + Namespace: "ns", + }, + }, + { + Node: ConsulSyncNodeName, + Address: "127.0.0.1", + Service: &api.AgentService{ + ID: "svc2-id1", + Service: "svc2", + Tags: []string{"k8s"}, + }, + }, + { + Node: ConsulSyncNodeName, + Address: "127.0.0.1", + Service: &api.AgentService{ + ID: "svc2-id2", + Service: "svc2", + Tags: []string{"k8s"}, + }, + }, + { + Node: ConsulSyncNodeName, + Address: "127.0.0.1", + Service: &api.AgentService{ + ID: "svc2-id1", + Service: "svc2", + Tags: []string{"k8s"}, + Namespace: "ns", + }, + }, + { + Node: ConsulSyncNodeName, + Address: "127.0.0.1", + Service: &api.AgentService{ + ID: "svc2-id2", + Service: "svc2", + Tags: []string{"k8s"}, + Namespace: "ns", + }, + }, + }, + Exp: []ConsulService{ + { + Namespace: "default", + Name: "svc", + }, + { + Namespace: "default", + Name: "svc2", + }, + { + Namespace: "ns", + Name: "svc", + }, + { + Namespace: "ns", + Name: "svc2", + }, + }, + }, + } + + for name, c := range cases { + if name != "multiple services across multiple namespaces" { + continue + } + t.Run(name, func(tt *testing.T) { + require := require.New(tt) + svr, err := testutil.NewTestServerConfigT(tt, nil) + require.NoError(err) + defer svr.Stop() + + consulClient, err := api.NewClient(&api.Config{ + Address: svr.HTTPAddr, + }) + require.NoError(err) + for _, registration := range c.ConsulServices { + if registration.Service.Namespace != "" && registration.Service.Namespace != "default" { + _, _, err = consulClient.Namespaces().Create(&api.Namespace{ + Name: registration.Service.Namespace, + }, nil) + require.NoError(err) + } + _, err = consulClient.Catalog().Register(®istration, nil) + require.NoError(err) + } + + client := NamespacesNodeServicesClient{ + Client: 
consulClient, + } + svcs, _, err := client.NodeServices("k8s", ConsulSyncNodeName, api.QueryOptions{ + Namespace: "*", + }) + require.NoError(err) + require.Len(svcs, len(c.Exp)) + for _, expSvc := range c.Exp { + require.Contains(svcs, expSvc) + } + }) + } +} diff --git a/control-plane/catalog/to-consul/consul_node_services_client_test.go b/control-plane/catalog/to-consul/consul_node_services_client_test.go new file mode 100644 index 0000000000..83354e640a --- /dev/null +++ b/control-plane/catalog/to-consul/consul_node_services_client_test.go @@ -0,0 +1,184 @@ +package catalog + +import ( + "testing" + + "github.com/hashicorp/consul/api" + "github.com/hashicorp/consul/sdk/testutil" + "github.com/stretchr/testify/require" +) + +func TestPreNamespacesNodeServicesClient_NodeServices(t *testing.T) { + t.Parallel() + cases := map[string]struct { + ConsulServices []api.CatalogRegistration + Exp []ConsulService + }{ + "no services": { + ConsulServices: nil, + Exp: nil, + }, + "no services on k8s node": { + ConsulServices: []api.CatalogRegistration{ + { + Node: "not-k8s", + Address: "127.0.0.1", + Service: &api.AgentService{ + ID: "svc-id", + Service: "svc", + }, + }, + }, + Exp: nil, + }, + "service with k8s tag on different node": { + ConsulServices: []api.CatalogRegistration{ + { + Node: "not-k8s", + Address: "127.0.0.1", + Service: &api.AgentService{ + ID: "svc-id", + Service: "svc", + Tags: []string{"k8s"}, + }, + }, + }, + Exp: nil, + }, + "service on k8s node without any tags": { + ConsulServices: []api.CatalogRegistration{ + { + Node: ConsulSyncNodeName, + Address: "127.0.0.1", + Service: &api.AgentService{ + ID: "svc-id", + Service: "svc", + Tags: nil, + }, + }, + }, + Exp: nil, + }, + "service on k8s node without k8s tag": { + ConsulServices: []api.CatalogRegistration{ + { + Node: ConsulSyncNodeName, + Address: "127.0.0.1", + Service: &api.AgentService{ + ID: "svc-id", + Service: "svc", + Tags: []string{"not-k8s", "foo"}, + }, + }, + }, + Exp: nil, + }, + "service on k8s node with k8s tag": { + ConsulServices: []api.CatalogRegistration{ + { + Node: ConsulSyncNodeName, + Address: "127.0.0.1", + Service: &api.AgentService{ + ID: "svc-id", + Service: "svc", + Tags: []string{"k8s"}, + }, + }, + }, + Exp: []ConsulService{ + { + Namespace: "", + Name: "svc", + }, + }, + }, + "multiple services": { + ConsulServices: []api.CatalogRegistration{ + { + Node: ConsulSyncNodeName, + Address: "127.0.0.1", + Service: &api.AgentService{ + ID: "svc1-id", + Service: "svc1", + Tags: []string{"k8s"}, + }, + }, + { + Node: ConsulSyncNodeName, + Address: "127.0.0.1", + Service: &api.AgentService{ + ID: "svc2-id2", + Service: "svc2", + Tags: []string{"k8s"}, + }, + }, + }, + Exp: []ConsulService{ + { + Namespace: "", + Name: "svc1", + }, + { + Namespace: "", + Name: "svc2", + }, + }, + }, + "multiple service instances": { + ConsulServices: []api.CatalogRegistration{ + { + Node: ConsulSyncNodeName, + Address: "127.0.0.1", + Service: &api.AgentService{ + ID: "svc-id1", + Service: "svc", + Tags: []string{"k8s"}, + }, + }, + { + Node: ConsulSyncNodeName, + Address: "127.0.0.1", + Service: &api.AgentService{ + ID: "svc-id2", + Service: "svc", + Tags: []string{"k8s"}, + }, + }, + }, + Exp: []ConsulService{ + { + Namespace: "", + Name: "svc", + }, + }, + }, + } + + for name, c := range cases { + t.Run(name, func(tt *testing.T) { + require := require.New(tt) + svr, err := testutil.NewTestServerConfigT(tt, nil) + require.NoError(err) + defer svr.Stop() + + consulClient, err := api.NewClient(&api.Config{ + Address: 
svr.HTTPAddr, + }) + require.NoError(err) + for _, registration := range c.ConsulServices { + _, err = consulClient.Catalog().Register(®istration, nil) + require.NoError(err) + } + + client := PreNamespacesNodeServicesClient{ + Client: consulClient, + } + svcs, _, err := client.NodeServices("k8s", ConsulSyncNodeName, api.QueryOptions{}) + require.NoError(err) + require.Len(svcs, len(c.Exp)) + for _, expSvc := range c.Exp { + require.Contains(svcs, expSvc) + } + }) + } +} diff --git a/control-plane/catalog/to-consul/resource.go b/control-plane/catalog/to-consul/resource.go index 96538510d1..239e2f5db9 100644 --- a/control-plane/catalog/to-consul/resource.go +++ b/control-plane/catalog/to-consul/resource.go @@ -493,7 +493,7 @@ func (t *ServiceResource) generateRegistrations(key string) { continue } - if _, ok = seen[addr]; ok { + if _, ok := seen[addr]; ok { continue } seen[addr] = struct{}{} diff --git a/control-plane/catalog/to-consul/syncer.go b/control-plane/catalog/to-consul/syncer.go index 19e0aaca6f..2e2edad61a 100644 --- a/control-plane/catalog/to-consul/syncer.go +++ b/control-plane/catalog/to-consul/syncer.go @@ -2,13 +2,11 @@ package catalog import ( "context" - "fmt" "sync" "time" "github.com/cenkalti/backoff" mapset "github.com/deckarep/golang-set" - "github.com/hashicorp/consul-k8s/control-plane/consul" "github.com/hashicorp/consul-k8s/control-plane/namespaces" "github.com/hashicorp/consul/api" "github.com/hashicorp/go-hclog" @@ -38,12 +36,8 @@ type Syncer interface { // services and ensures the local set of registrations represents the // source of truth, overwriting any external changes to the services. type ConsulSyncer struct { - // ConsulClientConfig is the config for the Consul API client. - ConsulClientConfig *consul.Config - // ConsulServerConnMgr is the watcher for the Consul server addresses. - ConsulServerConnMgr consul.ServerConnectionManager - - Log hclog.Logger + Client *api.Client + Log hclog.Logger // EnableNamespaces indicates that a user is running Consul Enterprise // with version 1.7+ which is namespace aware. It enables Consul namespaces, @@ -75,6 +69,10 @@ type ConsulSyncer struct { // The Consul node name to register services with. ConsulNodeName string + // ConsulNodeServicesClient is used to list services for a node. We use a + // separate client for this API call that handles older version of Consul. + ConsulNodeServicesClient ConsulNodeServicesClient + lock sync.Mutex once sync.Once @@ -174,7 +172,6 @@ func (s *ConsulSyncer) watchReapableServices(ctx context.Context) { AllowStale: true, WaitIndex: 1, WaitTime: 1 * time.Minute, - Filter: fmt.Sprintf("\"%s\" in Tags", s.ConsulK8STag), } if s.EnableNamespaces { @@ -186,17 +183,11 @@ func (s *ConsulSyncer) watchReapableServices(ctx context.Context) { minWait := s.SyncPeriod / 4 minWaitCh := time.After(0) for { - // Create a new consul client. 
- consulClient, err := consul.NewClientFromConnMgr(s.ConsulClientConfig, s.ConsulServerConnMgr) - if err != nil { - s.Log.Error("failed to create Consul API client", "err", err) - return - } - - var services *api.CatalogNodeServiceList + var services []ConsulService var meta *api.QueryMeta - err = backoff.Retry(func() error { - services, meta, err = consulClient.Catalog().NodeServiceList(s.ConsulNodeName, opts) + err := backoff.Retry(func() error { + var err error + services, meta, err = s.ConsulNodeServicesClient.NodeServices(s.ConsulK8STag, s.ConsulNodeName, *opts) return err }, backoff.WithContext(backoff.NewExponentialBackOff(), ctx)) @@ -226,30 +217,25 @@ func (s *ConsulSyncer) watchReapableServices(ctx context.Context) { s.lock.Lock() // Go through the service array and find services that should be reaped - for _, service := range services.Services { + for _, service := range services { // Check that the namespace exists in the valid service names map // before checking whether it contains the service - svcNs := service.Namespace - if !s.EnableNamespaces { - // Set namespace to empty when namespaces are not enabled. - svcNs = "" - } - if _, ok := s.serviceNames[svcNs]; ok { + if _, ok := s.serviceNames[service.Namespace]; ok { // We only care if we don't know about this service at all. - if s.serviceNames[svcNs].Contains(service.Service) { + if s.serviceNames[service.Namespace].Contains(service.Name) { s.Log.Debug("[watchReapableServices] serviceNames contains service", - "namespace", svcNs, - "service-name", service.Service) + "namespace", service.Namespace, + "service-name", service.Name) continue } } s.Log.Info("invalid service found, scheduling for delete", - "service-name", service.Service, "service-id", service.ID, "service-consul-namespace", svcNs) - if err = s.scheduleReapServiceLocked(service.Service, svcNs); err != nil { + "service-name", service.Name, "service-consul-namespace", service.Namespace) + if err := s.scheduleReapServiceLocked(service.Name, service.Namespace); err != nil { s.Log.Info("error querying service for delete", - "service-name", service.Service, - "service-consul-namespace", svcNs, + "service-name", service.Name, + "service-consul-namespace", service.Namespace, "err", err) } } @@ -283,16 +269,11 @@ func (s *ConsulSyncer) watchService(ctx context.Context, name, namespace string) queryOpts.Namespace = namespace } - // Create a new consul client. - consulClient, err := consul.NewClientFromConnMgr(s.ConsulClientConfig, s.ConsulServerConnMgr) - if err != nil { - s.Log.Error("failed to create Consul API client; will retry", "err", err) - continue - } // Wait for service changes var services []*api.CatalogService - err = backoff.Retry(func() error { - services, _, err = consulClient.Catalog().Service(name, s.ConsulK8STag, queryOpts) + err := backoff.Retry(func() error { + var err error + services, _, err = s.Client.Catalog().Service(name, s.ConsulK8STag, queryOpts) return err }, backoff.WithContext(backoff.NewExponentialBackOff(), ctx)) if err != nil { @@ -344,15 +325,8 @@ func (s *ConsulSyncer) scheduleReapServiceLocked(name, namespace string) error { opts.Namespace = namespace } - // Create a new consul client. 
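Condensed, the pattern this hunk settles on: one long-lived client behind the ConsulNodeServicesClient interface, with the catalog call wrapped in context-aware exponential backoff so transient errors retry instead of killing the watch goroutine. A sketch with the surrounding loop plumbing stripped away:

    package catalogsketch

    import (
        "context"

        "github.com/cenkalti/backoff"
        catalog "github.com/hashicorp/consul-k8s/control-plane/catalog/to-consul"
        "github.com/hashicorp/consul/api"
    )

    // fetchNodeServices mirrors the retry wrapper in watchReapableServices;
    // logging and wait-index bookkeeping are omitted.
    func fetchNodeServices(ctx context.Context, c catalog.ConsulNodeServicesClient,
        tag, node string, opts api.QueryOptions) ([]catalog.ConsulService, *api.QueryMeta, error) {
        var services []catalog.ConsulService
        var meta *api.QueryMeta
        err := backoff.Retry(func() error {
            var err error
            services, meta, err = c.NodeServices(tag, node, opts)
            return err
        }, backoff.WithContext(backoff.NewExponentialBackOff(), ctx))
        return services, meta, err
    }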
- consulClient, err := consul.NewClientFromConnMgr(s.ConsulClientConfig, s.ConsulServerConnMgr) - if err != nil { - s.Log.Error("failed to create Consul API client", "err", err) - return err - } - // Only consider services that are tagged from k8s - services, _, err := consulClient.Catalog().Service(name, s.ConsulK8STag, &opts) + services, _, err := s.Client.Catalog().Service(name, s.ConsulK8STag, &opts) if err != nil { return err } @@ -383,13 +357,6 @@ func (s *ConsulSyncer) syncFull(ctx context.Context) { s.lock.Lock() defer s.lock.Unlock() - // Create a new consul client. - consulClient, err := consul.NewClientFromConnMgr(s.ConsulClientConfig, s.ConsulServerConnMgr) - if err != nil { - s.Log.Error("failed to create Consul API client", "err", err) - return - } - s.Log.Info("registering services") // Update the service watchers @@ -424,13 +391,13 @@ func (s *ConsulSyncer) syncFull(ctx context.Context) { } } - // Do all deregistrations first. + // Do all deregistrations first for _, r := range s.deregs { s.Log.Info("deregistering service", "node-name", r.Node, "service-id", r.ServiceID, "service-consul-namespace", r.Namespace) - _, err = consulClient.Catalog().Deregister(r, nil) + _, err := s.Client.Catalog().Deregister(r, nil) if err != nil { s.Log.Warn("error deregistering service", "node-name", r.Node, @@ -448,7 +415,7 @@ func (s *ConsulSyncer) syncFull(ctx context.Context) { for _, services := range s.namespaces { for _, r := range services { if s.EnableNamespaces { - _, err = namespaces.EnsureExists(consulClient, r.Service.Namespace, s.CrossNamespaceACLPolicy) + _, err := namespaces.EnsureExists(s.Client, r.Service.Namespace, s.CrossNamespaceACLPolicy) if err != nil { s.Log.Warn("error checking and creating Consul namespace", "node-name", r.Node, @@ -459,8 +426,8 @@ func (s *ConsulSyncer) syncFull(ctx context.Context) { } } - // Register the service. 
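The syncFull hunks around this point keep the original ordering: all deregistrations are applied before any registrations, and when namespaces are enabled the destination namespace is created on demand before registering into it. The shape of the method after this revert, as a fragment with logging detail trimmed (s is the *ConsulSyncer shown above):

    // Sketch only; not the full method body.
    for _, r := range s.deregs {
        if _, err := s.Client.Catalog().Deregister(r, nil); err != nil {
            s.Log.Warn("error deregistering service", "err", err)
        }
    }
    for _, services := range s.namespaces {
        for _, r := range services {
            if s.EnableNamespaces {
                if _, err := namespaces.EnsureExists(s.Client, r.Service.Namespace, s.CrossNamespaceACLPolicy); err != nil {
                    continue // the real code logs a warning and moves on, as above
                }
            }
            if _, err := s.Client.Catalog().Register(r, nil); err != nil {
                s.Log.Warn("error registering service", "err", err)
            }
        }
    }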
- _, err = consulClient.Catalog().Register(r, nil) + // Register the service + _, err := s.Client.Catalog().Register(r, nil) if err != nil { s.Log.Warn("error registering service", "node-name", r.Node, diff --git a/control-plane/catalog/to-consul/syncer_ent_test.go b/control-plane/catalog/to-consul/syncer_ent_test.go index fbe2cbd494..2cc206f908 100644 --- a/control-plane/catalog/to-consul/syncer_ent_test.go +++ b/control-plane/catalog/to-consul/syncer_ent_test.go @@ -5,8 +5,8 @@ package catalog import ( "testing" - "github.com/hashicorp/consul-k8s/control-plane/helper/test" "github.com/hashicorp/consul/api" + "github.com/hashicorp/consul/sdk/testutil" "github.com/hashicorp/consul/sdk/testutil/retry" "github.com/stretchr/testify/require" ) @@ -15,11 +15,20 @@ import ( func TestConsulSyncer_ConsulNamespaces(t *testing.T) { t.Parallel() - testClient := test.TestServerWithMockConnMgrWatcher(t, nil) - client := testClient.APIClient + a, err := testutil.NewTestServerConfigT(t, nil) + require.NoError(t, err) + defer a.Stop() + + client, err := api.NewClient(&api.Config{ + Address: a.HTTPAddr, + }) + require.NoError(t, err) - s, closer := testConsulSyncerWithConfig(testClient, func(s *ConsulSyncer) { + s, closer := testConsulSyncerWithConfig(client, func(s *ConsulSyncer) { s.EnableNamespaces = true + s.ConsulNodeServicesClient = &NamespacesNodeServicesClient{ + Client: client, + } }) defer closer() @@ -57,11 +66,20 @@ func TestConsulSyncer_ConsulNamespaces(t *testing.T) { func TestConsulSyncer_ReapConsulNamespace(t *testing.T) { t.Parallel() - testClient := test.TestServerWithMockConnMgrWatcher(t, nil) - client := testClient.APIClient + a, err := testutil.NewTestServerConfigT(t, nil) + require.NoError(t, err) + defer a.Stop() - s, closer := testConsulSyncerWithConfig(testClient, func(s *ConsulSyncer) { + client, err := api.NewClient(&api.Config{ + Address: a.HTTPAddr, + }) + require.NoError(t, err) + + s, closer := testConsulSyncerWithConfig(client, func(s *ConsulSyncer) { s.EnableNamespaces = true + s.ConsulNodeServicesClient = &NamespacesNodeServicesClient{ + Client: client, + } }) defer closer() @@ -117,11 +135,18 @@ func TestConsulSyncer_ReapConsulNamespace(t *testing.T) { func TestConsulSyncer_reapServiceInstanceNamespacesEnabled(t *testing.T) { t.Parallel() - testClient := test.TestServerWithMockConnMgrWatcher(t, nil) - client := testClient.APIClient + a, err := testutil.NewTestServerConfigT(t, nil) + require.NoError(t, err) + defer a.Stop() - s, closer := testConsulSyncerWithConfig(testClient, func(s *ConsulSyncer) { + client, err := api.NewClient(&api.Config{ + Address: a.HTTPAddr, + }) + s, closer := testConsulSyncerWithConfig(client, func(s *ConsulSyncer) { s.EnableNamespaces = true + s.ConsulNodeServicesClient = &NamespacesNodeServicesClient{ + Client: client, + } }) defer closer() @@ -132,7 +157,7 @@ func TestConsulSyncer_reapServiceInstanceNamespacesEnabled(t *testing.T) { }) // Create an invalid instance service directly in Consul. 
- _, _, err := client.Namespaces().Create(&api.Namespace{ + _, _, err = client.Namespaces().Create(&api.Namespace{ Name: "foo", }, nil) require.NoError(t, err) diff --git a/control-plane/catalog/to-consul/syncer_test.go b/control-plane/catalog/to-consul/syncer_test.go index d8d9b0f402..f42f6fee46 100644 --- a/control-plane/catalog/to-consul/syncer_test.go +++ b/control-plane/catalog/to-consul/syncer_test.go @@ -5,14 +5,11 @@ import ( "fmt" "net/http" "net/http/httptest" - "net/url" - "strconv" "testing" "time" - "github.com/hashicorp/consul-k8s/control-plane/consul" - "github.com/hashicorp/consul-k8s/control-plane/helper/test" "github.com/hashicorp/consul/api" + "github.com/hashicorp/consul/sdk/testutil" "github.com/hashicorp/consul/sdk/testutil/retry" "github.com/hashicorp/go-hclog" "github.com/stretchr/testify/require" @@ -26,13 +23,19 @@ const ( func TestConsulSyncer_register(t *testing.T) { t.Parallel() + require := require.New(t) // Set up server, client, syncer - // Create test consulServer server. - testClient := test.TestServerWithMockConnMgrWatcher(t, nil) - client := testClient.APIClient + a, err := testutil.NewTestServerConfigT(t, nil) + require.NoError(err) + defer a.Stop() - s, closer := testConsulSyncer(testClient) + client, err := api.NewClient(&api.Config{ + Address: a.HTTPAddr, + }) + require.NoError(err) + + s, closer := testConsulSyncer(client) defer closer() // Sync @@ -54,9 +57,9 @@ func TestConsulSyncer_register(t *testing.T) { }) // Verify the settings - require.Equal(t, "k8s-sync", service.Node) - require.Equal(t, "bar", service.ServiceName) - require.Equal(t, "127.0.0.1", service.Address) + require.Equal("k8s-sync", service.Node) + require.Equal("bar", service.ServiceName) + require.Equal("127.0.0.1", service.Address) } // Test that the syncer reaps individual invalid service instances. 
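Every converted test in this family uses the same replacement scaffold: boot a real Consul test server from consul/sdk and dial it with a plain API client. Extracted as a hypothetical helper (the name and the t.Cleanup usage are ours, not from the patch):

    package catalog

    import (
        "testing"

        "github.com/hashicorp/consul/api"
        "github.com/hashicorp/consul/sdk/testutil"
        "github.com/stretchr/testify/require"
    )

    func newTestConsulClient(t *testing.T) *api.Client {
        svr, err := testutil.NewTestServerConfigT(t, nil)
        require.NoError(t, err)
        t.Cleanup(func() { _ = svr.Stop() })

        client, err := api.NewClient(&api.Config{Address: svr.HTTPAddr})
        require.NoError(t, err)
        return client
    }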
@@ -66,11 +69,19 @@ func TestConsulSyncer_reapServiceInstance(t *testing.T) { for _, node := range []string{ConsulSyncNodeName, "test-node"} { name := fmt.Sprintf("consul node name: %s", node) t.Run(name, func(t *testing.T) { + require := require.New(t) + // Set up server, client, syncer - testClient := test.TestServerWithMockConnMgrWatcher(t, nil) - client := testClient.APIClient + a, err := testutil.NewTestServerConfigT(t, nil) + require.NoError(err) + defer a.Stop() - s, closer := testConsulSyncer(testClient) + client, err := api.NewClient(&api.Config{ + Address: a.HTTPAddr, + }) + require.NoError(err) + + s, closer := testConsulSyncer(client) defer closer() // Sync @@ -80,7 +91,7 @@ func TestConsulSyncer_reapServiceInstance(t *testing.T) { // Wait for the first service retry.Run(t, func(r *retry.R) { - services, _, err := client.Catalog().Service("bar", s.ConsulK8STag, nil) + services, _, err := client.Catalog().Service("bar", "", nil) if err != nil { r.Fatalf("err: %s", err) } @@ -92,14 +103,13 @@ func TestConsulSyncer_reapServiceInstance(t *testing.T) { // Create an invalid service directly in Consul svc := testRegistration(node, "bar", "default") svc.Service.ID = serviceID(node, "bar2") - fmt.Println("invalid service id", svc.Service.ID) - _, err := client.Catalog().Register(svc, nil) - require.NoError(t, err) + _, err = client.Catalog().Register(svc, nil) + require.NoError(err) // Valid service should exist var service *api.CatalogService retry.Run(t, func(r *retry.R) { - services, _, err := client.Catalog().Service("bar", s.ConsulK8STag, nil) + services, _, err := client.Catalog().Service("bar", "", nil) if err != nil { r.Fatalf("err: %s", err) } @@ -110,10 +120,10 @@ func TestConsulSyncer_reapServiceInstance(t *testing.T) { }) // Verify the settings - require.Equal(t, serviceID(node, "bar"), service.ServiceID) - require.Equal(t, node, service.Node) - require.Equal(t, "bar", service.ServiceName) - require.Equal(t, "127.0.0.1", service.Address) + require.Equal(serviceID(node, "bar"), service.ServiceID) + require.Equal(node, service.Node) + require.Equal("bar", service.ServiceName) + require.Equal("127.0.0.1", service.Address) }) } } @@ -126,10 +136,17 @@ func TestConsulSyncer_reapService(t *testing.T) { sourceK8sNamespaceAnnotations := []string{"", "other", "default"} for _, k8sNS := range sourceK8sNamespaceAnnotations { t.Run(k8sNS, func(tt *testing.T) { - testClient := test.TestServerWithMockConnMgrWatcher(t, nil) - client := testClient.APIClient + // Set up server, client, syncer + a, err := testutil.NewTestServerConfigT(tt, nil) + require.NoError(tt, err) + defer a.Stop() - s, closer := testConsulSyncer(testClient) + client, err := api.NewClient(&api.Config{ + Address: a.HTTPAddr, + }) + require.NoError(tt, err) + + s, closer := testConsulSyncer(client) defer closer() // Run the sync with a test service @@ -141,7 +158,7 @@ func TestConsulSyncer_reapService(t *testing.T) { // expect it to be deleted. 
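The assertions in these tests poll Consul through consul/sdk's retry helper until the catalog converges; the recurring shape, pulled out into a hypothetical helper:

    package catalog

    import (
        "testing"

        "github.com/hashicorp/consul/api"
        "github.com/hashicorp/consul/sdk/testutil/retry"
    )

    func waitForServiceCount(t *testing.T, client *api.Client, name string, want int) {
        retry.Run(t, func(r *retry.R) {
            services, _, err := client.Catalog().Service(name, "", nil)
            if err != nil {
                r.Fatalf("err: %s", err)
            }
            if len(services) != want {
                r.Fatalf("got %d instances of %q, want %d", len(services), name, want)
            }
        })
    }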
svc := testRegistration(ConsulSyncNodeName, "baz", "default") svc.Service.Meta[ConsulK8SNS] = k8sNS - _, err := client.Catalog().Register(svc, nil) + _, err = client.Catalog().Register(svc, nil) require.NoError(tt, err) retry.Run(tt, func(r *retry.R) { @@ -168,9 +185,14 @@ func TestConsulSyncer_reapService(t *testing.T) { func TestConsulSyncer_noReapingUntilInitialSync(t *testing.T) { t.Parallel() - testClient := test.TestServerWithMockConnMgrWatcher(t, nil) - client := testClient.APIClient - s, closer := testConsulSyncerWithConfig(testClient, func(s *ConsulSyncer) { + a, err := testutil.NewTestServerConfigT(t, nil) + require.NoError(t, err) + defer a.Stop() + client, err := api.NewClient(&api.Config{ + Address: a.HTTPAddr, + }) + require.NoError(t, err) + s, closer := testConsulSyncerWithConfig(client, func(s *ConsulSyncer) { // Set the sync period to 5ms so we know it will have run at least once // after we wait 100ms. s.SyncPeriod = 5 * time.Millisecond @@ -181,7 +203,7 @@ func TestConsulSyncer_noReapingUntilInitialSync(t *testing.T) { // synthetic sync node and has the sync-associated tag, we expect // it to be deleted but not until the initial sync is performed. svc := testRegistration(ConsulSyncNodeName, "baz", "default") - _, err := client.Catalog().Register(svc, nil) + _, err = client.Catalog().Register(svc, nil) require.NoError(t, err) // We wait until the syncer has had the time to delete the service. @@ -198,7 +220,7 @@ func TestConsulSyncer_noReapingUntilInitialSync(t *testing.T) { s.Sync(nil) // The service should get deleted. retry.Run(t, func(r *retry.R) { - bazInstances, _, err = client.Catalog().Service("baz", "", nil) + bazInstances, _, err := client.Catalog().Service("baz", "", nil) require.NoError(r, err) require.Len(r, bazInstances, 0) }) @@ -222,19 +244,12 @@ func TestConsulSyncer_stopsGracefully(t *testing.T) { })) defer consulServer.Close() - parsedURL, err := url.Parse(consulServer.URL) - require.NoError(t, err) - - port, err := strconv.Atoi(parsedURL.Port()) - require.NoError(t, err) - - testClient := &test.TestServerClient{ - Cfg: &consul.Config{APIClientConfig: &api.Config{}, HTTPPort: port}, - Watcher: test.MockConnMgrForIPAndPort(parsedURL.Host, port), - } - // Start the syncer. - s, closer := testConsulSyncer(testClient) + client, err := api.NewClient(&api.Config{ + Address: consulServer.URL, + }) + require.NoError(t, err) + s, closer := testConsulSyncer(client) // Sync s.Sync([]*api.CatalogRegistration{ @@ -267,21 +282,23 @@ func testRegistration(node, service, k8sSrcNamespace string) *api.CatalogRegistr } } -func testConsulSyncer(testClient *test.TestServerClient) (*ConsulSyncer, func()) { - return testConsulSyncerWithConfig(testClient, func(syncer *ConsulSyncer) {}) +func testConsulSyncer(client *api.Client) (*ConsulSyncer, func()) { + return testConsulSyncerWithConfig(client, func(syncer *ConsulSyncer) {}) } // testConsulSyncerWithConfig starts a consul syncer that can be configured // prior to starting via the configurator method. 
-func testConsulSyncerWithConfig(testClient *test.TestServerClient, configurator func(*ConsulSyncer)) (*ConsulSyncer, func()) { +func testConsulSyncerWithConfig(client *api.Client, configurator func(*ConsulSyncer)) (*ConsulSyncer, func()) { s := &ConsulSyncer{ - ConsulClientConfig: testClient.Cfg, - ConsulServerConnMgr: testClient.Watcher, - Log: hclog.Default(), - SyncPeriod: 200 * time.Millisecond, - ServicePollPeriod: 50 * time.Millisecond, - ConsulK8STag: TestConsulK8STag, - ConsulNodeName: ConsulSyncNodeName, + Client: client, + Log: hclog.Default(), + SyncPeriod: 200 * time.Millisecond, + ServicePollPeriod: 50 * time.Millisecond, + ConsulK8STag: TestConsulK8STag, + ConsulNodeName: ConsulSyncNodeName, + ConsulNodeServicesClient: &PreNamespacesNodeServicesClient{ + Client: client, + }, } configurator(s) s.init() diff --git a/control-plane/catalog/to-k8s/source.go b/control-plane/catalog/to-k8s/source.go index 5a384e760a..410dbc60da 100644 --- a/control-plane/catalog/to-k8s/source.go +++ b/control-plane/catalog/to-k8s/source.go @@ -6,7 +6,6 @@ import ( "time" "github.com/cenkalti/backoff" - "github.com/hashicorp/consul-k8s/control-plane/consul" "github.com/hashicorp/consul/api" "github.com/hashicorp/go-hclog" ) @@ -14,15 +13,12 @@ import ( // Source is the source for the sync that watches Consul services and // updates a Sink whenever the set of services to register changes. type Source struct { - // ConsulClientConfig is the config for the Consul API client. - ConsulClientConfig *consul.Config - // ConsulServerConnMgr is the watcher for the Consul server addresses. - ConsulServerConnMgr consul.ServerConnectionManager - Domain string // Consul DNS domain - Sink Sink // Sink is the sink to update with services - Prefix string // Prefix is a prefix to prepend to services - Log hclog.Logger // Logger - ConsulK8STag string // The tag value for services registered + Client *api.Client // Consul API client + Domain string // Consul DNS domain + Sink Sink // Sink is the sink to update with services + Prefix string // Prefix is a prefix to prepend to services + Log hclog.Logger // Logger + ConsulK8STag string // The tag value for services registered } // Run is the long-running runloop for watching Consul services and @@ -34,17 +30,12 @@ func (s *Source) Run(ctx context.Context) { WaitTime: 1 * time.Minute, }).WithContext(ctx) for { - consulClient, err := consul.NewClientFromConnMgr(s.ConsulClientConfig, s.ConsulServerConnMgr) - if err != nil { - s.Log.Error("failed to create Consul API client", "err", err) - return - } - // Get all services with tags. 
var serviceMap map[string][]string var meta *api.QueryMeta - err = backoff.Retry(func() error { - serviceMap, meta, err = consulClient.Catalog().Services(opts) + err := backoff.Retry(func() error { + var err error + serviceMap, meta, err = s.Client.Catalog().Services(opts) return err }, backoff.WithContext(backoff.NewExponentialBackOff(), ctx)) diff --git a/control-plane/catalog/to-k8s/source_test.go b/control-plane/catalog/to-k8s/source_test.go index ca00a1e954..d3ed4a8a26 100644 --- a/control-plane/catalog/to-k8s/source_test.go +++ b/control-plane/catalog/to-k8s/source_test.go @@ -6,9 +6,8 @@ import ( "testing" toconsul "github.com/hashicorp/consul-k8s/control-plane/catalog/to-consul" - "github.com/hashicorp/consul-k8s/control-plane/consul" - "github.com/hashicorp/consul-k8s/control-plane/helper/test" "github.com/hashicorp/consul/api" + "github.com/hashicorp/consul/sdk/testutil" "github.com/hashicorp/consul/sdk/testutil/retry" "github.com/hashicorp/go-hclog" "github.com/stretchr/testify/require" @@ -17,20 +16,27 @@ import ( // Test that the source works with services registered before hand. func TestSource_initServices(t *testing.T) { t.Parallel() + require := require.New(t) // Set up server, client - testClient := test.TestServerWithMockConnMgrWatcher(t, nil) - client := testClient.APIClient + a, err := testutil.NewTestServerConfigT(t, nil) + require.NoError(err) + defer a.Stop() + + client, err := api.NewClient(&api.Config{ + Address: a.HTTPAddr, + }) + require.NoError(err) // Create services before the source is running - _, err := client.Catalog().Register(testRegistration("hostA", "svcA", nil), nil) - require.NoError(t, err) + _, err = client.Catalog().Register(testRegistration("hostA", "svcA", nil), nil) + require.NoError(err) _, err = client.Catalog().Register(testRegistration("hostB", "svcA", nil), nil) - require.NoError(t, err) + require.NoError(err) _, err = client.Catalog().Register(testRegistration("hostB", "svcB", nil), nil) - require.NoError(t, err) + require.NoError(err) - _, sink, closer := testSource(testClient.Cfg, testClient.Watcher) + _, sink, closer := testSource(client) defer closer() var actual map[string]string @@ -48,29 +54,36 @@ func TestSource_initServices(t *testing.T) { "svcA": "svcA.service.test", "svcB": "svcB.service.test", } - require.Equal(t, expected, actual) + require.Equal(expected, actual) } // Test that we can specify a prefix to prepend to all destination services. 
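Source.Run above leans on Consul blocking queries: Catalog().Services holds the HTTP request open for up to WaitTime until the index advances past WaitIndex, and elsewhere in the loop meta.LastIndex is stored back into the query options. A minimal sketch of that mechanic, assuming that index handling:

    package sinksketch

    import (
        "context"
        "time"

        "github.com/hashicorp/consul/api"
    )

    // watchServices condenses the blocking-query loop; backoff, tag
    // filtering, and the sink hand-off are omitted.
    func watchServices(ctx context.Context, client *api.Client, handle func(map[string][]string)) {
        opts := (&api.QueryOptions{WaitIndex: 1, WaitTime: time.Minute}).WithContext(ctx)
        for {
            serviceMap, meta, err := client.Catalog().Services(opts)
            if err != nil {
                return // the real loop retries with backoff instead
            }
            opts.WaitIndex = meta.LastIndex // block until the catalog changes again
            handle(serviceMap)
            if ctx.Err() != nil {
                return
            }
        }
    }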
func TestSource_prefix(t *testing.T) { t.Parallel() + require := require.New(t) // Set up server, client - testClient := test.TestServerWithMockConnMgrWatcher(t, nil) - client := testClient.APIClient + a, err := testutil.NewTestServerConfigT(t, nil) + require.NoError(err) + defer a.Stop() - _, sink, closer := testSourceWithConfig(testClient.Cfg, testClient.Watcher, func(s *Source) { + client, err := api.NewClient(&api.Config{ + Address: a.HTTPAddr, + }) + require.NoError(err) + + _, sink, closer := testSourceWithConfig(client, func(s *Source) { s.Prefix = "foo-" }) defer closer() // Create services before the source is running - _, err := client.Catalog().Register(testRegistration("hostA", "svcA", nil), nil) - require.NoError(t, err) + _, err = client.Catalog().Register(testRegistration("hostA", "svcA", nil), nil) + require.NoError(err) _, err = client.Catalog().Register(testRegistration("hostB", "svcA", nil), nil) - require.NoError(t, err) + require.NoError(err) _, err = client.Catalog().Register(testRegistration("hostB", "svcB", nil), nil) - require.NoError(t, err) + require.NoError(err) var actual map[string]string retry.Run(t, func(r *retry.R) { @@ -87,26 +100,33 @@ func TestSource_prefix(t *testing.T) { "foo-svcA": "svcA.service.test", "foo-svcB": "svcB.service.test", } - require.Equal(t, expected, actual) + require.Equal(expected, actual) } // Test that the source ignores K8S services. func TestSource_ignoreK8S(t *testing.T) { t.Parallel() + require := require.New(t) // Set up server, client - testClient := test.TestServerWithMockConnMgrWatcher(t, nil) - client := testClient.APIClient + a, err := testutil.NewTestServerConfigT(t, nil) + require.NoError(err) + defer a.Stop() + + client, err := api.NewClient(&api.Config{ + Address: a.HTTPAddr, + }) + require.NoError(err) // Create services before the source is running - _, err := client.Catalog().Register(testRegistration("hostA", "svcA", nil), nil) - require.NoError(t, err) + _, err = client.Catalog().Register(testRegistration("hostA", "svcA", nil), nil) + require.NoError(err) _, err = client.Catalog().Register(testRegistration("hostB", "svcA", nil), nil) - require.NoError(t, err) + require.NoError(err) _, err = client.Catalog().Register(testRegistration("hostB", "svcB", []string{toconsul.TestConsulK8STag}), nil) - require.NoError(t, err) + require.NoError(err) - _, sink, closer := testSource(testClient.Cfg, testClient.Watcher) + _, sink, closer := testSource(client) defer closer() var actual map[string]string @@ -123,27 +143,34 @@ func TestSource_ignoreK8S(t *testing.T) { "consul": "consul.service.test", "svcA": "svcA.service.test", } - require.Equal(t, expected, actual) + require.Equal(expected, actual) } // Test that the source deletes services properly. 
func TestSource_deleteService(t *testing.T) { // Unable to be run in parallel with other tests that // check for the existence of `consul.service.test` + require := require.New(t) // Set up server, client - testClient := test.TestServerWithMockConnMgrWatcher(t, nil) - client := testClient.APIClient + a, err := testutil.NewTestServerConfigT(t, nil) + require.NoError(err) + defer a.Stop() + + client, err := api.NewClient(&api.Config{ + Address: a.HTTPAddr, + }) + require.NoError(err) // Create services before the source is running - _, err := client.Catalog().Register(testRegistration("hostA", "svcA", nil), nil) - require.NoError(t, err) + _, err = client.Catalog().Register(testRegistration("hostA", "svcA", nil), nil) + require.NoError(err) _, err = client.Catalog().Register(testRegistration("hostB", "svcA", nil), nil) - require.NoError(t, err) + require.NoError(err) _, err = client.Catalog().Register(testRegistration("hostB", "svcB", nil), nil) - require.NoError(t, err) + require.NoError(err) - _, sink, closer := testSource(testClient.Cfg, testClient.Watcher) + _, sink, closer := testSource(client) defer closer() var actual map[string]string @@ -159,7 +186,7 @@ func TestSource_deleteService(t *testing.T) { // Delete the service _, err = client.Catalog().Deregister(&api.CatalogDeregistration{ Node: "hostB", ServiceID: "svcB"}, nil) - require.NoError(t, err) + require.NoError(err) retry.Run(t, func(r *retry.R) { sink.Lock() @@ -176,7 +203,7 @@ func TestSource_deleteService(t *testing.T) { "consul": "consul.service.test", "svcA": "svcA.service.test", } - require.Equal(t, expected, actual) + require.Equal(expected, actual) } // Test that the source deletes services properly. This case tests @@ -184,20 +211,27 @@ func TestSource_deleteService(t *testing.T) { // anything. func TestSource_deleteServiceInstance(t *testing.T) { t.Parallel() + require := require.New(t) // Set up server, client - testClient := test.TestServerWithMockConnMgrWatcher(t, nil) - client := testClient.APIClient + a, err := testutil.NewTestServerConfigT(t, nil) + require.NoError(err) + defer a.Stop() + + client, err := api.NewClient(&api.Config{ + Address: a.HTTPAddr, + }) + require.NoError(err) // Create services before the source is running - _, err := client.Catalog().Register(testRegistration("hostA", "svcA", nil), nil) - require.NoError(t, err) + _, err = client.Catalog().Register(testRegistration("hostA", "svcA", nil), nil) + require.NoError(err) _, err = client.Catalog().Register(testRegistration("hostB", "svcA", nil), nil) - require.NoError(t, err) + require.NoError(err) _, err = client.Catalog().Register(testRegistration("hostB", "svcB", nil), nil) - require.NoError(t, err) + require.NoError(err) - _, sink, closer := testSource(testClient.Cfg, testClient.Watcher) + _, sink, closer := testSource(client) defer closer() var actual map[string]string @@ -213,7 +247,7 @@ func TestSource_deleteServiceInstance(t *testing.T) { // Delete the service _, err = client.Catalog().Deregister(&api.CatalogDeregistration{ Node: "hostB", ServiceID: "svcA"}, nil) - require.NoError(t, err) + require.NoError(err) retry.Run(t, func(r *retry.R) { sink.Lock() @@ -238,21 +272,20 @@ func testRegistration(node, service string, tags []string) *api.CatalogRegistrat } // testSource creates a Source and Sink for testing. 
-func testSource(clientCfg *consul.Config, connMgr consul.ServerConnectionManager) (*Source, *TestSink, func()) { - return testSourceWithConfig(clientCfg, connMgr, func(source *Source) {}) +func testSource(client *api.Client) (*Source, *TestSink, func()) { + return testSourceWithConfig(client, func(source *Source) {}) } // testSourceWithConfig starts a Source that can be configured // prior to starting via the configurator method. -func testSourceWithConfig(clientCfg *consul.Config, connMgr consul.ServerConnectionManager, configurator func(*Source)) (*Source, *TestSink, func()) { +func testSourceWithConfig(client *api.Client, configurator func(*Source)) (*Source, *TestSink, func()) { sink := &TestSink{} s := &Source{ - ConsulClientConfig: clientCfg, - ConsulServerConnMgr: connMgr, - Domain: "test", - Sink: sink, - Log: hclog.Default(), - ConsulK8STag: toconsul.TestConsulK8STag, + Client: client, + Domain: "test", + Sink: sink, + Log: hclog.Default(), + ConsulK8STag: toconsul.TestConsulK8STag, } configurator(s) diff --git a/control-plane/cni/go.mod b/control-plane/cni/go.mod index 61ce532efb..660b720d43 100644 --- a/control-plane/cni/go.mod +++ b/control-plane/cni/go.mod @@ -1,6 +1,7 @@ module github.com/hashicorp/consul-k8s/control-plane/cni require ( + github.com/cenkalti/backoff v2.1.1+incompatible github.com/containernetworking/cni v1.1.1 github.com/containernetworking/plugins v1.1.1 github.com/hashicorp/consul/sdk v0.9.0 @@ -30,6 +31,7 @@ require ( github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/spf13/pflag v1.0.5 // indirect + github.com/stretchr/objx v0.1.0 // indirect golang.org/x/net v0.0.0-20211209124913-491a49abca63 // indirect golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d // indirect golang.org/x/sys v0.0.0-20220412211240-33da011f77ad // indirect @@ -48,6 +50,6 @@ require ( sigs.k8s.io/yaml v1.2.0 // indirect ) -replace github.com/hashicorp/consul/sdk => github.com/hashicorp/consul/sdk v0.4.1-0.20221021205723-cc843c4be892 +replace github.com/hashicorp/consul/sdk v0.9.0 => github.com/hashicorp/consul/sdk v0.4.1-0.20220531155537-364758ef2f50 -go 1.19 +go 1.18 diff --git a/control-plane/cni/go.sum b/control-plane/cni/go.sum index 1188cc5dd4..03309565a1 100644 --- a/control-plane/cni/go.sum +++ b/control-plane/cni/go.sum @@ -34,6 +34,8 @@ github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb0 github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= +github.com/cenkalti/backoff v2.1.1+incompatible h1:tKJnvO2kl0zmb/jA5UKAt4VoEVw1qxKWjE/Bpp46npY= +github.com/cenkalti/backoff v2.1.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= @@ -62,7 +64,6 @@ github.com/fatih/color v1.12.0/go.mod h1:ELkj/draVOlAH/xkhN6mQ50Qd0MPOk5AAr3maGE github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= github.com/form3tech-oss/jwt-go v3.2.3+incompatible/go.mod 
h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= -github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= @@ -131,8 +132,8 @@ github.com/googleapis/gnostic v0.5.5 h1:9fHAtK0uDfpveeqqo1hkEZJcFvYXAiCN3UutL8F9 github.com/googleapis/gnostic v0.5.5/go.mod h1:7+EbHbldMins07ALC74bsA81Ovc97DwqyJO1AENw9kA= github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= -github.com/hashicorp/consul/sdk v0.4.1-0.20221021205723-cc843c4be892 h1:jw0NwPmNPr5CxAU04hACdj61JSaJBKZ0FdBo+kwfNp4= -github.com/hashicorp/consul/sdk v0.4.1-0.20221021205723-cc843c4be892/go.mod h1:yPkX5Q6CsxTFMjQQDJwzeNmUUF5NUGGbrDsv9wTb8cw= +github.com/hashicorp/consul/sdk v0.4.1-0.20220531155537-364758ef2f50 h1:GwbRRT+QxMRbYI608FGwTfcZ0iOVLX69B2ePjpQoyXw= +github.com/hashicorp/consul/sdk v0.4.1-0.20220531155537-364758ef2f50/go.mod h1:yPkX5Q6CsxTFMjQQDJwzeNmUUF5NUGGbrDsv9wTb8cw= github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-hclog v0.12.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= github.com/hashicorp/go-hclog v0.16.1 h1:IVQwpTGNRRIHafnTs2dQLIk4ENtneRIEEJWOVDqz99o= @@ -183,7 +184,6 @@ github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+ github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= -github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= @@ -211,11 +211,13 @@ github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzu github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= +github.com/stretchr/objx v0.1.0 h1:4G4v2dO3VZwixGIRoQ5Lfboy6nUhCyYzaqnIAPPhYs4= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= 
github.com/stretchr/testify v1.7.1 h1:5TQK59W5E3v0r2duFAb7P95B6hEeOyEnHRa8MjYSMTY= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= @@ -456,7 +458,6 @@ gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= -gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/control-plane/commands.go b/control-plane/commands.go index d3347cf29e..4b0afa6731 100644 --- a/control-plane/commands.go +++ b/control-plane/commands.go @@ -6,6 +6,7 @@ import ( cmdACLInit "github.com/hashicorp/consul-k8s/control-plane/subcommand/acl-init" cmdConnectInit "github.com/hashicorp/consul-k8s/control-plane/subcommand/connect-init" cmdConsulLogout "github.com/hashicorp/consul-k8s/control-plane/subcommand/consul-logout" + cmdConsulSidecar "github.com/hashicorp/consul-k8s/control-plane/subcommand/consul-sidecar" cmdController "github.com/hashicorp/consul-k8s/control-plane/subcommand/controller" cmdCreateFederationSecret "github.com/hashicorp/consul-k8s/control-plane/subcommand/create-federation-secret" cmdDeleteCompletedJob "github.com/hashicorp/consul-k8s/control-plane/subcommand/delete-completed-job" @@ -15,6 +16,7 @@ import ( cmdInstallCNI "github.com/hashicorp/consul-k8s/control-plane/subcommand/install-cni" cmdPartitionInit "github.com/hashicorp/consul-k8s/control-plane/subcommand/partition-init" cmdServerACLInit "github.com/hashicorp/consul-k8s/control-plane/subcommand/server-acl-init" + cmdServiceAddress "github.com/hashicorp/consul-k8s/control-plane/subcommand/service-address" cmdSyncCatalog "github.com/hashicorp/consul-k8s/control-plane/subcommand/sync-catalog" cmdTLSInit "github.com/hashicorp/consul-k8s/control-plane/subcommand/tls-init" cmdVersion "github.com/hashicorp/consul-k8s/control-plane/subcommand/version" @@ -42,6 +44,10 @@ func init() { return &cmdInjectConnect.Command{UI: ui}, nil }, + "consul-sidecar": func() (cli.Command, error) { + return &cmdConsulSidecar.Command{UI: ui}, nil + }, + "consul-logout": func() (cli.Command, error) { return &cmdConsulLogout.Command{UI: ui}, nil }, @@ -62,6 +68,10 @@ func init() { return &cmdDeleteCompletedJob.Command{UI: ui}, nil }, + "service-address": func() (cli.Command, error) { + return &cmdServiceAddress.Command{UI: ui}, nil + }, + "get-consul-client-ca": func() (cli.Command, error) { return &cmdGetConsulClientCA.Command{UI: ui}, nil }, diff --git a/control-plane/config/crd/bases/consul.hashicorp.com_meshes.yaml b/control-plane/config/crd/bases/consul.hashicorp.com_meshes.yaml index 7ad173afbf..54137f9cc1 100644 --- a/control-plane/config/crd/bases/consul.hashicorp.com_meshes.yaml +++ b/control-plane/config/crd/bases/consul.hashicorp.com_meshes.yaml @@ -56,18 +56,6 @@ spec: required: - sanitizeXForwardedClientCert type: object - peering: - description: Peering defines the peering configuration for the service - mesh. 
- properties: - peerThroughMeshGateways: - description: PeerThroughMeshGateways determines whether peering - traffic between control planes should flow through mesh gateways. - If enabled, Consul servers will advertise mesh gateway addresses - as their own. Additionally, mesh gateways will configure themselves - to expose the local servers using a peering-specific SNI. - type: boolean - type: object tls: description: TLS defines the TLS configuration for the service mesh. properties: diff --git a/control-plane/config/crd/bases/consul.hashicorp.com_servicedefaults.yaml b/control-plane/config/crd/bases/consul.hashicorp.com_servicedefaults.yaml index 944f494f98..ae815fce47 100644 --- a/control-plane/config/crd/bases/consul.hashicorp.com_servicedefaults.yaml +++ b/control-plane/config/crd/bases/consul.hashicorp.com_servicedefaults.yaml @@ -106,17 +106,6 @@ spec: TLS SNI value to be changed to a non-connect value when federating with an external system. type: string - localConnectTimeoutMs: - description: The number of milliseconds allowed to make connections - to the local application instance before timing out. Defaults to - 5000. - type: integer - localRequestTimeoutMs: - description: In milliseconds, the timeout for HTTP requests to the - local application instance. Applies to HTTP-based protocols only. - If not specified, inherits the Envoy default for route timeouts - (15s). - type: integer maxInboundConnections: description: MaxInboundConnections is the maximum number of concurrent inbound connections to each service instance. Defaults to 0 (using diff --git a/control-plane/config/crd/bases/consul.hashicorp.com_serviceresolvers.yaml b/control-plane/config/crd/bases/consul.hashicorp.com_serviceresolvers.yaml index a84fc0bd88..1793f36e28 100644 --- a/control-plane/config/crd/bases/consul.hashicorp.com_serviceresolvers.yaml +++ b/control-plane/config/crd/bases/consul.hashicorp.com_serviceresolvers.yaml @@ -81,37 +81,6 @@ spec: service to resolve as the failover group of instances. If empty the default subset for the requested service is used. type: string - targets: - description: Targets specifies a fixed list of failover targets - to try during failover. - items: - properties: - datacenter: - description: Datacenter specifies the datacenter to try - during failover. - type: string - namespace: - description: Namespace specifies the namespace to try - during failover. - type: string - partition: - description: Partition specifies the partition to try - during failover. - type: string - peer: - description: Peer specifies the name of the cluster peer - to try during failover. - type: string - service: - description: Service specifies the name of the service - to try during failover. - type: string - serviceSubset: - description: ServiceSubset specifies the service subset - to try during failover. - type: string - type: object - type: array type: object description: Failover controls when and how to reroute traffic to an alternate pool of service instances. The map is keyed by the @@ -221,10 +190,6 @@ spec: service from instead of the current partition. If empty the current partition is assumed. type: string - peer: - description: Peer is the name of the cluster peer to resolve the - service from instead of the current one. - type: string service: description: Service is a service to resolve instead of the current service. 
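The schema hunks above strip the peering-era fields (`targets`, `peer`) out of this backport, so a failover policy on this branch is limited to subset, namespace, and datacenter routing. Expressed with the `github.com/hashicorp/consul/api` config-entry types that the CRD mirrors (a hedged sketch, not part of the patch):

```go
// A datacenter-only failover policy, the shape that remains after the
// removals above; "*" covers any subset without a more specific entry.
entry := &api.ServiceResolverConfigEntry{
	Kind: api.ServiceResolver,
	Name: "web",
	Failover: map[string]api.ServiceResolverFailover{
		"*": {Datacenters: []string{"dc2", "dc3"}},
	},
}
// Applied via the config-entries endpoint, e.g.:
// written, _, err := client.ConfigEntries().Set(entry, nil)
```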
diff --git a/control-plane/connect-inject/annotations.go b/control-plane/connect-inject/annotations.go index 8beaeb5ae7..fa35959160 100644 --- a/control-plane/connect-inject/annotations.go +++ b/control-plane/connect-inject/annotations.go @@ -20,37 +20,6 @@ const ( // be set to a truthy or falsy value, as parseable by strconv.ParseBool. annotationInject = "consul.hashicorp.com/connect-inject" - // annotationGatewayKind is the key of the annotation that indicates pods - // that represent Consul Connect Gateways. This should be set to a - // value that is either "mesh", "ingress" or "terminating". - annotationGatewayKind = "consul.hashicorp.com/gateway-kind" - - // annotationGatewayConsulServiceName is the key of the annotation whose value - // is the service name with which the mesh gateway is registered. - annotationGatewayConsulServiceName = "consul.hashicorp.com/gateway-consul-service-name" - - // annotationMeshGatewayContainerPort is the key of the annotation whose value is - // used as the port and also registered as the LAN port when the mesh-gateway - // service is registered. - annotationMeshGatewayContainerPort = "consul.hashicorp.com/mesh-gateway-container-port" - - // annotationGatewayWANSource is the key of the annotation that determines which - // source to use to determine the wan address and wan port for the mesh-gateway - // service registration. - annotationGatewayWANSource = "consul.hashicorp.com/gateway-wan-address-source" - - // annotationGatewayWANAddress is the key of the annotation that when the source - // of the mesh-gateway is 'Static', is the value of the WAN address for the gateway. - annotationGatewayWANAddress = "consul.hashicorp.com/gateway-wan-address-static" - - // annotationGatewayWANPort is the key of the annotation whose value is the - // WAN port for the mesh-gateway service registration. - annotationGatewayWANPort = "consul.hashicorp.com/gateway-wan-port" - - // annotationGatewayNamespace is the key of the annotation that indicates the - // Consul namespace where a Terminating or Ingress Gateway pod is deployed. - annotationGatewayNamespace = "consul.hashicorp.com/gateway-namespace" - // annotationInjectMountVolumes is the key of the annotation that controls whether // the data volume that connect inject uses to store data including the Consul ACL token // is mounted to other containers in the pod. It is a comma-separated list of container names @@ -115,6 +84,12 @@ const ( annotationSidecarProxyMemoryLimit = "consul.hashicorp.com/sidecar-proxy-memory-limit" annotationSidecarProxyMemoryRequest = "consul.hashicorp.com/sidecar-proxy-memory-request" + // annotations for consul sidecar resource limits. + annotationConsulSidecarCPULimit = "consul.hashicorp.com/consul-sidecar-cpu-limit" + annotationConsulSidecarCPURequest = "consul.hashicorp.com/consul-sidecar-cpu-request" + annotationConsulSidecarMemoryLimit = "consul.hashicorp.com/consul-sidecar-memory-limit" + annotationConsulSidecarMemoryRequest = "consul.hashicorp.com/consul-sidecar-memory-request" + // annotations for sidecar volumes. 
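The gateway annotations removed above leave the injector on this branch, while the consul-sidecar resource annotations are reintroduced. A hypothetical pod manifest exercising the added keys, assuming the usual `corev1`/`metav1` imports (the values are examples, not defaults):

```go
// Pod metadata opting into injection and capping the consul-sidecar
// container; the annotation keys match the constants added above.
pod := corev1.Pod{
	ObjectMeta: metav1.ObjectMeta{
		Annotations: map[string]string{
			"consul.hashicorp.com/connect-inject":                "true",
			"consul.hashicorp.com/consul-sidecar-cpu-request":    "25m",
			"consul.hashicorp.com/consul-sidecar-cpu-limit":      "50m",
			"consul.hashicorp.com/consul-sidecar-memory-request": "25Mi",
			"consul.hashicorp.com/consul-sidecar-memory-limit":   "50Mi",
		},
	},
}
```

The user-volume annotation constants that the trailing comment introduces continue in the next hunk.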
annotationConsulSidecarUserVolume = "consul.hashicorp.com/consul-sidecar-user-volume" annotationConsulSidecarUserVolumeMount = "consul.hashicorp.com/consul-sidecar-user-volume-mount" diff --git a/control-plane/connect-inject/consul_dataplane_sidecar.go b/control-plane/connect-inject/consul_dataplane_sidecar.go deleted file mode 100644 index c07f5df9e3..0000000000 --- a/control-plane/connect-inject/consul_dataplane_sidecar.go +++ /dev/null @@ -1,369 +0,0 @@ -package connectinject - -import ( - "encoding/json" - "fmt" - "strconv" - "strings" - - "github.com/google/shlex" - corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/resource" - "k8s.io/apimachinery/pkg/util/intstr" - "k8s.io/utils/pointer" -) - -const ( - ConsulCAFile = "/consul/connect-inject/consul-ca.pem" - ConsulDataplaneDNSBindHost = "127.0.0.1" - ConsulDataplaneDNSBindPort = 8600 -) - -func (w *MeshWebhook) consulDataplaneSidecar(namespace corev1.Namespace, pod corev1.Pod, mpi multiPortInfo) (corev1.Container, error) { - resources, err := w.sidecarResources(pod) - if err != nil { - return corev1.Container{}, err - } - - // Extract the service account token's volume mount. - var bearerTokenFile string - var saTokenVolumeMount corev1.VolumeMount - if w.AuthMethod != "" { - saTokenVolumeMount, bearerTokenFile, err = findServiceAccountVolumeMount(pod, mpi.serviceName) - if err != nil { - return corev1.Container{}, err - } - } - - multiPort := mpi.serviceName != "" - cmd, err := w.getContainerSidecarCommand(namespace, mpi, bearerTokenFile, pod) - if err != nil { - return corev1.Container{}, err - } - - containerName := sidecarContainer - if multiPort { - containerName = fmt.Sprintf("%s-%s", sidecarContainer, mpi.serviceName) - } - - probe := &corev1.Probe{ - Handler: corev1.Handler{ - TCPSocket: &corev1.TCPSocketAction{ - Port: intstr.FromInt(EnvoyInboundListenerPort + mpi.serviceIndex), - }, - }, - InitialDelaySeconds: 1, - } - container := corev1.Container{ - Name: containerName, - Image: w.ImageConsulDataplane, - Resources: resources, - // We need to set tmp dir to an ephemeral volume that we're mounting so that - // consul-dataplane can write files to it. Otherwise, it wouldn't be able to - // because we set file system to be read-only. - Env: []corev1.EnvVar{ - { - Name: "TMPDIR", - Value: "/consul/connect-inject", - }, - }, - VolumeMounts: []corev1.VolumeMount{ - { - Name: volumeName, - MountPath: "/consul/connect-inject", - }, - }, - Command: cmd, - ReadinessProbe: probe, - LivenessProbe: probe, - } - - if w.AuthMethod != "" { - container.VolumeMounts = append(container.VolumeMounts, saTokenVolumeMount) - } - - // Add any extra VolumeMounts. - if _, ok := pod.Annotations[annotationConsulSidecarUserVolumeMount]; ok { - var volumeMounts []corev1.VolumeMount - err := json.Unmarshal([]byte(pod.Annotations[annotationConsulSidecarUserVolumeMount]), &volumeMounts) - if err != nil { - return corev1.Container{}, err - } - container.VolumeMounts = append(container.VolumeMounts, volumeMounts...) - } - - tproxyEnabled, err := transparentProxyEnabled(namespace, pod, w.EnableTransparentProxy) - if err != nil { - return corev1.Container{}, err - } - - // If not running in transparent proxy mode and in an OpenShift environment, - // skip setting the security context and let OpenShift set it for us. - // When transparent proxy is enabled, then consul-dataplane needs to run as our specific user - // so that traffic redirection will work. 
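Reconstructed from the security-context test matrix further down, the decision those comments describe reduces to a single predicate: OpenShift only gets to assign the security context when transparent proxy is off.

```go
// setsSecurityContext mirrors the deleted guard below: the webhook sets an
// explicit security context unless OpenShift is enabled with tproxy disabled.
func setsSecurityContext(tproxyEnabled, openShiftEnabled bool) bool {
	return tproxyEnabled || !openShiftEnabled
}
```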
- if tproxyEnabled || !w.EnableOpenShift { - if pod.Spec.SecurityContext != nil { - // User container and consul-dataplane container cannot have the same UID. - if pod.Spec.SecurityContext.RunAsUser != nil && *pod.Spec.SecurityContext.RunAsUser == sidecarUserAndGroupID { - return corev1.Container{}, fmt.Errorf("pod's security context cannot have the same UID as consul-dataplane: %v", sidecarUserAndGroupID) - } - } - // Ensure that none of the user's containers have the same UID as consul-dataplane. At this point in injection the meshWebhook - // has only injected init containers so all containers defined in pod.Spec.Containers are from the user. - for _, c := range pod.Spec.Containers { - // User container and consul-dataplane container cannot have the same UID. - if c.SecurityContext != nil && c.SecurityContext.RunAsUser != nil && *c.SecurityContext.RunAsUser == sidecarUserAndGroupID && c.Image != w.ImageConsulDataplane { - return corev1.Container{}, fmt.Errorf("container %q has runAsUser set to the same UID \"%d\" as consul-dataplane which is not allowed", c.Name, sidecarUserAndGroupID) - } - } - container.SecurityContext = &corev1.SecurityContext{ - RunAsUser: pointer.Int64(sidecarUserAndGroupID), - RunAsGroup: pointer.Int64(sidecarUserAndGroupID), - RunAsNonRoot: pointer.Bool(true), - ReadOnlyRootFilesystem: pointer.Bool(true), - } - } - - return container, nil -} - -func (w *MeshWebhook) getContainerSidecarCommand(namespace corev1.Namespace, mpi multiPortInfo, bearerTokenFile string, pod corev1.Pod) ([]string, error) { - proxyIDFileName := "/consul/connect-inject/proxyid" - if mpi.serviceName != "" { - proxyIDFileName = fmt.Sprintf("/consul/connect-inject/proxyid-%s", mpi.serviceName) - } - - envoyConcurrency := w.DefaultEnvoyProxyConcurrency - - // Check to see if the user has overridden concurrency via an annotation.
- if envoyConcurrencyAnnotation, ok := pod.Annotations[annotationEnvoyProxyConcurrency]; ok { - val, err := strconv.ParseUint(envoyConcurrencyAnnotation, 10, 64) - if err != nil { - return nil, fmt.Errorf("unable to parse annotation %q: %w", annotationEnvoyProxyConcurrency, err) - } - envoyConcurrency = int(val) - } - - cmd := []string{ - "consul-dataplane", - fmt.Sprintf("-addresses=%q", w.ConsulAddress), - "-grpc-port=" + strconv.Itoa(w.ConsulConfig.GRPCPort), - "-proxy-service-id=" + fmt.Sprintf("$(cat %s)", proxyIDFileName), - "-service-node-name=" + ConsulNodeName, - "-log-level=" + w.LogLevel, - "-log-json=" + strconv.FormatBool(w.LogJSON), - "-envoy-concurrency=" + strconv.Itoa(envoyConcurrency), - } - - if w.AuthMethod != "" { - cmd = append(cmd, - "-credential-type=login", - "-login-auth-method="+w.AuthMethod, - "-login-bearer-token-path="+bearerTokenFile, - "-login-meta="+fmt.Sprintf("pod=%s/%s", namespace.Name, pod.Name), - ) - if w.EnableNamespaces { - if w.EnableK8SNSMirroring { - cmd = append(cmd, "-login-namespace=default") - } else { - cmd = append(cmd, "-login-namespace="+w.consulNamespace(namespace.Name)) - } - } - if w.ConsulPartition != "" { - cmd = append(cmd, "-login-partition="+w.ConsulPartition) - } - } - if w.EnableNamespaces { - cmd = append(cmd, "-service-namespace="+w.consulNamespace(namespace.Name)) - } - if w.ConsulPartition != "" { - cmd = append(cmd, "-service-partition="+w.ConsulPartition) - } - if w.TLSEnabled { - if w.ConsulTLSServerName != "" { - cmd = append(cmd, "-tls-server-name="+w.ConsulTLSServerName) - } - if w.ConsulCACert != "" { - cmd = append(cmd, "-ca-certs="+ConsulCAFile) - } - } else { - cmd = append(cmd, "-tls-disabled") - } - - if mpi.serviceName != "" { - cmd = append(cmd, fmt.Sprintf("-envoy-admin-bind-port=%d", 19000+mpi.serviceIndex)) - } - - metricsServer, err := w.MetricsConfig.shouldRunMergedMetricsServer(pod) - if err != nil { - return nil, fmt.Errorf("unable to determine if merged metrics is enabled: %w", err) - } - if metricsServer { - prometheusScrapePath := w.MetricsConfig.prometheusScrapePath(pod) - mergedMetricsPort, err := w.MetricsConfig.mergedMetricsPort(pod) - if err != nil { - return nil, fmt.Errorf("unable to determine merged metrics port: %w", err) - } - cmd = append(cmd, "-telemetry-prom-scrape-path="+prometheusScrapePath, - "-telemetry-prom-merge-port="+mergedMetricsPort) - - serviceMetricsPath := w.MetricsConfig.serviceMetricsPath(pod) - serviceMetricsPort, err := w.MetricsConfig.serviceMetricsPort(pod) - if err != nil { - return nil, fmt.Errorf("unable to determine service metrics port: %w", err) - } - - if serviceMetricsPath != "" && serviceMetricsPort != "" { - cmd = append(cmd, "-telemetry-prom-service-metrics-url="+fmt.Sprintf("http://127.0.0.1:%s%s", serviceMetricsPort, serviceMetricsPath)) - } - - // Pull the TLS config from the relevant annotations. - var prometheusCAFile string - if raw, ok := pod.Annotations[annotationPrometheusCAFile]; ok && raw != "" { - prometheusCAFile = raw - } - - var prometheusCAPath string - if raw, ok := pod.Annotations[annotationPrometheusCAPath]; ok && raw != "" { - prometheusCAPath = raw - } - - var prometheusCertFile string - if raw, ok := pod.Annotations[annotationPrometheusCertFile]; ok && raw != "" { - prometheusCertFile = raw - } - - var prometheusKeyFile string - if raw, ok := pod.Annotations[annotationPrometheusKeyFile]; ok && raw != "" { - prometheusKeyFile = raw - } - - // Validate required Prometheus TLS config is present if set.
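For a concrete sense of what this builder produced: with default settings and TLS disabled, the deleted tests further down expect the fully assembled command below (reproduced from their fixtures, not new behavior).

```go
// Expected single-port command, per TestHandlerConsulDataplaneSidecar's
// "default" case: the flags above joined and executed through /bin/sh.
cmd := []string{
	"/bin/sh", "-ec",
	`consul-dataplane -addresses="1.1.1.1" -grpc-port=8502 ` +
		`-proxy-service-id=$(cat /consul/connect-inject/proxyid) ` +
		`-service-node-name=k8s-service-mesh -log-level=info ` +
		`-log-json=false -envoy-concurrency=0 -tls-disabled`,
}
```

The Prometheus TLS validation that the comment above introduces follows.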
- if prometheusCAFile != "" || prometheusCAPath != "" || prometheusCertFile != "" || prometheusKeyFile != "" { - if prometheusCAFile == "" && prometheusCAPath == "" { - return nil, fmt.Errorf("must set one of %q or %q when providing prometheus TLS config", annotationPrometheusCAFile, annotationPrometheusCAPath) - } - if prometheusCertFile == "" { - return nil, fmt.Errorf("must set %q when providing prometheus TLS config", annotationPrometheusCertFile) - } - if prometheusKeyFile == "" { - return nil, fmt.Errorf("must set %q when providing prometheus TLS config", annotationPrometheusKeyFile) - } - // TLS config has been validated, add them to the consul-dataplane cmd args - cmd = append(cmd, "-telemetry-prom-ca-certs-file="+prometheusCAFile, - "-telemetry-prom-ca-certs-path="+prometheusCAPath, - "-telemetry-prom-cert-file="+prometheusCertFile, - "-telemetry-prom-key-file="+prometheusKeyFile) - } - } - - // If Consul DNS is enabled, we want to configure consul-dataplane to be the DNS proxy - // for Consul DNS in the pod. - if w.EnableConsulDNS { - cmd = append(cmd, "-consul-dns-bind-port="+strconv.Itoa(ConsulDataplaneDNSBindPort)) - } - - var envoyExtraArgs []string - extraArgs, annotationSet := pod.Annotations[annotationEnvoyExtraArgs] - // --base-id is an envoy arg rather than consul-dataplane, and so we need to make sure we're passing it - // last separated by the --. - if mpi.serviceName != "" { - // --base-id is needed so multiple Envoy proxies can run on the same host. - envoyExtraArgs = append(envoyExtraArgs, "--base-id", fmt.Sprintf("%d", mpi.serviceIndex)) - } - - if annotationSet || w.EnvoyExtraArgs != "" { - extraArgsToUse := w.EnvoyExtraArgs - - // Prefer args set by pod annotation over the flag to the consul-k8s binary (h.EnvoyExtraArgs). - if annotationSet { - extraArgsToUse = extraArgs - } - - // Split string into tokens. - // e.g. "--foo bar --boo baz" --> ["--foo", "bar", "--boo", "baz"] - tokens, err := shlex.Split(extraArgsToUse) - if err != nil { - return []string{}, err - } - for _, t := range tokens { - if strings.Contains(t, " ") { - t = strconv.Quote(t) - } - envoyExtraArgs = append(envoyExtraArgs, t) - } - } - if envoyExtraArgs != nil { - cmd = append(cmd, "--") - cmd = append(cmd, envoyExtraArgs...) - } - - cmd = append([]string{"/bin/sh", "-ec"}, strings.Join(cmd, " ")) - return cmd, nil -} - -func (w *MeshWebhook) sidecarResources(pod corev1.Pod) (corev1.ResourceRequirements, error) { - resources := corev1.ResourceRequirements{ - Limits: corev1.ResourceList{}, - Requests: corev1.ResourceList{}, - } - // zeroQuantity is used for comparison to see if a quantity was explicitly - // set. - var zeroQuantity resource.Quantity - - // NOTE: We only want to set the limit/request if the default or annotation - // was explicitly set. If it's not explicitly set, it will be the zero value - // which would show up in the pod spec as being explicitly set to zero if we - // set that key, e.g. "cpu" to zero. - // We want it to not show up in the pod spec at all if it's not explicitly - // set so that users aren't wondering why it's set to 0 when they didn't specify - // a request/limit. If they have explicitly set it to 0 then it will be set - // to 0 in the pod spec because we're doing a comparison to the zero-valued - // struct. - - // CPU Limit. 
- if anno, ok := pod.Annotations[annotationSidecarProxyCPULimit]; ok { - cpuLimit, err := resource.ParseQuantity(anno) - if err != nil { - return corev1.ResourceRequirements{}, fmt.Errorf("parsing annotation %s:%q: %s", annotationSidecarProxyCPULimit, anno, err) - } - resources.Limits[corev1.ResourceCPU] = cpuLimit - } else if w.DefaultProxyCPULimit != zeroQuantity { - resources.Limits[corev1.ResourceCPU] = w.DefaultProxyCPULimit - } - - // CPU Request. - if anno, ok := pod.Annotations[annotationSidecarProxyCPURequest]; ok { - cpuRequest, err := resource.ParseQuantity(anno) - if err != nil { - return corev1.ResourceRequirements{}, fmt.Errorf("parsing annotation %s:%q: %s", annotationSidecarProxyCPURequest, anno, err) - } - resources.Requests[corev1.ResourceCPU] = cpuRequest - } else if w.DefaultProxyCPURequest != zeroQuantity { - resources.Requests[corev1.ResourceCPU] = w.DefaultProxyCPURequest - } - - // Memory Limit. - if anno, ok := pod.Annotations[annotationSidecarProxyMemoryLimit]; ok { - memoryLimit, err := resource.ParseQuantity(anno) - if err != nil { - return corev1.ResourceRequirements{}, fmt.Errorf("parsing annotation %s:%q: %s", annotationSidecarProxyMemoryLimit, anno, err) - } - resources.Limits[corev1.ResourceMemory] = memoryLimit - } else if w.DefaultProxyMemoryLimit != zeroQuantity { - resources.Limits[corev1.ResourceMemory] = w.DefaultProxyMemoryLimit - } - - // Memory Request. - if anno, ok := pod.Annotations[annotationSidecarProxyMemoryRequest]; ok { - memoryRequest, err := resource.ParseQuantity(anno) - if err != nil { - return corev1.ResourceRequirements{}, fmt.Errorf("parsing annotation %s:%q: %s", annotationSidecarProxyMemoryRequest, anno, err) - } - resources.Requests[corev1.ResourceMemory] = memoryRequest - } else if w.DefaultProxyMemoryRequest != zeroQuantity { - resources.Requests[corev1.ResourceMemory] = w.DefaultProxyMemoryRequest - } - - return resources, nil -} diff --git a/control-plane/connect-inject/consul_dataplane_sidecar_test.go b/control-plane/connect-inject/consul_dataplane_sidecar_test.go deleted file mode 100644 index 5639205726..0000000000 --- a/control-plane/connect-inject/consul_dataplane_sidecar_test.go +++ /dev/null @@ -1,1035 +0,0 @@ -package connectinject - -import ( - "fmt" - "strconv" - "testing" - - "github.com/hashicorp/consul-k8s/control-plane/consul" - "github.com/stretchr/testify/require" - corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/resource" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/intstr" - "k8s.io/utils/pointer" -) - -func TestHandlerConsulDataplaneSidecar(t *testing.T) { - cases := map[string]struct { - webhookSetupFunc func(w *MeshWebhook) - additionalExpCmdArgs string - }{ - "default": { - webhookSetupFunc: nil, - additionalExpCmdArgs: " -tls-disabled", - }, - "with custom gRPC port": { - webhookSetupFunc: func(w *MeshWebhook) { - w.ConsulConfig.GRPCPort = 8602 - }, - additionalExpCmdArgs: " -tls-disabled", - }, - "with ACLs": { - webhookSetupFunc: func(w *MeshWebhook) { - w.AuthMethod = "test-auth-method" - }, - additionalExpCmdArgs: " -credential-type=login -login-auth-method=test-auth-method -login-bearer-token-path=/var/run/secrets/kubernetes.io/serviceaccount/token " + - "-login-meta=pod=k8snamespace/test-pod -tls-disabled", - }, - "with ACLs and namespace mirroring": { - webhookSetupFunc: func(w *MeshWebhook) { - w.AuthMethod = "test-auth-method" - w.EnableNamespaces = true - w.EnableK8SNSMirroring = true - }, - additionalExpCmdArgs: " -credential-type=login 
-login-auth-method=test-auth-method -login-bearer-token-path=/var/run/secrets/kubernetes.io/serviceaccount/token " + - "-login-meta=pod=k8snamespace/test-pod -login-namespace=default -service-namespace=k8snamespace -tls-disabled", - }, - "with ACLs and single destination namespace": { - webhookSetupFunc: func(w *MeshWebhook) { - w.AuthMethod = "test-auth-method" - w.EnableNamespaces = true - w.ConsulDestinationNamespace = "test-ns" - }, - additionalExpCmdArgs: " -credential-type=login -login-auth-method=test-auth-method -login-bearer-token-path=/var/run/secrets/kubernetes.io/serviceaccount/token " + - "-login-meta=pod=k8snamespace/test-pod -login-namespace=test-ns -service-namespace=test-ns -tls-disabled", - }, - "with ACLs and partitions": { - webhookSetupFunc: func(w *MeshWebhook) { - w.AuthMethod = "test-auth-method" - w.ConsulPartition = "test-part" - }, - additionalExpCmdArgs: " -credential-type=login -login-auth-method=test-auth-method -login-bearer-token-path=/var/run/secrets/kubernetes.io/serviceaccount/token " + - "-login-meta=pod=k8snamespace/test-pod -login-partition=test-part -service-partition=test-part -tls-disabled", - }, - "with TLS and CA cert provided": { - webhookSetupFunc: func(w *MeshWebhook) { - w.TLSEnabled = true - w.ConsulTLSServerName = "server.dc1.consul" - w.ConsulCACert = "consul-ca-cert" - }, - additionalExpCmdArgs: " -tls-server-name=server.dc1.consul -ca-certs=/consul/connect-inject/consul-ca.pem", - }, - "with TLS and no CA cert provided": { - webhookSetupFunc: func(w *MeshWebhook) { - w.TLSEnabled = true - w.ConsulTLSServerName = "server.dc1.consul" - }, - additionalExpCmdArgs: " -tls-server-name=server.dc1.consul", - }, - "with single destination namespace": { - webhookSetupFunc: func(w *MeshWebhook) { - w.EnableNamespaces = true - w.ConsulDestinationNamespace = "consul-namespace" - }, - additionalExpCmdArgs: " -service-namespace=consul-namespace -tls-disabled", - }, - "with namespace mirroring": { - webhookSetupFunc: func(w *MeshWebhook) { - w.EnableNamespaces = true - w.EnableK8SNSMirroring = true - }, - additionalExpCmdArgs: " -service-namespace=k8snamespace -tls-disabled", - }, - "with namespace mirroring prefix": { - webhookSetupFunc: func(w *MeshWebhook) { - w.EnableNamespaces = true - w.EnableK8SNSMirroring = true - w.K8SNSMirroringPrefix = "foo-" - }, - additionalExpCmdArgs: " -service-namespace=foo-k8snamespace -tls-disabled", - }, - "with partitions": { - webhookSetupFunc: func(w *MeshWebhook) { - w.ConsulPartition = "partition-1" - }, - additionalExpCmdArgs: " -service-partition=partition-1 -tls-disabled", - }, - "with different log level": { - webhookSetupFunc: func(w *MeshWebhook) { - w.LogLevel = "debug" - }, - additionalExpCmdArgs: " -tls-disabled", - }, - "with different log level and log json": { - webhookSetupFunc: func(w *MeshWebhook) { - w.LogLevel = "debug" - w.LogJSON = true - }, - additionalExpCmdArgs: " -tls-disabled", - }, - } - - for name, c := range cases { - t.Run(name, func(t *testing.T) { - w := &MeshWebhook{ - ConsulAddress: "1.1.1.1", - ConsulConfig: &consul.Config{GRPCPort: 8502}, - LogLevel: "info", - LogJSON: false, - } - if c.webhookSetupFunc != nil { - c.webhookSetupFunc(w) - } - pod := corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-pod", - Annotations: map[string]string{ - annotationService: "foo", - }, - }, - - Spec: corev1.PodSpec{ - Containers: []corev1.Container{ - { - Name: "web", - }, - { - Name: "web-side", - }, - { - Name: "auth-method-secret", - VolumeMounts: []corev1.VolumeMount{ - { - Name: 
"service-account-secret", - MountPath: "/var/run/secrets/kubernetes.io/serviceaccount", - }, - }, - }, - }, - ServiceAccountName: "web", - }, - } - - container, err := w.consulDataplaneSidecar(testNS, pod, multiPortInfo{}) - require.NoError(t, err) - expCmd := []string{ - "/bin/sh", "-ec", - "consul-dataplane -addresses=\"1.1.1.1\" -grpc-port=" + strconv.Itoa(w.ConsulConfig.GRPCPort) + - " -proxy-service-id=$(cat /consul/connect-inject/proxyid) " + - "-service-node-name=k8s-service-mesh -log-level=" + w.LogLevel + " -log-json=" + strconv.FormatBool(w.LogJSON) + " -envoy-concurrency=0" + c.additionalExpCmdArgs, - } - require.Equal(t, expCmd, container.Command) - - if w.AuthMethod != "" { - require.Equal(t, container.VolumeMounts, []corev1.VolumeMount{ - { - Name: volumeName, - MountPath: "/consul/connect-inject", - }, - { - Name: "service-account-secret", - MountPath: "/var/run/secrets/kubernetes.io/serviceaccount", - }, - }) - } else { - require.Equal(t, container.VolumeMounts, []corev1.VolumeMount{ - { - Name: volumeName, - MountPath: "/consul/connect-inject", - }, - }) - } - - expectedProbe := &corev1.Probe{ - Handler: corev1.Handler{ - TCPSocket: &corev1.TCPSocketAction{ - Port: intstr.FromInt(EnvoyInboundListenerPort), - }, - }, - InitialDelaySeconds: 1, - } - require.Equal(t, expectedProbe, container.ReadinessProbe) - require.Equal(t, expectedProbe, container.LivenessProbe) - require.Nil(t, container.StartupProbe) - require.Len(t, container.Env, 1) - require.Equal(t, container.Env[0].Name, "TMPDIR") - require.Equal(t, container.Env[0].Value, "/consul/connect-inject") - }) - } -} - -func TestHandlerConsulDataplaneSidecar_Concurrency(t *testing.T) { - cases := map[string]struct { - annotations map[string]string - expFlags string - expErr string - }{ - "default settings, no annotations": { - annotations: map[string]string{ - annotationService: "foo", - }, - expFlags: "-envoy-concurrency=0", - }, - "default settings, annotation override": { - annotations: map[string]string{ - annotationService: "foo", - annotationEnvoyProxyConcurrency: "42", - }, - expFlags: "-envoy-concurrency=42", - }, - "default settings, invalid concurrency annotation negative number": { - annotations: map[string]string{ - annotationService: "foo", - annotationEnvoyProxyConcurrency: "-42", - }, - expErr: "unable to parse annotation \"consul.hashicorp.com/consul-envoy-proxy-concurrency\": strconv.ParseUint: parsing \"-42\": invalid syntax", - }, - "default settings, not-parseable concurrency annotation": { - annotations: map[string]string{ - annotationService: "foo", - annotationEnvoyProxyConcurrency: "not-int", - }, - expErr: "unable to parse annotation \"consul.hashicorp.com/consul-envoy-proxy-concurrency\": strconv.ParseUint: parsing \"not-int\": invalid syntax", - }, - } - - for name, c := range cases { - t.Run(name, func(t *testing.T) { - h := MeshWebhook{ - ConsulConfig: &consul.Config{HTTPPort: 8500, GRPCPort: 8502}, - } - pod := corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Annotations: c.annotations, - }, - Spec: corev1.PodSpec{ - Containers: []corev1.Container{ - { - Name: "web", - }, - }, - }, - } - container, err := h.consulDataplaneSidecar(testNS, pod, multiPortInfo{}) - if c.expErr != "" { - require.EqualError(t, err, c.expErr) - } else { - require.NoError(t, err) - require.Contains(t, container.Command[2], c.expFlags) - } - }) - } -} - -func TestHandlerConsulDataplaneSidecar_DNSProxy(t *testing.T) { - h := MeshWebhook{ - ConsulConfig: &consul.Config{HTTPPort: 8500, GRPCPort: 8502}, - EnableConsulDNS: 
true, - } - pod := corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{}, - Spec: corev1.PodSpec{ - Containers: []corev1.Container{ - { - Name: "web", - }, - }, - }, - } - container, err := h.consulDataplaneSidecar(testNS, pod, multiPortInfo{}) - require.NoError(t, err) - require.Contains(t, container.Command[2], "-consul-dns-bind-port=8600") -} - -func TestHandlerConsulDataplaneSidecar_Multiport(t *testing.T) { - for _, aclsEnabled := range []bool{false, true} { - name := fmt.Sprintf("acls enabled: %t", aclsEnabled) - t.Run(name, func(t *testing.T) { - w := MeshWebhook{ - ConsulAddress: "1.1.1.1", - ConsulConfig: &consul.Config{GRPCPort: 8502}, - LogLevel: "info", - } - if aclsEnabled { - w.AuthMethod = "test-auth-method" - } - pod := corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-pod", - Annotations: map[string]string{ - annotationService: "web,web-admin", - }, - }, - - Spec: corev1.PodSpec{ - Volumes: []corev1.Volume{ - { - Name: "web-admin-service-account", - }, - }, - Containers: []corev1.Container{ - { - Name: "web", - }, - { - Name: "web-side", - }, - { - Name: "web-admin", - }, - { - Name: "web-admin-side", - }, - { - Name: "auth-method-secret", - VolumeMounts: []corev1.VolumeMount{ - { - Name: "service-account-secret", - MountPath: "/var/run/secrets/kubernetes.io/serviceaccount", - }, - }, - }, - }, - ServiceAccountName: "web", - }, - } - multiPortInfos := []multiPortInfo{ - { - serviceIndex: 0, - serviceName: "web", - }, - { - serviceIndex: 1, - serviceName: "web-admin", - }, - } - expCommand := [][]string{ - {"/bin/sh", "-ec", "consul-dataplane -addresses=\"1.1.1.1\" -grpc-port=8502 -proxy-service-id=$(cat /consul/connect-inject/proxyid-web) " + - "-service-node-name=k8s-service-mesh -log-level=info -log-json=false -envoy-concurrency=0 -tls-disabled -envoy-admin-bind-port=19000 -- --base-id 0"}, - {"/bin/sh", "-ec", "consul-dataplane -addresses=\"1.1.1.1\" -grpc-port=8502 -proxy-service-id=$(cat /consul/connect-inject/proxyid-web-admin) " + - "-service-node-name=k8s-service-mesh -log-level=info -log-json=false -envoy-concurrency=0 -tls-disabled -envoy-admin-bind-port=19001 -- --base-id 1"}, - } - if aclsEnabled { - expCommand = [][]string{ - {"/bin/sh", "-ec", "consul-dataplane -addresses=\"1.1.1.1\" -grpc-port=8502 -proxy-service-id=$(cat /consul/connect-inject/proxyid-web) " + - "-service-node-name=k8s-service-mesh -log-level=info -log-json=false -envoy-concurrency=0 -credential-type=login -login-auth-method=test-auth-method " + - "-login-bearer-token-path=/var/run/secrets/kubernetes.io/serviceaccount/token -login-meta=pod=k8snamespace/test-pod -tls-disabled -envoy-admin-bind-port=19000 -- --base-id 0"}, - {"/bin/sh", "-ec", "consul-dataplane -addresses=\"1.1.1.1\" -grpc-port=8502 -proxy-service-id=$(cat /consul/connect-inject/proxyid-web-admin) " + - "-service-node-name=k8s-service-mesh -log-level=info -log-json=false -envoy-concurrency=0 -credential-type=login -login-auth-method=test-auth-method " + - "-login-bearer-token-path=/consul/serviceaccount-web-admin/token -login-meta=pod=k8snamespace/test-pod -tls-disabled -envoy-admin-bind-port=19001 -- --base-id 1"}, - } - } - expSAVolumeMounts := []corev1.VolumeMount{ - { - Name: "service-account-secret", - MountPath: "/var/run/secrets/kubernetes.io/serviceaccount", - }, - { - Name: "web-admin-service-account", - MountPath: "/consul/serviceaccount-web-admin", - ReadOnly: true, - }, - } - - for i, expCmd := range expCommand { - container, err := w.consulDataplaneSidecar(testNS, pod, multiPortInfos[i]) - require.NoError(t, 
err) - require.Equal(t, expCmd, container.Command) - - if w.AuthMethod != "" { - require.Equal(t, container.VolumeMounts, []corev1.VolumeMount{ - { - Name: volumeName, - MountPath: "/consul/connect-inject", - }, - expSAVolumeMounts[i], - }) - } else { - require.Equal(t, container.VolumeMounts, []corev1.VolumeMount{ - { - Name: volumeName, - MountPath: "/consul/connect-inject", - }, - }) - } - - port := EnvoyInboundListenerPort + i - expectedProbe := &corev1.Probe{ - Handler: corev1.Handler{ - TCPSocket: &corev1.TCPSocketAction{ - Port: intstr.FromInt(port), - }, - }, - InitialDelaySeconds: 1, - } - require.Equal(t, expectedProbe, container.ReadinessProbe) - require.Equal(t, expectedProbe, container.LivenessProbe) - require.Nil(t, container.StartupProbe) - } - }) - } -} - -func TestHandlerConsulDataplaneSidecar_withSecurityContext(t *testing.T) { - cases := map[string]struct { - tproxyEnabled bool - openShiftEnabled bool - expSecurityContext *corev1.SecurityContext - }{ - "tproxy disabled; openshift disabled": { - tproxyEnabled: false, - openShiftEnabled: false, - expSecurityContext: &corev1.SecurityContext{ - RunAsUser: pointer.Int64(sidecarUserAndGroupID), - RunAsGroup: pointer.Int64(sidecarUserAndGroupID), - RunAsNonRoot: pointer.Bool(true), - ReadOnlyRootFilesystem: pointer.Bool(true), - }, - }, - "tproxy enabled; openshift disabled": { - tproxyEnabled: true, - openShiftEnabled: false, - expSecurityContext: &corev1.SecurityContext{ - RunAsUser: pointer.Int64(sidecarUserAndGroupID), - RunAsGroup: pointer.Int64(sidecarUserAndGroupID), - RunAsNonRoot: pointer.Bool(true), - ReadOnlyRootFilesystem: pointer.Bool(true), - }, - }, - "tproxy disabled; openshift enabled": { - tproxyEnabled: false, - openShiftEnabled: true, - expSecurityContext: nil, - }, - "tproxy enabled; openshift enabled": { - tproxyEnabled: true, - openShiftEnabled: true, - expSecurityContext: &corev1.SecurityContext{ - RunAsUser: pointer.Int64(sidecarUserAndGroupID), - RunAsGroup: pointer.Int64(sidecarUserAndGroupID), - RunAsNonRoot: pointer.Bool(true), - ReadOnlyRootFilesystem: pointer.Bool(true), - }, - }, - } - for name, c := range cases { - t.Run(name, func(t *testing.T) { - w := MeshWebhook{ - EnableTransparentProxy: c.tproxyEnabled, - EnableOpenShift: c.openShiftEnabled, - ConsulConfig: &consul.Config{HTTPPort: 8500, GRPCPort: 8502}, - } - pod := corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Annotations: map[string]string{ - annotationService: "foo", - }, - }, - - Spec: corev1.PodSpec{ - Containers: []corev1.Container{ - { - Name: "web", - }, - }, - }, - } - ec, err := w.consulDataplaneSidecar(testNS, pod, multiPortInfo{}) - require.NoError(t, err) - require.Equal(t, c.expSecurityContext, ec.SecurityContext) - }) - } -} - -// Test that if the user specifies a pod security context with the same uid as `sidecarUserAndGroupID` that we return -// an error to the meshWebhook. 
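The guard under test rejects any pod-level UID collision with the dataplane's reserved identity. A sketch of a spec that trips it, assuming the usual `corev1` and `k8s.io/utils/pointer` imports (5995 is the `sidecarUserAndGroupID` value the test comments below use):

```go
// A pod whose security context claims the consul-dataplane UID; injection
// fails fast rather than producing two containers with the same UID.
pod := corev1.Pod{
	Spec: corev1.PodSpec{
		SecurityContext: &corev1.PodSecurityContext{
			RunAsUser: pointer.Int64(5995), // collides with sidecarUserAndGroupID
		},
	},
}
```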
-func TestHandlerConsulDataplaneSidecar_FailsWithDuplicatePodSecurityContextUID(t *testing.T) { - require := require.New(t) - w := MeshWebhook{ - ConsulConfig: &consul.Config{HTTPPort: 8500, GRPCPort: 8502}, - } - pod := corev1.Pod{ - Spec: corev1.PodSpec{ - Containers: []corev1.Container{ - { - Name: "web", - }, - }, - SecurityContext: &corev1.PodSecurityContext{ - RunAsUser: pointer.Int64(sidecarUserAndGroupID), - }, - }, - } - _, err := w.consulDataplaneSidecar(testNS, pod, multiPortInfo{}) - require.EqualError(err, fmt.Sprintf("pod's security context cannot have the same UID as consul-dataplane: %v", sidecarUserAndGroupID)) -} - -// Test that if the user specifies a container with security context with the same uid as `sidecarUserAndGroupID` that we -// return an error to the meshWebhook. If a container using the consul-dataplane image has the same uid, we don't return an error -// because in multiport pod there can be multiple consul-dataplane sidecars. -func TestHandlerConsulDataplaneSidecar_FailsWithDuplicateContainerSecurityContextUID(t *testing.T) { - cases := []struct { - name string - pod corev1.Pod - webhook MeshWebhook - expErr bool - expErrMessage string - }{ - { - name: "fails with non consul-dataplane image", - pod: corev1.Pod{ - Spec: corev1.PodSpec{ - Containers: []corev1.Container{ - { - Name: "web", - // Setting RunAsUser: 1 should succeed. - SecurityContext: &corev1.SecurityContext{ - RunAsUser: pointer.Int64(1), - }, - }, - { - Name: "app", - // Setting RunAsUser: 5995 should fail. - SecurityContext: &corev1.SecurityContext{ - RunAsUser: pointer.Int64(sidecarUserAndGroupID), - }, - Image: "not-consul-dataplane", - }, - }, - }, - }, - webhook: MeshWebhook{}, - expErr: true, - expErrMessage: fmt.Sprintf("container \"app\" has runAsUser set to the same UID \"%d\" as consul-dataplane which is not allowed", sidecarUserAndGroupID), - }, - { - name: "doesn't fail with envoy image", - pod: corev1.Pod{ - Spec: corev1.PodSpec{ - Containers: []corev1.Container{ - { - Name: "web", - // Setting RunAsUser: 1 should succeed. - SecurityContext: &corev1.SecurityContext{ - RunAsUser: pointer.Int64(1), - }, - }, - { - Name: "sidecar", - // Setting RunAsUser: 5995 should succeed if the image matches h.ImageConsulDataplane. - SecurityContext: &corev1.SecurityContext{ - RunAsUser: pointer.Int64(sidecarUserAndGroupID), - }, - Image: "envoy", - }, - }, - }, - }, - webhook: MeshWebhook{ - ImageConsulDataplane: "envoy", - }, - expErr: false, - }, - } - - for _, tc := range cases { - t.Run(tc.name, func(t *testing.T) { - tc.webhook.ConsulConfig = &consul.Config{HTTPPort: 8500, GRPCPort: 8502} - _, err := tc.webhook.consulDataplaneSidecar(testNS, tc.pod, multiPortInfo{}) - if tc.expErr { - require.EqualError(t, err, tc.expErrMessage) - } else { - require.NoError(t, err) - } - }) - } -} - -// Test that we can pass extra args to envoy via the extraEnvoyArgs flag -// or via pod annotations. When arguments are passed in both ways, the -// arguments set via pod annotations are used. 
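Annotation-supplied args win over the webhook flag, and both paths go through shell-style tokenization so quoted values survive as single arguments. A hedged sketch using `github.com/google/shlex`, the library the deleted builder used (`fmt` and `log` imports assumed):

```go
// demoShlex shows the POSIX-shell tokenization applied to extra Envoy args:
// the quoted path stays a single token, so values containing spaces work.
func demoShlex() {
	tokens, err := shlex.Split(`--log-level debug --admin-address-path "/tmp/consul/foo bar"`)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(tokens) // [--log-level debug --admin-address-path /tmp/consul/foo bar]
}
```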
-func TestHandlerConsulDataplaneSidecar_EnvoyExtraArgs(t *testing.T) { - cases := []struct { - name string - envoyExtraArgs string - pod *corev1.Pod - expectedExtraArgs string - }{ - { - name: "no extra options provided", - envoyExtraArgs: "", - pod: &corev1.Pod{}, - expectedExtraArgs: "", - }, - { - name: "via flag: extra log-level option", - envoyExtraArgs: "--log-level debug", - pod: &corev1.Pod{}, - expectedExtraArgs: "-- --log-level debug", - }, - { - name: "via flag: multiple arguments with quotes", - envoyExtraArgs: "--log-level debug --admin-address-path \"/tmp/consul/foo bar\"", - pod: &corev1.Pod{}, - expectedExtraArgs: "-- --log-level debug --admin-address-path \"/tmp/consul/foo bar\"", - }, - { - name: "via annotation: multiple arguments with quotes", - envoyExtraArgs: "", - pod: &corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Annotations: map[string]string{ - annotationEnvoyExtraArgs: "--log-level debug --admin-address-path \"/tmp/consul/foo bar\"", - }, - }, - }, - expectedExtraArgs: "-- --log-level debug --admin-address-path \"/tmp/consul/foo bar\"", - }, - { - name: "via flag and annotation: should prefer setting via the annotation", - envoyExtraArgs: "this should be overwritten", - pod: &corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Annotations: map[string]string{ - annotationEnvoyExtraArgs: "--log-level debug --admin-address-path \"/tmp/consul/foo bar\"", - }, - }, - }, - expectedExtraArgs: "-- --log-level debug --admin-address-path \"/tmp/consul/foo bar\"", - }, - } - - for _, tc := range cases { - t.Run(tc.name, func(t *testing.T) { - h := MeshWebhook{ - ImageConsul: "hashicorp/consul:latest", - ImageConsulDataplane: "hashicorp/consul-k8s:latest", - ConsulConfig: &consul.Config{HTTPPort: 8500, GRPCPort: 8502}, - EnvoyExtraArgs: tc.envoyExtraArgs, - } - - c, err := h.consulDataplaneSidecar(testNS, *tc.pod, multiPortInfo{}) - require.NoError(t, err) - require.Contains(t, c.Command[2], tc.expectedExtraArgs) - }) - } -} - -func TestHandlerConsulDataplaneSidecar_UserVolumeMounts(t *testing.T) { - cases := []struct { - name string - pod corev1.Pod - expectedContainerVolumeMounts []corev1.VolumeMount - expErr string - }{ - { - name: "able to set a sidecar container volume mount via annotation", - pod: corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Annotations: map[string]string{ - annotationEnvoyExtraArgs: "--log-level debug --admin-address-path \"/tmp/consul/foo bar\"", - annotationConsulSidecarUserVolumeMount: "[{\"name\": \"tls-cert\", \"mountPath\": \"/custom/path\"}, {\"name\": \"tls-ca\", \"mountPath\": \"/custom/path2\"}]", - }, - }, - }, - expectedContainerVolumeMounts: []corev1.VolumeMount{ - { - Name: "consul-connect-inject-data", - MountPath: "/consul/connect-inject", - }, - { - Name: "tls-cert", - MountPath: "/custom/path", - }, - { - Name: "tls-ca", - MountPath: "/custom/path2", - }, - }, - }, - { - name: "invalid annotation results in error", - pod: corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Annotations: map[string]string{ - annotationEnvoyExtraArgs: "--log-level debug --admin-address-path \"/tmp/consul/foo bar\"", - annotationConsulSidecarUserVolumeMount: "[abcdefg]", - }, - }, - }, - expErr: "invalid character 'a' looking ", - }, - } - for _, tc := range cases { - t.Run(tc.name, func(t *testing.T) { - h := MeshWebhook{ - ImageConsul: "hashicorp/consul:latest", - ImageConsulDataplane: "hashicorp/consul-k8s:latest", - ConsulConfig: &consul.Config{HTTPPort: 8500, GRPCPort: 8502}, - } - c, err := h.consulDataplaneSidecar(testNS, tc.pod, multiPortInfo{}) - if 
tc.expErr == "" { - require.NoError(t, err) - require.Equal(t, tc.expectedContainerVolumeMounts, c.VolumeMounts) - } else { - require.Error(t, err) - require.Contains(t, err.Error(), tc.expErr) - } - }) - } -} - -func TestHandlerConsulDataplaneSidecar_Resources(t *testing.T) { - mem1 := resource.MustParse("100Mi") - mem2 := resource.MustParse("200Mi") - cpu1 := resource.MustParse("100m") - cpu2 := resource.MustParse("200m") - zero := resource.MustParse("0") - - cases := map[string]struct { - webhook MeshWebhook - annotations map[string]string - expResources corev1.ResourceRequirements - expErr string - }{ - "no defaults, no annotations": { - webhook: MeshWebhook{}, - annotations: nil, - expResources: corev1.ResourceRequirements{ - Limits: corev1.ResourceList{}, - Requests: corev1.ResourceList{}, - }, - }, - "all defaults, no annotations": { - webhook: MeshWebhook{ - DefaultProxyCPURequest: cpu1, - DefaultProxyCPULimit: cpu2, - DefaultProxyMemoryRequest: mem1, - DefaultProxyMemoryLimit: mem2, - }, - annotations: nil, - expResources: corev1.ResourceRequirements{ - Limits: corev1.ResourceList{ - corev1.ResourceCPU: cpu2, - corev1.ResourceMemory: mem2, - }, - Requests: corev1.ResourceList{ - corev1.ResourceCPU: cpu1, - corev1.ResourceMemory: mem1, - }, - }, - }, - "no defaults, all annotations": { - webhook: MeshWebhook{}, - annotations: map[string]string{ - annotationSidecarProxyCPURequest: "100m", - annotationSidecarProxyMemoryRequest: "100Mi", - annotationSidecarProxyCPULimit: "200m", - annotationSidecarProxyMemoryLimit: "200Mi", - }, - expResources: corev1.ResourceRequirements{ - Limits: corev1.ResourceList{ - corev1.ResourceCPU: cpu2, - corev1.ResourceMemory: mem2, - }, - Requests: corev1.ResourceList{ - corev1.ResourceCPU: cpu1, - corev1.ResourceMemory: mem1, - }, - }, - }, - "annotations override defaults": { - webhook: MeshWebhook{ - DefaultProxyCPURequest: zero, - DefaultProxyCPULimit: zero, - DefaultProxyMemoryRequest: zero, - DefaultProxyMemoryLimit: zero, - }, - annotations: map[string]string{ - annotationSidecarProxyCPURequest: "100m", - annotationSidecarProxyMemoryRequest: "100Mi", - annotationSidecarProxyCPULimit: "200m", - annotationSidecarProxyMemoryLimit: "200Mi", - }, - expResources: corev1.ResourceRequirements{ - Limits: corev1.ResourceList{ - corev1.ResourceCPU: cpu2, - corev1.ResourceMemory: mem2, - }, - Requests: corev1.ResourceList{ - corev1.ResourceCPU: cpu1, - corev1.ResourceMemory: mem1, - }, - }, - }, - "defaults set to zero, no annotations": { - webhook: MeshWebhook{ - DefaultProxyCPURequest: zero, - DefaultProxyCPULimit: zero, - DefaultProxyMemoryRequest: zero, - DefaultProxyMemoryLimit: zero, - }, - annotations: nil, - expResources: corev1.ResourceRequirements{ - Limits: corev1.ResourceList{ - corev1.ResourceCPU: zero, - corev1.ResourceMemory: zero, - }, - Requests: corev1.ResourceList{ - corev1.ResourceCPU: zero, - corev1.ResourceMemory: zero, - }, - }, - }, - "annotations set to 0": { - webhook: MeshWebhook{}, - annotations: map[string]string{ - annotationSidecarProxyCPURequest: "0", - annotationSidecarProxyMemoryRequest: "0", - annotationSidecarProxyCPULimit: "0", - annotationSidecarProxyMemoryLimit: "0", - }, - expResources: corev1.ResourceRequirements{ - Limits: corev1.ResourceList{ - corev1.ResourceCPU: zero, - corev1.ResourceMemory: zero, - }, - Requests: corev1.ResourceList{ - corev1.ResourceCPU: zero, - corev1.ResourceMemory: zero, - }, - }, - }, - "invalid cpu request": { - webhook: MeshWebhook{}, - annotations: map[string]string{ - 
annotationSidecarProxyCPURequest: "invalid", - }, - expErr: "parsing annotation consul.hashicorp.com/sidecar-proxy-cpu-request:\"invalid\": quantities must match the regular expression", - }, - "invalid cpu limit": { - webhook: MeshWebhook{}, - annotations: map[string]string{ - annotationSidecarProxyCPULimit: "invalid", - }, - expErr: "parsing annotation consul.hashicorp.com/sidecar-proxy-cpu-limit:\"invalid\": quantities must match the regular expression", - }, - "invalid memory request": { - webhook: MeshWebhook{}, - annotations: map[string]string{ - annotationSidecarProxyMemoryRequest: "invalid", - }, - expErr: "parsing annotation consul.hashicorp.com/sidecar-proxy-memory-request:\"invalid\": quantities must match the regular expression", - }, - "invalid memory limit": { - webhook: MeshWebhook{}, - annotations: map[string]string{ - annotationSidecarProxyMemoryLimit: "invalid", - }, - expErr: "parsing annotation consul.hashicorp.com/sidecar-proxy-memory-limit:\"invalid\": quantities must match the regular expression", - }, - } - - for name, c := range cases { - t.Run(name, func(tt *testing.T) { - c.webhook.ConsulConfig = &consul.Config{HTTPPort: 8500, GRPCPort: 8502} - require := require.New(tt) - pod := corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Annotations: c.annotations, - }, - - Spec: corev1.PodSpec{ - Containers: []corev1.Container{ - { - Name: "web", - }, - }, - }, - } - container, err := c.webhook.consulDataplaneSidecar(testNS, pod, multiPortInfo{}) - if c.expErr != "" { - require.NotNil(err) - require.Contains(err.Error(), c.expErr) - } else { - require.NoError(err) - require.Equal(c.expResources, container.Resources) - } - }) - } -} - -func TestHandlerConsulDataplaneSidecar_Metrics(t *testing.T) { - cases := []struct { - name string - pod corev1.Pod - expCmdArgs string - expErr string - }{ - { - name: "default", - pod: corev1.Pod{}, - expCmdArgs: "", - }, - { - name: "turning on merged metrics", - pod: corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Annotations: map[string]string{ - annotationService: "web", - annotationEnableMetrics: "true", - annotationEnableMetricsMerging: "true", - annotationMergedMetricsPort: "20100", - annotationPort: "1234", - annotationPrometheusScrapePath: "/scrape-path", - }, - }, - }, - expCmdArgs: "-telemetry-prom-scrape-path=/scrape-path -telemetry-prom-merge-port=20100 -telemetry-prom-service-metrics-url=http://127.0.0.1:1234/metrics", - }, - { - name: "merged metrics with TLS enabled", - pod: corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Annotations: map[string]string{ - annotationService: "web", - annotationEnableMetrics: "true", - annotationEnableMetricsMerging: "true", - annotationMergedMetricsPort: "20100", - annotationPort: "1234", - annotationPrometheusScrapePath: "/scrape-path", - annotationPrometheusCAFile: "/certs/ca.crt", - annotationPrometheusCAPath: "/certs/ca", - annotationPrometheusCertFile: "/certs/server.crt", - annotationPrometheusKeyFile: "/certs/key.pem", - }, - }, - }, - expCmdArgs: "-telemetry-prom-scrape-path=/scrape-path -telemetry-prom-merge-port=20100 -telemetry-prom-service-metrics-url=http://127.0.0.1:1234/metrics -telemetry-prom-ca-certs-file=/certs/ca.crt -telemetry-prom-ca-certs-path=/certs/ca -telemetry-prom-cert-file=/certs/server.crt -telemetry-prom-key-file=/certs/key.pem", - }, - { - name: "merge metrics with TLS enabled, missing CA gives an error", - pod: corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Annotations: map[string]string{ - annotationService: "web", - annotationEnableMetrics: "true", - 
annotationEnableMetricsMerging: "true", - annotationMergedMetricsPort: "20100", - annotationPort: "1234", - annotationPrometheusScrapePath: "/scrape-path", - annotationPrometheusCertFile: "/certs/server.crt", - annotationPrometheusKeyFile: "/certs/key.pem", - }, - }, - }, - expCmdArgs: "", - expErr: fmt.Sprintf("must set one of %q or %q when providing prometheus TLS config", annotationPrometheusCAFile, annotationPrometheusCAPath), - }, - { - name: "merge metrics with TLS enabled, missing cert gives an error", - pod: corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Annotations: map[string]string{ - annotationService: "web", - annotationEnableMetrics: "true", - annotationEnableMetricsMerging: "true", - annotationMergedMetricsPort: "20100", - annotationPort: "1234", - annotationPrometheusScrapePath: "/scrape-path", - annotationPrometheusCAFile: "/certs/ca.crt", - annotationPrometheusKeyFile: "/certs/key.pem", - }, - }, - }, - expCmdArgs: "", - expErr: fmt.Sprintf("must set %q when providing prometheus TLS config", annotationPrometheusCertFile), - }, - { - name: "merge metrics with TLS enabled, missing key file gives an error", - pod: corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Annotations: map[string]string{ - annotationService: "web", - annotationEnableMetrics: "true", - annotationEnableMetricsMerging: "true", - annotationMergedMetricsPort: "20100", - annotationPort: "1234", - annotationPrometheusScrapePath: "/scrape-path", - annotationPrometheusCAPath: "/certs/ca", - annotationPrometheusCertFile: "/certs/server.crt", - }, - }, - }, - expCmdArgs: "", - expErr: fmt.Sprintf("must set %q when providing prometheus TLS config", annotationPrometheusKeyFile), - }, - } - - for _, c := range cases { - t.Run(c.name, func(t *testing.T) { - h := MeshWebhook{ - ConsulConfig: &consul.Config{HTTPPort: 8500, GRPCPort: 8502}, - } - container, err := h.consulDataplaneSidecar(testNS, c.pod, multiPortInfo{}) - if c.expErr != "" { - require.NotNil(t, err) - require.Contains(t, err.Error(), c.expErr) - } else { - require.NoError(t, err) - require.Contains(t, container.Command[2], c.expCmdArgs) - } - }) - } -} diff --git a/control-plane/connect-inject/consul_sidecar.go b/control-plane/connect-inject/consul_sidecar.go new file mode 100644 index 0000000000..a19eebb5ef --- /dev/null +++ b/control-plane/connect-inject/consul_sidecar.go @@ -0,0 +1,115 @@ +package connectinject + +import ( + "fmt" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" +) + +// consulSidecar starts the consul-sidecar command to only run +// the metrics merging server when metrics merging feature is enabled. +// It always disables service registration because for connect we no longer +// need to keep services registered as this is handled in the endpoints-controller. 
+func (w *MeshWebhook) consulSidecar(pod corev1.Pod) (corev1.Container, error) {
+	metricsPorts, err := w.MetricsConfig.mergedMetricsServerConfiguration(pod)
+	if err != nil {
+		return corev1.Container{}, err
+	}
+
+	resources, err := w.consulSidecarResources(pod)
+	if err != nil {
+		return corev1.Container{}, err
+	}
+
+	command := []string{
+		"consul-k8s-control-plane",
+		"consul-sidecar",
+		"-enable-service-registration=false",
+		"-enable-metrics-merging=true",
+		fmt.Sprintf("-merged-metrics-port=%s", metricsPorts.mergedPort),
+		fmt.Sprintf("-service-metrics-port=%s", metricsPorts.servicePort),
+		fmt.Sprintf("-service-metrics-path=%s", metricsPorts.servicePath),
+		fmt.Sprintf("-log-level=%s", w.LogLevel),
+		fmt.Sprintf("-log-json=%t", w.LogJSON),
+	}
+
+	return corev1.Container{
+		Name:  "consul-sidecar",
+		Image: w.ImageConsulK8S,
+		VolumeMounts: []corev1.VolumeMount{
+			{
+				Name:      volumeName,
+				MountPath: "/consul/connect-inject",
+			},
+		},
+		Command:   command,
+		Resources: resources,
+	}, nil
+}
+
+func (w *MeshWebhook) consulSidecarResources(pod corev1.Pod) (corev1.ResourceRequirements, error) {
+	resources := corev1.ResourceRequirements{
+		Limits:   corev1.ResourceList{},
+		Requests: corev1.ResourceList{},
+	}
+	// zeroQuantity is used for comparison to see if a quantity was explicitly
+	// set.
+	var zeroQuantity resource.Quantity
+
+	// NOTE: We only want to set the limit/request if the default or annotation
+	// was explicitly set. If it's not explicitly set, it will be the zero value,
+	// which would show up in the pod spec as being explicitly set to zero if we
+	// set that key, e.g. "cpu", to zero.
+	// We don't want it to show up in the pod spec at all if it's not explicitly
+	// set, so that users aren't wondering why it's set to 0 when they didn't
+	// specify a request/limit. If they have explicitly set it to 0, then it will
+	// be set to 0 in the pod spec because we're comparing against the zero-valued
+	// struct.
+
+	// CPU Limit.
+	if anno, ok := pod.Annotations[annotationConsulSidecarCPULimit]; ok {
+		cpuLimit, err := resource.ParseQuantity(anno)
+		if err != nil {
+			return corev1.ResourceRequirements{}, fmt.Errorf("parsing annotation %s:%q: %s", annotationConsulSidecarCPULimit, anno, err)
+		}
+		resources.Limits[corev1.ResourceCPU] = cpuLimit
+	} else if w.DefaultConsulSidecarResources.Limits[corev1.ResourceCPU] != zeroQuantity {
+		resources.Limits[corev1.ResourceCPU] = w.DefaultConsulSidecarResources.Limits[corev1.ResourceCPU]
+	}
+
+	// CPU Request.
+	if anno, ok := pod.Annotations[annotationConsulSidecarCPURequest]; ok {
+		cpuRequest, err := resource.ParseQuantity(anno)
+		if err != nil {
+			return corev1.ResourceRequirements{}, fmt.Errorf("parsing annotation %s:%q: %s", annotationConsulSidecarCPURequest, anno, err)
+		}
+		resources.Requests[corev1.ResourceCPU] = cpuRequest
+	} else if w.DefaultConsulSidecarResources.Requests[corev1.ResourceCPU] != zeroQuantity {
+		resources.Requests[corev1.ResourceCPU] = w.DefaultConsulSidecarResources.Requests[corev1.ResourceCPU]
+	}
+
+	// Memory Limit.
+ if anno, ok := pod.Annotations[annotationConsulSidecarMemoryLimit]; ok { + memoryLimit, err := resource.ParseQuantity(anno) + if err != nil { + return corev1.ResourceRequirements{}, fmt.Errorf("parsing annotation %s:%q: %s", annotationConsulSidecarMemoryLimit, anno, err) + } + resources.Limits[corev1.ResourceMemory] = memoryLimit + } else if w.DefaultConsulSidecarResources.Limits[corev1.ResourceMemory] != zeroQuantity { + resources.Limits[corev1.ResourceMemory] = w.DefaultConsulSidecarResources.Limits[corev1.ResourceMemory] + } + + // Memory Request. + if anno, ok := pod.Annotations[annotationConsulSidecarMemoryRequest]; ok { + memoryRequest, err := resource.ParseQuantity(anno) + if err != nil { + return corev1.ResourceRequirements{}, fmt.Errorf("parsing annotation %s:%q: %s", annotationConsulSidecarMemoryRequest, anno, err) + } + resources.Requests[corev1.ResourceMemory] = memoryRequest + } else if w.DefaultConsulSidecarResources.Requests[corev1.ResourceMemory] != zeroQuantity { + resources.Requests[corev1.ResourceMemory] = w.DefaultConsulSidecarResources.Requests[corev1.ResourceMemory] + } + + return resources, nil +} diff --git a/control-plane/connect-inject/consul_sidecar_test.go b/control-plane/connect-inject/consul_sidecar_test.go new file mode 100644 index 0000000000..bafaad104a --- /dev/null +++ b/control-plane/connect-inject/consul_sidecar_test.go @@ -0,0 +1,343 @@ +package connectinject + +import ( + "testing" + + logrtest "github.com/go-logr/logr/testing" + "github.com/stretchr/testify/require" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// Test that if the conditions for running a merged metrics server are true, +// that we pass the metrics flags to consul sidecar. 
+func TestConsulSidecar_MetricsFlags(t *testing.T) { + meshWebhook := MeshWebhook{ + Log: logrtest.TestLogger{T: t}, + ImageConsulK8S: "hashicorp/consul-k8s:9.9.9", + MetricsConfig: MetricsConfig{ + DefaultEnableMetrics: true, + DefaultEnableMetricsMerging: true, + }, + } + container, err := meshWebhook.consulSidecar(corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + annotationMergedMetricsPort: "20100", + annotationServiceMetricsPort: "8080", + annotationServiceMetricsPath: "/metrics", + }, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "web", + }, + }, + }, + }) + + require.NoError(t, err) + require.Contains(t, container.Command, "-enable-metrics-merging=true") + require.Contains(t, container.Command, "-merged-metrics-port=20100") + require.Contains(t, container.Command, "-service-metrics-port=8080") + require.Contains(t, container.Command, "-service-metrics-path=/metrics") +} + +func TestHandlerConsulSidecar_Resources(t *testing.T) { + mem1 := resource.MustParse("100Mi") + mem2 := resource.MustParse("200Mi") + cpu1 := resource.MustParse("100m") + cpu2 := resource.MustParse("200m") + zero := resource.MustParse("0") + + cases := map[string]struct { + meshWebhook MeshWebhook + annotations map[string]string + expResources corev1.ResourceRequirements + expErr string + }{ + "no defaults, no annotations": { + meshWebhook: MeshWebhook{ + Log: logrtest.TestLogger{T: t}, + ImageConsulK8S: "hashicorp/consul-k8s:9.9.9", + MetricsConfig: MetricsConfig{ + DefaultEnableMetrics: true, + DefaultEnableMetricsMerging: true, + }, + }, + annotations: map[string]string{ + annotationMergedMetricsPort: "20100", + annotationServiceMetricsPort: "8080", + annotationServiceMetricsPath: "/metrics", + }, + expResources: corev1.ResourceRequirements{ + Limits: corev1.ResourceList{}, + Requests: corev1.ResourceList{}, + }, + }, + "all defaults, no annotations": { + meshWebhook: MeshWebhook{ + Log: logrtest.TestLogger{T: t}, + ImageConsulK8S: "hashicorp/consul-k8s:9.9.9", + MetricsConfig: MetricsConfig{ + DefaultEnableMetrics: true, + DefaultEnableMetricsMerging: true, + }, + DefaultConsulSidecarResources: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceCPU: cpu1, + corev1.ResourceMemory: mem1, + }, + Limits: corev1.ResourceList{ + corev1.ResourceCPU: cpu2, + corev1.ResourceMemory: mem2, + }, + }, + }, + annotations: map[string]string{ + annotationMergedMetricsPort: "20100", + annotationServiceMetricsPort: "8080", + annotationServiceMetricsPath: "/metrics", + }, + expResources: corev1.ResourceRequirements{ + Limits: corev1.ResourceList{ + corev1.ResourceCPU: cpu2, + corev1.ResourceMemory: mem2, + }, + Requests: corev1.ResourceList{ + corev1.ResourceCPU: cpu1, + corev1.ResourceMemory: mem1, + }, + }, + }, + "no defaults, all annotations": { + meshWebhook: MeshWebhook{ + Log: logrtest.TestLogger{T: t}, + ImageConsulK8S: "hashicorp/consul-k8s:9.9.9", + MetricsConfig: MetricsConfig{ + DefaultEnableMetrics: true, + DefaultEnableMetricsMerging: true, + }, + }, + annotations: map[string]string{ + annotationMergedMetricsPort: "20100", + annotationServiceMetricsPort: "8080", + annotationServiceMetricsPath: "/metrics", + annotationConsulSidecarCPURequest: "100m", + annotationConsulSidecarMemoryRequest: "100Mi", + annotationConsulSidecarCPULimit: "200m", + annotationConsulSidecarMemoryLimit: "200Mi", + }, + expResources: corev1.ResourceRequirements{ + Limits: corev1.ResourceList{ + corev1.ResourceCPU: cpu2, + corev1.ResourceMemory: mem2, + }, 
+ Requests: corev1.ResourceList{ + corev1.ResourceCPU: cpu1, + corev1.ResourceMemory: mem1, + }, + }, + }, + "annotations override defaults": { + meshWebhook: MeshWebhook{ + Log: logrtest.TestLogger{T: t}, + ImageConsulK8S: "hashicorp/consul-k8s:9.9.9", + MetricsConfig: MetricsConfig{ + DefaultEnableMetrics: true, + DefaultEnableMetricsMerging: true, + }, + DefaultConsulSidecarResources: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceCPU: zero, + corev1.ResourceMemory: zero, + }, + Limits: corev1.ResourceList{ + corev1.ResourceCPU: zero, + corev1.ResourceMemory: zero, + }, + }, + }, + annotations: map[string]string{ + annotationMergedMetricsPort: "20100", + annotationServiceMetricsPort: "8080", + annotationServiceMetricsPath: "/metrics", + annotationConsulSidecarCPURequest: "100m", + annotationConsulSidecarMemoryRequest: "100Mi", + annotationConsulSidecarCPULimit: "200m", + annotationConsulSidecarMemoryLimit: "200Mi", + }, + expResources: corev1.ResourceRequirements{ + Limits: corev1.ResourceList{ + corev1.ResourceCPU: cpu2, + corev1.ResourceMemory: mem2, + }, + Requests: corev1.ResourceList{ + corev1.ResourceCPU: cpu1, + corev1.ResourceMemory: mem1, + }, + }, + }, + "defaults set to zero, no annotations": { + meshWebhook: MeshWebhook{ + Log: logrtest.TestLogger{T: t}, + ImageConsulK8S: "hashicorp/consul-k8s:9.9.9", + MetricsConfig: MetricsConfig{ + DefaultEnableMetrics: true, + DefaultEnableMetricsMerging: true, + }, + DefaultConsulSidecarResources: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceCPU: zero, + corev1.ResourceMemory: zero, + }, + Limits: corev1.ResourceList{ + corev1.ResourceCPU: zero, + corev1.ResourceMemory: zero, + }, + }, + }, + annotations: map[string]string{ + annotationMergedMetricsPort: "20100", + annotationServiceMetricsPort: "8080", + annotationServiceMetricsPath: "/metrics", + }, + expResources: corev1.ResourceRequirements{ + Limits: corev1.ResourceList{ + corev1.ResourceCPU: zero, + corev1.ResourceMemory: zero, + }, + Requests: corev1.ResourceList{ + corev1.ResourceCPU: zero, + corev1.ResourceMemory: zero, + }, + }, + }, + "annotations set to 0": { + meshWebhook: MeshWebhook{ + Log: logrtest.TestLogger{T: t}, + ImageConsulK8S: "hashicorp/consul-k8s:9.9.9", + MetricsConfig: MetricsConfig{ + DefaultEnableMetrics: true, + DefaultEnableMetricsMerging: true, + }, + }, + annotations: map[string]string{ + annotationMergedMetricsPort: "20100", + annotationServiceMetricsPort: "8080", + annotationServiceMetricsPath: "/metrics", + annotationConsulSidecarCPURequest: "0", + annotationConsulSidecarMemoryRequest: "0", + annotationConsulSidecarCPULimit: "0", + annotationConsulSidecarMemoryLimit: "0", + }, + expResources: corev1.ResourceRequirements{ + Limits: corev1.ResourceList{ + corev1.ResourceCPU: zero, + corev1.ResourceMemory: zero, + }, + Requests: corev1.ResourceList{ + corev1.ResourceCPU: zero, + corev1.ResourceMemory: zero, + }, + }, + }, + "invalid cpu request": { + meshWebhook: MeshWebhook{ + Log: logrtest.TestLogger{T: t}, + ImageConsulK8S: "hashicorp/consul-k8s:9.9.9", + MetricsConfig: MetricsConfig{ + DefaultEnableMetrics: true, + DefaultEnableMetricsMerging: true, + }, + }, + annotations: map[string]string{ + annotationMergedMetricsPort: "20100", + annotationServiceMetricsPort: "8080", + annotationServiceMetricsPath: "/metrics", + annotationConsulSidecarCPURequest: "invalid", + }, + expErr: "parsing annotation consul.hashicorp.com/consul-sidecar-cpu-request:\"invalid\": quantities must match the 
regular expression", + }, + "invalid cpu limit": { + meshWebhook: MeshWebhook{ + Log: logrtest.TestLogger{T: t}, + ImageConsulK8S: "hashicorp/consul-k8s:9.9.9", + MetricsConfig: MetricsConfig{ + DefaultEnableMetrics: true, + DefaultEnableMetricsMerging: true, + }, + }, + annotations: map[string]string{ + annotationMergedMetricsPort: "20100", + annotationServiceMetricsPort: "8080", + annotationServiceMetricsPath: "/metrics", + annotationConsulSidecarCPULimit: "invalid", + }, + expErr: "parsing annotation consul.hashicorp.com/consul-sidecar-cpu-limit:\"invalid\": quantities must match the regular expression", + }, + "invalid memory request": { + meshWebhook: MeshWebhook{ + Log: logrtest.TestLogger{T: t}, + ImageConsulK8S: "hashicorp/consul-k8s:9.9.9", + MetricsConfig: MetricsConfig{ + DefaultEnableMetrics: true, + DefaultEnableMetricsMerging: true, + }, + }, + annotations: map[string]string{ + annotationMergedMetricsPort: "20100", + annotationServiceMetricsPort: "8080", + annotationServiceMetricsPath: "/metrics", + annotationConsulSidecarMemoryRequest: "invalid", + }, + expErr: "parsing annotation consul.hashicorp.com/consul-sidecar-memory-request:\"invalid\": quantities must match the regular expression", + }, + "invalid memory limit": { + meshWebhook: MeshWebhook{ + Log: logrtest.TestLogger{T: t}, + ImageConsulK8S: "hashicorp/consul-k8s:9.9.9", + MetricsConfig: MetricsConfig{ + DefaultEnableMetrics: true, + DefaultEnableMetricsMerging: true, + }, + }, + annotations: map[string]string{ + annotationMergedMetricsPort: "20100", + annotationServiceMetricsPort: "8080", + annotationServiceMetricsPath: "/metrics", + annotationConsulSidecarMemoryLimit: "invalid", + }, + expErr: "parsing annotation consul.hashicorp.com/consul-sidecar-memory-limit:\"invalid\": quantities must match the regular expression", + }, + } + + for name, c := range cases { + t.Run(name, func(tt *testing.T) { + require := require.New(tt) + pod := corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: c.annotations, + }, + + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "web", + }, + }, + }, + } + container, err := c.meshWebhook.consulSidecar(pod) + if c.expErr != "" { + require.NotNil(err) + require.Contains(err.Error(), c.expErr) + } else { + require.NoError(err) + require.Equal(c.expResources, container.Resources) + } + }) + } +} diff --git a/control-plane/connect-inject/container_init.go b/control-plane/connect-inject/container_init.go index 7e819b3584..e0ef413011 100644 --- a/control-plane/connect-inject/container_init.go +++ b/control-plane/connect-inject/container_init.go @@ -3,37 +3,132 @@ package connectinject import ( "bytes" "fmt" + "os" "strconv" "strings" "text/template" + "time" corev1 "k8s.io/api/core/v1" "k8s.io/utils/pointer" ) const ( + InjectInitCopyContainerName = "copy-consul-bin" InjectInitContainerName = "consul-connect-inject-init" rootUserAndGroupID = 0 - sidecarUserAndGroupID = 5995 + envoyUserAndGroupID = 5995 initContainersUserAndGroupID = 5996 netAdminCapability = "NET_ADMIN" + dnsServiceHostEnvSuffix = "DNS_SERVICE_HOST" ) type initContainerCommandData struct { ServiceName string ServiceAccountName string AuthMethod string + // ConsulPartition is the Consul admin partition to register the service + // and proxy in. An empty string indicates partitions are not + // enabled in Consul (necessary for OSS). + ConsulPartition string + // ConsulNamespace is the Consul namespace to register the service + // and proxy in. 
An empty string indicates namespaces are not + // enabled in Consul (necessary for OSS). + ConsulNamespace string + NamespaceMirroringEnabled bool - // ConsulNodeName is the node name in Consul where services are registered. - ConsulNodeName string + // The PEM-encoded CA certificate to use when + // communicating with Consul clients + ConsulCACert string + // EnableMetrics adds a listener to Envoy where Prometheus will scrape + // metrics from. + EnableMetrics bool + // PrometheusScrapePath configures the path on the listener on Envoy where + // Prometheus will scrape metrics from. + PrometheusScrapePath string + // PrometheusBackendPort configures where the listener on Envoy will point to. + PrometheusBackendPort string + // The file paths to use for configuring TLS on the Prometheus metrics endpoint. + PrometheusCAFile string + PrometheusCAPath string + PrometheusCertFile string + PrometheusKeyFile string + // EnvoyUID is the Linux user id that will be used when tproxy is enabled. + EnvoyUID int + + // EnableTransparentProxy configures this init container to run in transparent proxy mode, + // i.e. run consul connect redirect-traffic command and add the required privileges to the + // container to do that. + EnableTransparentProxy bool + + // EnableCNI configures this init container to skip the redirect-traffic command as traffic + // redirection is handled by the CNI plugin on pod creation. + EnableCNI bool + + // TProxyExcludeInboundPorts is a list of inbound ports to exclude from traffic redirection via + // the consul connect redirect-traffic command. + TProxyExcludeInboundPorts []string + + // TProxyExcludeOutboundPorts is a list of outbound ports to exclude from traffic redirection via + // the consul connect redirect-traffic command. + TProxyExcludeOutboundPorts []string + + // TProxyExcludeOutboundCIDRs is a list of outbound CIDRs to exclude from traffic redirection via + // the consul connect redirect-traffic command. + TProxyExcludeOutboundCIDRs []string + + // TProxyExcludeUIDs is a list of additional user IDs to exclude from traffic redirection via + // the consul connect redirect-traffic command. + TProxyExcludeUIDs []string + + // ConsulDNSClusterIP is the IP of the Consul DNS Service. + ConsulDNSClusterIP string // MultiPort determines whether this is a multi port Pod, which configures the init container to be specific to one // of the services on the multi port Pod. MultiPort bool - // Log settings for the connect-init command. - LogLevel string - LogJSON bool + // EnvoyAdminPort configures the admin port of the Envoy sidecar. This will be unique per service in a multi port + // Pod. + EnvoyAdminPort int + + // BearerTokenFile configures where the service account token can be found. This will be unique per service in a + // multi port Pod. + BearerTokenFile string + + // ConsulAPITimeout is the duration that the consul API client will + // wait for a response from the API before cancelling the request. + ConsulAPITimeout time.Duration +} + +// initCopyContainer returns the init container spec for the copy container which places +// the consul binary into the shared volume. +func (w *MeshWebhook) initCopyContainer() corev1.Container { + // Copy the Consul binary from the image to the shared volume. 
+ cmd := "cp /bin/consul /consul/connect-inject/consul" + container := corev1.Container{ + Name: InjectInitCopyContainerName, + Image: w.ImageConsul, + Resources: w.InitContainerResources, + VolumeMounts: []corev1.VolumeMount{ + { + Name: volumeName, + MountPath: "/consul/connect-inject", + }, + }, + Command: []string{"/bin/sh", "-ec", cmd}, + } + // If running on OpenShift, don't set the security context and instead let OpenShift set a random user/group for us. + if !w.EnableOpenShift { + container.SecurityContext = &corev1.SecurityContext{ + // Set RunAsUser because the default user for the consul container is root and we want to run non-root. + RunAsUser: pointer.Int64(initContainersUserAndGroupID), + RunAsGroup: pointer.Int64(initContainersUserAndGroupID), + RunAsNonRoot: pointer.Bool(true), + ReadOnlyRootFilesystem: pointer.Bool(true), + } + } + return container } // containerInit returns the init container spec for connect-init that polls for the service and the connect proxy service to be registered @@ -45,14 +140,41 @@ func (w *MeshWebhook) containerInit(namespace corev1.Namespace, pod corev1.Pod, return corev1.Container{}, err } + dnsEnabled, err := consulDNSEnabled(namespace, pod, w.EnableConsulDNS) + if err != nil { + return corev1.Container{}, err + } + + var consulDNSClusterIP string + if dnsEnabled { + // If Consul DNS is enabled, we find the environment variable that has the value + // of the ClusterIP of the Consul DNS Service. constructDNSServiceHostName returns + // the name of the env variable whose value is the ClusterIP of the Consul DNS Service. + consulDNSClusterIP = os.Getenv(w.constructDNSServiceHostName()) + if consulDNSClusterIP == "" { + return corev1.Container{}, fmt.Errorf("environment variable %s is not found", w.constructDNSServiceHostName()) + } + } + multiPort := mpi.serviceName != "" data := initContainerCommandData{ - AuthMethod: w.AuthMethod, - ConsulNodeName: ConsulNodeName, - MultiPort: multiPort, - LogLevel: w.LogLevel, - LogJSON: w.LogJSON, + AuthMethod: w.AuthMethod, + ConsulPartition: w.ConsulPartition, + ConsulNamespace: w.consulNamespace(namespace.Name), + NamespaceMirroringEnabled: w.EnableK8SNSMirroring, + ConsulCACert: w.ConsulCACert, + EnableTransparentProxy: tproxyEnabled, + EnableCNI: w.EnableCNI, + TProxyExcludeInboundPorts: splitCommaSeparatedItemsFromAnnotation(annotationTProxyExcludeInboundPorts, pod), + TProxyExcludeOutboundPorts: splitCommaSeparatedItemsFromAnnotation(annotationTProxyExcludeOutboundPorts, pod), + TProxyExcludeOutboundCIDRs: splitCommaSeparatedItemsFromAnnotation(annotationTProxyExcludeOutboundCIDRs, pod), + TProxyExcludeUIDs: splitCommaSeparatedItemsFromAnnotation(annotationTProxyExcludeUIDs, pod), + ConsulDNSClusterIP: consulDNSClusterIP, + EnvoyUID: envoyUserAndGroupID, + MultiPort: multiPort, + EnvoyAdminPort: 19000 + mpi.serviceIndex, + ConsulAPITimeout: w.ConsulAPITimeout, } // Create expected volume mounts @@ -68,7 +190,6 @@ func (w *MeshWebhook) containerInit(namespace corev1.Namespace, pod corev1.Pod, } else { data.ServiceName = pod.Annotations[annotationService] } - var bearerTokenFile string if w.AuthMethod != "" { if multiPort { // If multi port then we require that the service account name @@ -78,16 +199,59 @@ func (w *MeshWebhook) containerInit(namespace corev1.Namespace, pod corev1.Pod, data.ServiceAccountName = pod.Spec.ServiceAccountName } // Extract the service account token's volume mount - var saTokenVolumeMount corev1.VolumeMount - saTokenVolumeMount, bearerTokenFile, err = 
findServiceAccountVolumeMount(pod, mpi.serviceName) + saTokenVolumeMount, bearerTokenFile, err := findServiceAccountVolumeMount(pod, multiPort, mpi.serviceName) if err != nil { return corev1.Container{}, err } + data.BearerTokenFile = bearerTokenFile // Append to volume mounts volMounts = append(volMounts, saTokenVolumeMount) } + // This determines how to configure the consul connect envoy command: what + // metrics backend to use and what path to expose on the + // envoy_prometheus_bind_addr listener for scraping. + metricsServer, err := w.MetricsConfig.shouldRunMergedMetricsServer(pod) + if err != nil { + return corev1.Container{}, err + } + if metricsServer { + prometheusScrapePath := w.MetricsConfig.prometheusScrapePath(pod) + mergedMetricsPort, err := w.MetricsConfig.mergedMetricsPort(pod) + if err != nil { + return corev1.Container{}, err + } + data.PrometheusScrapePath = prometheusScrapePath + data.PrometheusBackendPort = mergedMetricsPort + } + // Pull the TLS config from the relevant annotations. + if raw, ok := pod.Annotations[annotationPrometheusCAFile]; ok && raw != "" { + data.PrometheusCAFile = raw + } + if raw, ok := pod.Annotations[annotationPrometheusCAPath]; ok && raw != "" { + data.PrometheusCAPath = raw + } + if raw, ok := pod.Annotations[annotationPrometheusCertFile]; ok && raw != "" { + data.PrometheusCertFile = raw + } + if raw, ok := pod.Annotations[annotationPrometheusKeyFile]; ok && raw != "" { + data.PrometheusKeyFile = raw + } + + // Validate required Prometheus TLS config is present if set. + if data.PrometheusCertFile != "" || data.PrometheusKeyFile != "" || data.PrometheusCAFile != "" || data.PrometheusCAPath != "" { + if data.PrometheusCAFile == "" && data.PrometheusCAPath == "" { + return corev1.Container{}, fmt.Errorf("Must set one of %q or %q when providing prometheus TLS config", annotationPrometheusCAFile, annotationPrometheusCAPath) + } + if data.PrometheusCertFile == "" { + return corev1.Container{}, fmt.Errorf("Must set %q when providing prometheus TLS config", annotationPrometheusCertFile) + } + if data.PrometheusKeyFile == "" { + return corev1.Container{}, fmt.Errorf("Must set %q when providing prometheus TLS config", annotationPrometheusKeyFile) + } + } + // Render the command var buf bytes.Buffer tpl := template.Must(template.New("root").Parse(strings.TrimSpace( @@ -106,32 +270,28 @@ func (w *MeshWebhook) containerInit(namespace corev1.Namespace, pod corev1.Pod, Image: w.ImageConsulK8S, Env: []corev1.EnvVar{ { - Name: "POD_NAME", + Name: "HOST_IP", ValueFrom: &corev1.EnvVarSource{ - FieldRef: &corev1.ObjectFieldSelector{FieldPath: "metadata.name"}, + FieldRef: &corev1.ObjectFieldSelector{FieldPath: "status.hostIP"}, }, }, { - Name: "POD_NAMESPACE", + Name: "POD_IP", ValueFrom: &corev1.EnvVarSource{ - FieldRef: &corev1.ObjectFieldSelector{FieldPath: "metadata.namespace"}, + FieldRef: &corev1.ObjectFieldSelector{FieldPath: "status.podIP"}, }, }, { - Name: "CONSUL_ADDRESSES", - Value: w.ConsulAddress, - }, - { - Name: "CONSUL_GRPC_PORT", - Value: strconv.Itoa(w.ConsulConfig.GRPCPort), - }, - { - Name: "CONSUL_HTTP_PORT", - Value: strconv.Itoa(w.ConsulConfig.HTTPPort), + Name: "POD_NAME", + ValueFrom: &corev1.EnvVarSource{ + FieldRef: &corev1.ObjectFieldSelector{FieldPath: "metadata.name"}, + }, }, { - Name: "CONSUL_API_TIMEOUT", - Value: w.ConsulConfig.APITimeout.String(), + Name: "POD_NAMESPACE", + ValueFrom: &corev1.EnvVarSource{ + FieldRef: &corev1.ObjectFieldSelector{FieldPath: "metadata.namespace"}, + }, }, }, Resources: 
 w.InitContainerResources,
@@ -139,92 +299,10 @@ func (w *MeshWebhook) containerInit(namespace corev1.Namespace, pod corev1.Pod,
 		Command: []string{"/bin/sh", "-ec", buf.String()},
 	}
 
-	if w.TLSEnabled {
-		container.Env = append(container.Env,
-			corev1.EnvVar{
-				Name:  "CONSUL_USE_TLS",
-				Value: "true",
-			},
-			corev1.EnvVar{
-				Name:  "CONSUL_CACERT_PEM",
-				Value: w.ConsulCACert,
-			},
-			corev1.EnvVar{
-				Name:  "CONSUL_TLS_SERVER_NAME",
-				Value: w.ConsulTLSServerName,
-			})
-	}
-
-	if w.AuthMethod != "" {
-		container.Env = append(container.Env,
-			corev1.EnvVar{
-				Name:  "CONSUL_LOGIN_AUTH_METHOD",
-				Value: w.AuthMethod,
-			},
-			corev1.EnvVar{
-				Name:  "CONSUL_LOGIN_BEARER_TOKEN_FILE",
-				Value: bearerTokenFile,
-			},
-			corev1.EnvVar{
-				Name:  "CONSUL_LOGIN_META",
-				Value: "pod=$(POD_NAMESPACE)/$(POD_NAME)",
-			})
-
-		if w.EnableNamespaces {
-			if w.EnableK8SNSMirroring {
-				container.Env = append(container.Env,
-					corev1.EnvVar{
-						Name:  "CONSUL_LOGIN_NAMESPACE",
-						Value: "default",
-					})
-			} else {
-				container.Env = append(container.Env,
-					corev1.EnvVar{
-						Name:  "CONSUL_LOGIN_NAMESPACE",
-						Value: w.consulNamespace(namespace.Name),
-					})
-			}
-		}
-
-		if w.ConsulPartition != "" {
-			container.Env = append(container.Env,
-				corev1.EnvVar{
-					Name:  "CONSUL_LOGIN_PARTITION",
-					Value: w.ConsulPartition,
-				})
-		}
-	}
-	if w.EnableNamespaces {
-		container.Env = append(container.Env,
-			corev1.EnvVar{
-				Name:  "CONSUL_NAMESPACE",
-				Value: w.consulNamespace(namespace.Name),
-			})
-	}
-
-	if w.ConsulPartition != "" {
-		container.Env = append(container.Env,
-			corev1.EnvVar{
-				Name:  "CONSUL_PARTITION",
-				Value: w.ConsulPartition,
-			})
-	}
-
 	if tproxyEnabled {
+		// Running consul connect redirect-traffic with iptables
+		// requires both being a root user and having NET_ADMIN capability.
 		if !w.EnableCNI {
-			// Set redirect traffic config for the container so that we can apply iptables rules.
-			redirectTrafficConfig, err := w.iptablesConfigJSON(pod, namespace)
-			if err != nil {
-				return corev1.Container{}, err
-			}
-			container.Env = append(container.Env,
-				corev1.EnvVar{
-					Name:  "CONSUL_REDIRECT_TRAFFIC_CONFIG",
-					Value: redirectTrafficConfig,
-				})
-
-			// Running consul connect redirect-traffic with iptables
-			// requires both being a root user and having NET_ADMIN capability.
 			container.SecurityContext = &corev1.SecurityContext{
 				RunAsUser:    pointer.Int64(rootUserAndGroupID),
 				RunAsGroup:   pointer.Int64(rootUserAndGroupID),
@@ -251,6 +329,15 @@ func (w *MeshWebhook) containerInit(namespace corev1.Namespace, pod corev1.Pod,
 	return container, nil
 }
 
+// constructDNSServiceHostName uses the resource prefix and the DNS Service hostname suffix to construct the
+// key of the env variable whose value is the cluster IP of the Consul DNS Service.
+// It translates "resource-prefix" into "RESOURCE_PREFIX_DNS_SERVICE_HOST".
+func (w *MeshWebhook) constructDNSServiceHostName() string {
+	upcaseResourcePrefix := strings.ToUpper(w.ResourcePrefix)
+	upcaseResourcePrefixWithUnderscores := strings.ReplaceAll(upcaseResourcePrefix, "-", "_")
+	return strings.Join([]string{upcaseResourcePrefixWithUnderscores, dnsServiceHostEnvSuffix}, "_")
+}
+
 // transparentProxyEnabled returns true if transparent proxy should be enabled for this pod.
 // It returns an error when the annotation value cannot be parsed by strconv.ParseBool or if we are unable
 // to read the pod's namespace label when it exists.
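The constructDNSServiceHostName helper above leans on standard Kubernetes behavior: for every active Service, the kubelet injects a <SERVICE_NAME>_SERVICE_HOST environment variable into pods, with the name uppercased and dashes replaced by underscores, so a Service named "consul-consul-dns" yields CONSUL_CONSUL_DNS_SERVICE_HOST. The minimal, self-contained Go sketch below is not part of this patch; the "consul-consul" prefix and the 10.0.34.16 address are illustrative assumptions. It mirrors that lookup and the error path containerInit takes when the variable is absent:

package main

import (
	"fmt"
	"os"
	"strings"
)

// dnsServiceHostName mirrors constructDNSServiceHostName:
// "consul-consul" -> "CONSUL_CONSUL_DNS_SERVICE_HOST".
func dnsServiceHostName(resourcePrefix string) string {
	prefix := strings.ReplaceAll(strings.ToUpper(resourcePrefix), "-", "_")
	return prefix + "_DNS_SERVICE_HOST"
}

func main() {
	// Simulate the variable the kubelet would inject for the DNS Service,
	// using an illustrative ClusterIP.
	os.Setenv("CONSUL_CONSUL_DNS_SERVICE_HOST", "10.0.34.16")

	key := dnsServiceHostName("consul-consul")
	if ip := os.Getenv(key); ip != "" {
		fmt.Printf("%s=%s\n", key, ip)
	} else {
		// When Consul DNS is enabled, containerInit returns an error in this
		// case instead of silently skipping DNS redirection.
		fmt.Printf("environment variable %s is not found\n", key)
	}
}

Run as-is, this prints CONSUL_CONSUL_DNS_SERVICE_HOST=10.0.34.16; the empty-string branch corresponds to the error containerInit returns when Consul DNS is enabled but the DNS Service's env var has not been injected.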
@@ -297,13 +384,36 @@ func splitCommaSeparatedItemsFromAnnotation(annotation string, pod corev1.Pod) [
 // initContainerCommandTpl is the template for the command executed by
 // the init container.
 const initContainerCommandTpl = `
+{{- if .ConsulCACert}}
+export CONSUL_HTTP_ADDR="https://${HOST_IP}:8501"
+export CONSUL_GRPC_ADDR="https://${HOST_IP}:8502"
+export CONSUL_CACERT=/consul/connect-inject/consul-ca.pem
+cat <<EOF >/consul/connect-inject/consul-ca.pem
+{{ .ConsulCACert }}
+EOF
+{{- else}}
+export CONSUL_HTTP_ADDR="${HOST_IP}:8500"
+export CONSUL_GRPC_ADDR="${HOST_IP}:8502"
+{{- end}}
 consul-k8s-control-plane connect-init -pod-name=${POD_NAME} -pod-namespace=${POD_NAMESPACE} \
-  -consul-node-name={{ .ConsulNodeName }} \
-  -log-level={{ .LogLevel }} \
-  -log-json={{ .LogJSON }} \
+  -consul-api-timeout={{ .ConsulAPITimeout }} \
 {{- if .AuthMethod }}
+  -acl-auth-method="{{ .AuthMethod }}" \
   -service-account-name="{{ .ServiceAccountName }}" \
   -service-name="{{ .ServiceName }}" \
+  -bearer-token-file={{ .BearerTokenFile }} \
+  {{- if .MultiPort }}
+  -acl-token-sink=/consul/connect-inject/acl-token-{{ .ServiceName }} \
+  {{- end }}
+  {{- if .ConsulNamespace }}
+  {{- if .NamespaceMirroringEnabled }}
+  {{- /* If namespace mirroring is enabled, the auth method is
+  defined in the default namespace */}}
+  -auth-method-namespace="default" \
+  {{- else }}
+  -auth-method-namespace="{{ .ConsulNamespace }}" \
+  {{- end }}
+  {{- end }}
 {{- end }}
 {{- if .MultiPort }}
   -multiport=true \
@@ -312,4 +422,90 @@ consul-k8s-control-plane connect-init -pod-name=${POD_NAME} -pod-namespace=${POD
   -service-name="{{ .ServiceName }}" \
   {{- end }}
 {{- end }}
+  {{- if .ConsulPartition }}
+  -partition="{{ .ConsulPartition }}" \
+  {{- end }}
+  {{- if .ConsulNamespace }}
+  -consul-service-namespace="{{ .ConsulNamespace }}" \
+  {{- end }}
+
+# Generate the envoy bootstrap code
+/consul/connect-inject/consul connect envoy \
+  {{- if .MultiPort }}
+  -proxy-id="$(cat /consul/connect-inject/proxyid-{{.ServiceName}})" \
+  {{- else }}
+  -proxy-id="$(cat /consul/connect-inject/proxyid)" \
+  {{- end }}
+  {{- if .PrometheusScrapePath }}
+  -prometheus-scrape-path="{{ .PrometheusScrapePath }}" \
+  {{- end }}
+  {{- if .PrometheusBackendPort }}
+  -prometheus-backend-port="{{ .PrometheusBackendPort }}" \
+  {{- end }}
+  {{- if .PrometheusCAFile }}
+  -prometheus-ca-file="{{ .PrometheusCAFile }}" \
+  {{- end }}
+  {{- if .PrometheusCAPath }}
+  -prometheus-ca-path="{{ .PrometheusCAPath }}" \
+  {{- end }}
+  {{- if .PrometheusCertFile }}
+  -prometheus-cert-file="{{ .PrometheusCertFile }}" \
+  {{- end }}
+  {{- if .PrometheusKeyFile }}
+  -prometheus-key-file="{{ .PrometheusKeyFile }}" \
+  {{- end }}
+  {{- if .AuthMethod }}
+  {{- if .MultiPort }}
+  -token-file="/consul/connect-inject/acl-token-{{ .ServiceName }}" \
+  {{- else }}
+  -token-file="/consul/connect-inject/acl-token" \
+  {{- end }}
+  {{- end }}
+  {{- if .ConsulPartition }}
+  -partition="{{ .ConsulPartition }}" \
+  {{- end }}
+  {{- if .ConsulNamespace }}
+  -namespace="{{ .ConsulNamespace }}" \
+  {{- end }}
+  {{- if .MultiPort }}
+  -admin-bind=127.0.0.1:{{ .EnvoyAdminPort }} \
+  {{- end }}
+  -bootstrap > {{ if .MultiPort }}/consul/connect-inject/envoy-bootstrap-{{.ServiceName}}.yaml{{ else }}/consul/connect-inject/envoy-bootstrap.yaml{{ end }}
+
+
+{{- if .EnableTransparentProxy }}
+{{- if not .EnableCNI }}
+{{- /* The newline below is intentional to allow extra space
+       in the rendered template between this and the previous commands. */}}
+
+# Apply traffic redirection rules.
+/consul/connect-inject/consul connect redirect-traffic \ + {{- if .AuthMethod }} + -token-file="/consul/connect-inject/acl-token" \ + {{- end }} + {{- if .ConsulPartition }} + -partition="{{ .ConsulPartition }}" \ + {{- end }} + {{- if .ConsulNamespace }} + -namespace="{{ .ConsulNamespace }}" \ + {{- end }} + {{- if .ConsulDNSClusterIP }} + -consul-dns-ip="{{ .ConsulDNSClusterIP }}" \ + {{- end }} + {{- range .TProxyExcludeInboundPorts }} + -exclude-inbound-port="{{ . }}" \ + {{- end }} + {{- range .TProxyExcludeOutboundPorts }} + -exclude-outbound-port="{{ . }}" \ + {{- end }} + {{- range .TProxyExcludeOutboundCIDRs }} + -exclude-outbound-cidr="{{ . }}" \ + {{- end }} + {{- range .TProxyExcludeUIDs }} + -exclude-uid="{{ . }}" \ + {{- end }} + -proxy-id="$(cat /consul/connect-inject/proxyid)" \ + -proxy-uid={{ .EnvoyUID }} +{{- end }} +{{- end }} ` diff --git a/control-plane/connect-inject/container_init_test.go b/control-plane/connect-inject/container_init_test.go index 66211f097f..22f14c1f73 100644 --- a/control-plane/connect-inject/container_init_test.go +++ b/control-plane/connect-inject/container_init_test.go @@ -2,11 +2,11 @@ package connectinject import ( "fmt" + "os" "strings" "testing" "time" - "github.com/hashicorp/consul-k8s/control-plane/consul" "github.com/stretchr/testify/require" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" @@ -48,46 +48,35 @@ func TestHandlerContainerInit(t *testing.T) { Name string Pod func(*corev1.Pod) *corev1.Pod Webhook MeshWebhook - ExpCmd string // Strings.Contains test - ExpEnv []corev1.EnvVar + Cmd string // Strings.Contains test + CmdNot string // Not contains + ErrStr string // Error contains }{ + // The first test checks the whole template. Subsequent tests check + // the parts that change. 
{ - "default cmd and env", + "Whole template by default", func(pod *corev1.Pod) *corev1.Pod { pod.Annotations[annotationService] = "web" return pod }, - MeshWebhook{ - ConsulAddress: "10.0.0.0", - ConsulConfig: &consul.Config{HTTPPort: 8500, GRPCPort: 8502}, - LogLevel: "info", - }, - `/bin/sh -ec consul-k8s-control-plane connect-init -pod-name=${POD_NAME} -pod-namespace=${POD_NAMESPACE} \ - -consul-node-name=k8s-service-mesh \ - -log-level=info \ - -log-json=false \`, - []corev1.EnvVar{ - { - Name: "CONSUL_ADDRESSES", - Value: "10.0.0.0", - }, - { - Name: "CONSUL_GRPC_PORT", - Value: "8502", - }, - { - Name: "CONSUL_HTTP_PORT", - Value: "8500", - }, - { - Name: "CONSUL_API_TIMEOUT", - Value: "0s", - }, - }, + MeshWebhook{}, + `/bin/sh -ec +export CONSUL_HTTP_ADDR="${HOST_IP}:8500" +export CONSUL_GRPC_ADDR="${HOST_IP}:8502" +consul-k8s-control-plane connect-init -pod-name=${POD_NAME} -pod-namespace=${POD_NAMESPACE} \ + -consul-api-timeout=0s \ + +# Generate the envoy bootstrap code +/consul/connect-inject/consul connect envoy \ + -proxy-id="$(cat /consul/connect-inject/proxyid)" \ + -bootstrap > /consul/connect-inject/envoy-bootstrap.yaml`, + "", + "", }, { - "with auth method", + "When auth method is set -service-account-name and -service-name are passed in", func(pod *corev1.Pod) *corev1.Pod { pod.Annotations[annotationService] = "web" pod.Spec.ServiceAccountName = "a-service-account-name" @@ -100,133 +89,299 @@ func TestHandlerContainerInit(t *testing.T) { return pod }, MeshWebhook{ - AuthMethod: "an-auth-method", - ConsulAddress: "10.0.0.0", - ConsulConfig: &consul.Config{HTTPPort: 8500, GRPCPort: 8502, APITimeout: 5 * time.Second}, - LogLevel: "debug", - LogJSON: true, - }, - `/bin/sh -ec consul-k8s-control-plane connect-init -pod-name=${POD_NAME} -pod-namespace=${POD_NAMESPACE} \ - -consul-node-name=k8s-service-mesh \ - -log-level=debug \ - -log-json=true \ + AuthMethod: "an-auth-method", + ConsulAPITimeout: 5 * time.Second, + }, + `/bin/sh -ec +export CONSUL_HTTP_ADDR="${HOST_IP}:8500" +export CONSUL_GRPC_ADDR="${HOST_IP}:8502" +consul-k8s-control-plane connect-init -pod-name=${POD_NAME} -pod-namespace=${POD_NAMESPACE} \ + -consul-api-timeout=5s \ + -acl-auth-method="an-auth-method" \ -service-account-name="a-service-account-name" \ - -service-name="web" \`, - []corev1.EnvVar{ - { - Name: "CONSUL_ADDRESSES", - Value: "10.0.0.0", - }, - { - Name: "CONSUL_GRPC_PORT", - Value: "8502", - }, - { - Name: "CONSUL_HTTP_PORT", - Value: "8500", - }, - { - Name: "CONSUL_API_TIMEOUT", - Value: "5s", - }, - { - Name: "CONSUL_LOGIN_AUTH_METHOD", - Value: "an-auth-method", - }, - { - Name: "CONSUL_LOGIN_BEARER_TOKEN_FILE", - Value: "/var/run/secrets/kubernetes.io/serviceaccount/token", - }, - { - Name: "CONSUL_LOGIN_META", - Value: "pod=$(POD_NAMESPACE)/$(POD_NAME)", - }, + -service-name="web" \ +`, + "", + "", + }, + { + "When running the merged metrics server, configures consul connect envoy command", + func(pod *corev1.Pod) *corev1.Pod { + // The annotations to enable metrics, enable merging, and + // service metrics port make the condition to run the merged + // metrics server true. When that is the case, + // prometheusScrapePath and mergedMetricsPort should get + // rendered as -prometheus-scrape-path and + // -prometheus-backend-port to the consul connect envoy command. 
+ pod.Annotations[annotationService] = "web" + pod.Annotations[annotationEnableMetrics] = "true" + pod.Annotations[annotationEnableMetricsMerging] = "true" + pod.Annotations[annotationMergedMetricsPort] = "20100" + pod.Annotations[annotationServiceMetricsPort] = "1234" + pod.Annotations[annotationPrometheusScrapePort] = "22222" + pod.Annotations[annotationPrometheusScrapePath] = "/scrape-path" + pod.Annotations[annotationPrometheusCAFile] = "/certs/ca.crt" + pod.Annotations[annotationPrometheusCAPath] = "/certs/ca/" + pod.Annotations[annotationPrometheusCertFile] = "/certs/server.crt" + pod.Annotations[annotationPrometheusKeyFile] = "/certs/key.pem" + return pod + }, + MeshWebhook{ + ConsulAPITimeout: 5 * time.Second, + }, + `# Generate the envoy bootstrap code +/consul/connect-inject/consul connect envoy \ + -proxy-id="$(cat /consul/connect-inject/proxyid)" \ + -prometheus-scrape-path="/scrape-path" \ + -prometheus-backend-port="20100" \ + -prometheus-ca-file="/certs/ca.crt" \ + -prometheus-ca-path="/certs/ca/" \ + -prometheus-cert-file="/certs/server.crt" \ + -prometheus-key-file="/certs/key.pem" \ + -bootstrap > /consul/connect-inject/envoy-bootstrap.yaml`, + "", + "", + }, + { + "When providing Prometheus TLS config, missing CA gives an error", + func(pod *corev1.Pod) *corev1.Pod { + pod.Annotations[annotationService] = "web" + pod.Annotations[annotationEnableMetrics] = "true" + pod.Annotations[annotationEnableMetricsMerging] = "true" + pod.Annotations[annotationMergedMetricsPort] = "20100" + pod.Annotations[annotationPrometheusScrapePort] = "22222" + pod.Annotations[annotationPrometheusScrapePath] = "/scrape-path" + pod.Annotations[annotationPrometheusCertFile] = "/certs/server.crt" + pod.Annotations[annotationPrometheusKeyFile] = "/certs/key.pem" + return pod + }, + MeshWebhook{ + ConsulAPITimeout: 5 * time.Second, + }, + "", + "", + fmt.Sprintf("Must set one of %q or %q", annotationPrometheusCAFile, annotationPrometheusCAPath), + }, + { + "When providing Prometheus TLS config, missing cert gives an error", + func(pod *corev1.Pod) *corev1.Pod { + pod.Annotations[annotationService] = "web" + pod.Annotations[annotationEnableMetrics] = "true" + pod.Annotations[annotationEnableMetricsMerging] = "true" + pod.Annotations[annotationMergedMetricsPort] = "20100" + pod.Annotations[annotationPrometheusScrapePort] = "22222" + pod.Annotations[annotationPrometheusScrapePath] = "/scrape-path" + pod.Annotations[annotationPrometheusCAFile] = "/certs/ca.crt" + pod.Annotations[annotationPrometheusKeyFile] = "/certs/key.pem" + return pod }, + MeshWebhook{ + ConsulAPITimeout: 5 * time.Second, + }, + "", + "", + fmt.Sprintf("Must set %q", annotationPrometheusCertFile), + }, + { + "When providing Prometheus TLS config, missing key gives an error", + func(pod *corev1.Pod) *corev1.Pod { + pod.Annotations[annotationService] = "web" + pod.Annotations[annotationEnableMetrics] = "true" + pod.Annotations[annotationEnableMetricsMerging] = "true" + pod.Annotations[annotationMergedMetricsPort] = "20100" + pod.Annotations[annotationPrometheusScrapePort] = "22222" + pod.Annotations[annotationPrometheusScrapePath] = "/scrape-path" + pod.Annotations[annotationPrometheusCAPath] = "/certs/ca/" + pod.Annotations[annotationPrometheusCertFile] = "/certs/server.crt" + return pod + }, + MeshWebhook{ + ConsulAPITimeout: 5 * time.Second, + }, + "", + "", + fmt.Sprintf("Must set %q", annotationPrometheusKeyFile), }, } for _, tt := range cases { t.Run(tt.Name, func(t *testing.T) { - w := tt.Webhook + require := require.New(t) + + 
h := tt.Webhook pod := *tt.Pod(minimal()) - container, err := w.containerInit(testNS, pod, multiPortInfo{}) - require.NoError(t, err) + container, err := h.containerInit(testNS, pod, multiPortInfo{}) + if tt.ErrStr == "" { + require.NoError(err) + } else { + require.Contains(err.Error(), tt.ErrStr) + } actual := strings.Join(container.Command, " ") - require.Contains(t, actual, tt.ExpCmd) - require.EqualValues(t, container.Env[2:], tt.ExpEnv) + require.Contains(actual, tt.Cmd) + if tt.CmdNot != "" { + require.NotContains(actual, tt.CmdNot) + } }) } } func TestHandlerContainerInit_transparentProxy(t *testing.T) { cases := map[string]struct { - globalEnabled bool - cniEnabled bool - annotations map[string]string - expTproxyEnabled bool - namespaceLabel map[string]string + globalEnabled bool + cniEnabled bool + annotations map[string]string + expectedContainsCmd string + expectedNotContainsCmd string + namespaceLabel map[string]string }{ "enabled globally, ns not set, annotation not provided, cni disabled": { true, false, nil, - true, + `/consul/connect-inject/consul connect redirect-traffic \ + -proxy-id="$(cat /consul/connect-inject/proxyid)" \ + -proxy-uid=5995`, + "", nil, }, "enabled globally, ns not set, annotation is false, cni disabled": { true, false, map[string]string{keyTransparentProxy: "false"}, - false, + "", + `/consul/connect-inject/consul connect redirect-traffic \ + -proxy-id="$(cat /consul/connect-inject/proxyid)" \ + -proxy-uid=5995`, nil, }, "enabled globally, ns not set, annotation is true, cni disabled": { true, false, map[string]string{keyTransparentProxy: "true"}, - true, + `/consul/connect-inject/consul connect redirect-traffic \ + -proxy-id="$(cat /consul/connect-inject/proxyid)" \ + -proxy-uid=5995`, + "", nil, }, "disabled globally, ns not set, annotation not provided, cni disabled": { false, false, nil, - false, + "", + `/consul/connect-inject/consul connect redirect-traffic \ + -proxy-id="$(cat /consul/connect-inject/proxyid)" \ + -proxy-uid=5995`, nil, }, "disabled globally, ns not set, annotation is false, cni disabled": { false, false, map[string]string{keyTransparentProxy: "false"}, - false, + "", + `/consul/connect-inject/consul connect redirect-traffic \ + -proxy-id="$(cat /consul/connect-inject/proxyid)" \ + -proxy-uid=5995`, nil, }, "disabled globally, ns not set, annotation is true, cni disabled": { false, false, map[string]string{keyTransparentProxy: "true"}, + `/consul/connect-inject/consul connect redirect-traffic \ + -proxy-id="$(cat /consul/connect-inject/proxyid)" \ + -proxy-uid=5995`, + "", + nil, + }, + "exclude-inbound-ports, ns is not set, annotation is provided, cni disabled": { true, + false, + map[string]string{ + keyTransparentProxy: "true", + annotationTProxyExcludeInboundPorts: "9090,9091", + }, + `/consul/connect-inject/consul connect redirect-traffic \ + -exclude-inbound-port="9090" \ + -exclude-inbound-port="9091" \ + -proxy-id="$(cat /consul/connect-inject/proxyid)" \ + -proxy-uid=5995`, + "", nil, }, - "disabled globally, ns enabled, annotation not set, cni disabled": { + "exclude-outbound-ports, ns is not set, annotation is provided, cni disabled": { + true, false, + map[string]string{ + keyTransparentProxy: "true", + annotationTProxyExcludeOutboundPorts: "9090,9091", + }, + `/consul/connect-inject/consul connect redirect-traffic \ + -exclude-outbound-port="9090" \ + -exclude-outbound-port="9091" \ + -proxy-id="$(cat /consul/connect-inject/proxyid)" \ + -proxy-uid=5995`, + "", + nil, + }, + "exclude-outbound-cidrs annotation is 
provided, cni disabled": { + true, false, + map[string]string{ + keyTransparentProxy: "true", + annotationTProxyExcludeOutboundCIDRs: "1.1.1.1,2.2.2.2/24", + }, + `/consul/connect-inject/consul connect redirect-traffic \ + -exclude-outbound-cidr="1.1.1.1" \ + -exclude-outbound-cidr="2.2.2.2/24" \ + -proxy-id="$(cat /consul/connect-inject/proxyid)" \ + -proxy-uid=5995`, + "", nil, + }, + "exclude-uids annotation is provided, ns is not set, cni disabled": { true, + false, + map[string]string{ + keyTransparentProxy: "true", + annotationTProxyExcludeUIDs: "6000,7000", + }, + `/consul/connect-inject/consul connect redirect-traffic \ + -exclude-uid="6000" \ + -exclude-uid="7000" \ + -proxy-id="$(cat /consul/connect-inject/proxyid)" \ + -proxy-uid=5995`, + "", + nil, + }, + "disabled globally, ns enabled, annotation not set, cni disabled": { + false, + false, + nil, + `/consul/connect-inject/consul connect redirect-traffic \ + -proxy-id="$(cat /consul/connect-inject/proxyid)" \ + -proxy-uid=5995`, + "", map[string]string{keyTransparentProxy: "true"}, }, "enabled globally, ns disabled, annotation not set, cni disabled": { true, false, nil, - false, + "", + `/consul/connect-inject/consul connect redirect-traffic \ + -proxy-id="$(cat /consul/connect-inject/proxyid)" \ + -proxy-uid=5995`, map[string]string{keyTransparentProxy: "false"}, }, "disabled globally, ns enabled, annotation not set, cni enabled": { false, true, nil, - false, + "", + `/consul/connect-inject/consul connect redirect-traffic \ + -proxy-id="$(cat /consul/connect-inject/proxyid)" \ + -proxy-uid=5995`, map[string]string{keyTransparentProxy: "true"}, }, @@ -234,7 +389,10 @@ func TestHandlerContainerInit_transparentProxy(t *testing.T) { true, true, nil, - false, + "", + `/consul/connect-inject/consul connect redirect-traffic \ + -proxy-id="$(cat /consul/connect-inject/proxyid)" \ + -proxy-uid=5995`, nil, }, } @@ -242,49 +400,162 @@ func TestHandlerContainerInit_transparentProxy(t *testing.T) { t.Run(name, func(t *testing.T) { w := MeshWebhook{ EnableTransparentProxy: c.globalEnabled, + ConsulAPITimeout: 5 * time.Second, EnableCNI: c.cniEnabled, - ConsulConfig: &consul.Config{HTTPPort: 8500}, } pod := minimal() pod.Annotations = c.annotations - var expectedSecurityContext *corev1.SecurityContext - if c.cniEnabled { - expectedSecurityContext = &corev1.SecurityContext{ - RunAsUser: pointer.Int64(initContainersUserAndGroupID), - RunAsGroup: pointer.Int64(initContainersUserAndGroupID), - RunAsNonRoot: pointer.Bool(true), - Privileged: pointer.Bool(false), - Capabilities: &corev1.Capabilities{ - Drop: []corev1.Capability{"ALL"}, - }, + expectedSecurityContext := &corev1.SecurityContext{} + if !c.cniEnabled { + expectedSecurityContext.RunAsUser = pointer.Int64(0) + expectedSecurityContext.RunAsGroup = pointer.Int64(0) + expectedSecurityContext.RunAsNonRoot = pointer.Bool(false) + expectedSecurityContext.Privileged = pointer.Bool(true) + expectedSecurityContext.Capabilities = &corev1.Capabilities{ + Add: []corev1.Capability{netAdminCapability}, } - } else if c.expTproxyEnabled { - expectedSecurityContext = &corev1.SecurityContext{ - RunAsUser: pointer.Int64(0), - RunAsGroup: pointer.Int64(0), - RunAsNonRoot: pointer.Bool(false), - Privileged: pointer.Bool(true), - Capabilities: &corev1.Capabilities{ - Add: []corev1.Capability{netAdminCapability}, - }, + } else { + + expectedSecurityContext.RunAsUser = pointer.Int64(initContainersUserAndGroupID) + expectedSecurityContext.RunAsGroup = pointer.Int64(initContainersUserAndGroupID) + 
expectedSecurityContext.RunAsNonRoot = pointer.Bool(true) + expectedSecurityContext.Privileged = pointer.Bool(false) + expectedSecurityContext.Capabilities = &corev1.Capabilities{ + Drop: []corev1.Capability{"ALL"}, } } ns := testNS ns.Labels = c.namespaceLabel container, err := w.containerInit(ns, *pod, multiPortInfo{}) require.NoError(t, err) + actualCmd := strings.Join(container.Command, " ") - redirectTrafficEnvVarFound := false - for _, ev := range container.Env { - if ev.Name == "CONSUL_REDIRECT_TRAFFIC_CONFIG" { - redirectTrafficEnvVarFound = true - break + if c.expectedContainsCmd != "" { + require.Equal(t, expectedSecurityContext, container.SecurityContext) + require.Contains(t, actualCmd, c.expectedContainsCmd) + } else { + if !c.cniEnabled { + require.Nil(t, container.SecurityContext) + } else { + require.Equal(t, expectedSecurityContext, container.SecurityContext) } + require.NotContains(t, actualCmd, c.expectedNotContainsCmd) } + }) + } +} + +func TestHandlerContainerInit_consulDNS(t *testing.T) { + cases := map[string]struct { + globalEnabled bool + annotations map[string]string + expectedContainsCmd string + namespaceLabel map[string]string + }{ + "enabled globally, ns not set, annotation not provided": { + globalEnabled: true, + expectedContainsCmd: `/consul/connect-inject/consul connect redirect-traffic \ + -consul-dns-ip="10.0.34.16" \ + -proxy-id="$(cat /consul/connect-inject/proxyid)" \ + -proxy-uid=5995`, + }, + "enabled globally, ns not set, annotation is false": { + globalEnabled: true, + annotations: map[string]string{keyConsulDNS: "false"}, + expectedContainsCmd: `/consul/connect-inject/consul connect redirect-traffic \ + -proxy-id="$(cat /consul/connect-inject/proxyid)" \ + -proxy-uid=5995`, + }, + "enabled globally, ns not set, annotation is true": { + globalEnabled: true, + annotations: map[string]string{keyConsulDNS: "true"}, + expectedContainsCmd: `/consul/connect-inject/consul connect redirect-traffic \ + -consul-dns-ip="10.0.34.16" \ + -proxy-id="$(cat /consul/connect-inject/proxyid)" \ + -proxy-uid=5995`, + }, + "disabled globally, ns not set, annotation not provided": { + expectedContainsCmd: `/consul/connect-inject/consul connect redirect-traffic \ + -proxy-id="$(cat /consul/connect-inject/proxyid)" \ + -proxy-uid=5995`, + }, + "disabled globally, ns not set, annotation is false": { + annotations: map[string]string{keyConsulDNS: "false"}, + expectedContainsCmd: `/consul/connect-inject/consul connect redirect-traffic \ + -proxy-id="$(cat /consul/connect-inject/proxyid)" \ + -proxy-uid=5995`, + }, + "disabled globally, ns not set, annotation is true": { + annotations: map[string]string{keyConsulDNS: "true"}, + expectedContainsCmd: `/consul/connect-inject/consul connect redirect-traffic \ + -consul-dns-ip="10.0.34.16" \ + -proxy-id="$(cat /consul/connect-inject/proxyid)" \ + -proxy-uid=5995`, + }, + "disabled globally, ns enabled, annotation not set": { + expectedContainsCmd: `/consul/connect-inject/consul connect redirect-traffic \ + -consul-dns-ip="10.0.34.16" \ + -proxy-id="$(cat /consul/connect-inject/proxyid)" \ + -proxy-uid=5995`, + namespaceLabel: map[string]string{keyConsulDNS: "true"}, + }, + "enabled globally, ns disabled, annotation not set": { + globalEnabled: true, + expectedContainsCmd: `/consul/connect-inject/consul connect redirect-traffic \ + -proxy-id="$(cat /consul/connect-inject/proxyid)" \ + -proxy-uid=5995`, + namespaceLabel: map[string]string{keyConsulDNS: "false"}, + }, + } + for name, c := range cases { + t.Run(name, func(t 
*testing.T) { + w := MeshWebhook{ + EnableConsulDNS: c.globalEnabled, + EnableTransparentProxy: true, + ResourcePrefix: "consul-consul", + ConsulAPITimeout: 5 * time.Second, + } + os.Setenv("CONSUL_CONSUL_DNS_SERVICE_HOST", "10.0.34.16") + defer os.Unsetenv("CONSUL_CONSUL_DNS_SERVICE_HOST") + + pod := minimal() + pod.Annotations = c.annotations + + ns := testNS + ns.Labels = c.namespaceLabel + container, err := w.containerInit(ns, *pod, multiPortInfo{}) + require.NoError(t, err) + actualCmd := strings.Join(container.Command, " ") + + require.Contains(t, actualCmd, c.expectedContainsCmd) + }) + } +} + +func TestHandler_constructDNSServiceHostName(t *testing.T) { + cases := []struct { + prefix string + result string + }{ + { + prefix: "consul-consul", + result: "CONSUL_CONSUL_DNS_SERVICE_HOST", + }, + { + prefix: "release", + result: "RELEASE_DNS_SERVICE_HOST", + }, + { + prefix: "consul-dc1", + result: "CONSUL_DC1_DNS_SERVICE_HOST", + }, + } - require.Equal(t, c.expTproxyEnabled, redirectTrafficEnvVarFound) - require.Equal(t, expectedSecurityContext, container.SecurityContext) + for _, c := range cases { + t.Run(c.prefix, func(t *testing.T) { + w := MeshWebhook{ResourcePrefix: c.prefix, ConsulAPITimeout: 5 * time.Second} + require.Equal(t, c.result, w.constructDNSServiceHostName()) }) } } @@ -325,11 +596,10 @@ func TestHandlerContainerInit_namespacesAndPartitionsEnabled(t *testing.T) { Name string Pod func(*corev1.Pod) *corev1.Pod Webhook MeshWebhook - Cmd string - ExpEnv []corev1.EnvVar + Cmd string // Strings.Contains test }{ { - "default namespace, no partition", + "whole template, default namespace, no partition", func(pod *corev1.Pod) *corev1.Pod { pod.Annotations[annotationService] = "web" return pod @@ -338,38 +608,23 @@ func TestHandlerContainerInit_namespacesAndPartitionsEnabled(t *testing.T) { EnableNamespaces: true, ConsulDestinationNamespace: "default", ConsulPartition: "", - ConsulAddress: "10.0.0.0", - ConsulConfig: &consul.Config{HTTPPort: 8500, GRPCPort: 8502, APITimeout: 5 * time.Second}, - }, - `/bin/sh -ec consul-k8s-control-plane connect-init -pod-name=${POD_NAME} -pod-namespace=${POD_NAMESPACE} \ - -consul-node-name=k8s-service-mesh \ - -log-level=info \ - -log-json=false \`, - []corev1.EnvVar{ - { - Name: "CONSUL_ADDRESSES", - Value: "10.0.0.0", - }, - { - Name: "CONSUL_GRPC_PORT", - Value: "8502", - }, - { - Name: "CONSUL_HTTP_PORT", - Value: "8500", - }, - { - Name: "CONSUL_API_TIMEOUT", - Value: "5s", - }, - { - Name: "CONSUL_NAMESPACE", - Value: "default", - }, + ConsulAPITimeout: 5 * time.Second, }, + `/bin/sh -ec +export CONSUL_HTTP_ADDR="${HOST_IP}:8500" +export CONSUL_GRPC_ADDR="${HOST_IP}:8502" +consul-k8s-control-plane connect-init -pod-name=${POD_NAME} -pod-namespace=${POD_NAMESPACE} \ + -consul-api-timeout=5s \ + -consul-service-namespace="default" \ + +# Generate the envoy bootstrap code +/consul/connect-inject/consul connect envoy \ + -proxy-id="$(cat /consul/connect-inject/proxyid)" \ + -namespace="default" \ + -bootstrap > /consul/connect-inject/envoy-bootstrap.yaml`, }, { - "default namespace, default partition", + "whole template, default namespace, default partition", func(pod *corev1.Pod) *corev1.Pod { pod.Annotations[annotationService] = "web" return pod @@ -378,42 +633,25 @@ func TestHandlerContainerInit_namespacesAndPartitionsEnabled(t *testing.T) { EnableNamespaces: true, ConsulDestinationNamespace: "default", ConsulPartition: "default", - ConsulAddress: "10.0.0.0", - ConsulConfig: &consul.Config{HTTPPort: 8500, GRPCPort: 8502, APITimeout: 5 * 
time.Second}, - }, - `/bin/sh -ec consul-k8s-control-plane connect-init -pod-name=${POD_NAME} -pod-namespace=${POD_NAMESPACE} \ - -consul-node-name=k8s-service-mesh \ - -log-level=info \ - -log-json=false \`, - []corev1.EnvVar{ - { - Name: "CONSUL_ADDRESSES", - Value: "10.0.0.0", - }, - { - Name: "CONSUL_GRPC_PORT", - Value: "8502", - }, - { - Name: "CONSUL_HTTP_PORT", - Value: "8500", - }, - { - Name: "CONSUL_API_TIMEOUT", - Value: "5s", - }, - { - Name: "CONSUL_NAMESPACE", - Value: "default", - }, - { - Name: "CONSUL_PARTITION", - Value: "default", - }, + ConsulAPITimeout: 5 * time.Second, }, + `/bin/sh -ec +export CONSUL_HTTP_ADDR="${HOST_IP}:8500" +export CONSUL_GRPC_ADDR="${HOST_IP}:8502" +consul-k8s-control-plane connect-init -pod-name=${POD_NAME} -pod-namespace=${POD_NAMESPACE} \ + -consul-api-timeout=5s \ + -partition="default" \ + -consul-service-namespace="default" \ + +# Generate the envoy bootstrap code +/consul/connect-inject/consul connect envoy \ + -proxy-id="$(cat /consul/connect-inject/proxyid)" \ + -partition="default" \ + -namespace="default" \ + -bootstrap > /consul/connect-inject/envoy-bootstrap.yaml`, }, { - "non-default namespace, no partition", + "whole template, non-default namespace, no partition", func(pod *corev1.Pod) *corev1.Pod { pod.Annotations[annotationService] = "web" return pod @@ -422,38 +660,23 @@ func TestHandlerContainerInit_namespacesAndPartitionsEnabled(t *testing.T) { EnableNamespaces: true, ConsulDestinationNamespace: "non-default", ConsulPartition: "", - ConsulAddress: "10.0.0.0", - ConsulConfig: &consul.Config{HTTPPort: 8500, GRPCPort: 8502, APITimeout: 5 * time.Second}, - }, - `/bin/sh -ec consul-k8s-control-plane connect-init -pod-name=${POD_NAME} -pod-namespace=${POD_NAMESPACE} \ - -consul-node-name=k8s-service-mesh \ - -log-level=info \ - -log-json=false \`, - []corev1.EnvVar{ - { - Name: "CONSUL_ADDRESSES", - Value: "10.0.0.0", - }, - { - Name: "CONSUL_GRPC_PORT", - Value: "8502", - }, - { - Name: "CONSUL_HTTP_PORT", - Value: "8500", - }, - { - Name: "CONSUL_API_TIMEOUT", - Value: "5s", - }, - { - Name: "CONSUL_NAMESPACE", - Value: "non-default", - }, + ConsulAPITimeout: 5 * time.Second, }, + `/bin/sh -ec +export CONSUL_HTTP_ADDR="${HOST_IP}:8500" +export CONSUL_GRPC_ADDR="${HOST_IP}:8502" +consul-k8s-control-plane connect-init -pod-name=${POD_NAME} -pod-namespace=${POD_NAMESPACE} \ + -consul-api-timeout=5s \ + -consul-service-namespace="non-default" \ + +# Generate the envoy bootstrap code +/consul/connect-inject/consul connect envoy \ + -proxy-id="$(cat /consul/connect-inject/proxyid)" \ + -namespace="non-default" \ + -bootstrap > /consul/connect-inject/envoy-bootstrap.yaml`, }, { - "non-default namespace, non-default partition", + "whole template, non-default namespace, non-default partition", func(pod *corev1.Pod) *corev1.Pod { pod.Annotations[annotationService] = "web" return pod @@ -462,42 +685,25 @@ func TestHandlerContainerInit_namespacesAndPartitionsEnabled(t *testing.T) { EnableNamespaces: true, ConsulDestinationNamespace: "non-default", ConsulPartition: "non-default-part", - ConsulAddress: "10.0.0.0", - ConsulConfig: &consul.Config{HTTPPort: 8500, GRPCPort: 8502, APITimeout: 5 * time.Second}, - }, - `/bin/sh -ec consul-k8s-control-plane connect-init -pod-name=${POD_NAME} -pod-namespace=${POD_NAMESPACE} \ - -consul-node-name=k8s-service-mesh \ - -log-level=info \ - -log-json=false \`, - []corev1.EnvVar{ - { - Name: "CONSUL_ADDRESSES", - Value: "10.0.0.0", - }, - { - Name: "CONSUL_GRPC_PORT", - Value: "8502", - }, - { - Name: 
"CONSUL_HTTP_PORT", - Value: "8500", - }, - { - Name: "CONSUL_API_TIMEOUT", - Value: "5s", - }, - { - Name: "CONSUL_NAMESPACE", - Value: "non-default", - }, - { - Name: "CONSUL_PARTITION", - Value: "non-default-part", - }, + ConsulAPITimeout: 5 * time.Second, }, + `/bin/sh -ec +export CONSUL_HTTP_ADDR="${HOST_IP}:8500" +export CONSUL_GRPC_ADDR="${HOST_IP}:8502" +consul-k8s-control-plane connect-init -pod-name=${POD_NAME} -pod-namespace=${POD_NAMESPACE} \ + -consul-api-timeout=5s \ + -partition="non-default-part" \ + -consul-service-namespace="non-default" \ + +# Generate the envoy bootstrap code +/consul/connect-inject/consul connect envoy \ + -proxy-id="$(cat /consul/connect-inject/proxyid)" \ + -partition="non-default-part" \ + -namespace="non-default" \ + -bootstrap > /consul/connect-inject/envoy-bootstrap.yaml`, }, { - "auth method, non-default namespace, mirroring disabled, default partition", + "Whole template, auth method, non-default namespace, mirroring disabled, default partition", func(pod *corev1.Pod) *corev1.Pod { pod.Annotations[annotationService] = "" return pod @@ -507,64 +713,31 @@ func TestHandlerContainerInit_namespacesAndPartitionsEnabled(t *testing.T) { EnableNamespaces: true, ConsulDestinationNamespace: "non-default", ConsulPartition: "default", - ConsulAddress: "10.0.0.0", - ConsulConfig: &consul.Config{HTTPPort: 8500, GRPCPort: 8502, APITimeout: 5 * time.Second}, + ConsulAPITimeout: 5 * time.Second, }, - `/bin/sh -ec consul-k8s-control-plane connect-init -pod-name=${POD_NAME} -pod-namespace=${POD_NAMESPACE} \ - -consul-node-name=k8s-service-mesh \ - -log-level=info \ - -log-json=false \ + `/bin/sh -ec +export CONSUL_HTTP_ADDR="${HOST_IP}:8500" +export CONSUL_GRPC_ADDR="${HOST_IP}:8502" +consul-k8s-control-plane connect-init -pod-name=${POD_NAME} -pod-namespace=${POD_NAMESPACE} \ + -consul-api-timeout=5s \ + -acl-auth-method="auth-method" \ -service-account-name="web" \ - -service-name="" \`, - []corev1.EnvVar{ - { - Name: "CONSUL_ADDRESSES", - Value: "10.0.0.0", - }, - { - Name: "CONSUL_GRPC_PORT", - Value: "8502", - }, - { - Name: "CONSUL_HTTP_PORT", - Value: "8500", - }, - { - Name: "CONSUL_API_TIMEOUT", - Value: "5s", - }, - { - Name: "CONSUL_LOGIN_AUTH_METHOD", - Value: "auth-method", - }, - { - Name: "CONSUL_LOGIN_BEARER_TOKEN_FILE", - Value: "/var/run/secrets/kubernetes.io/serviceaccount/token", - }, - { - Name: "CONSUL_LOGIN_META", - Value: "pod=$(POD_NAMESPACE)/$(POD_NAME)", - }, - { - Name: "CONSUL_LOGIN_NAMESPACE", - Value: "non-default", - }, - { - Name: "CONSUL_LOGIN_PARTITION", - Value: "default", - }, - { - Name: "CONSUL_NAMESPACE", - Value: "non-default", - }, - { - Name: "CONSUL_PARTITION", - Value: "default", - }, - }, + -service-name="" \ + -bearer-token-file=/var/run/secrets/kubernetes.io/serviceaccount/token \ + -auth-method-namespace="non-default" \ + -partition="default" \ + -consul-service-namespace="non-default" \ + +# Generate the envoy bootstrap code +/consul/connect-inject/consul connect envoy \ + -proxy-id="$(cat /consul/connect-inject/proxyid)" \ + -token-file="/consul/connect-inject/acl-token" \ + -partition="default" \ + -namespace="non-default" \ + -bootstrap > /consul/connect-inject/envoy-bootstrap.yaml`, }, { - "auth method, non-default namespace, mirroring enabled, non-default partition", + "Whole template, auth method, non-default namespace, mirroring enabled, non-default partition", func(pod *corev1.Pod) *corev1.Pod { pod.Annotations[annotationService] = "" return pod @@ -575,75 +748,152 @@ func 
TestHandlerContainerInit_namespacesAndPartitionsEnabled(t *testing.T) { ConsulDestinationNamespace: "non-default", // Overridden by mirroring EnableK8SNSMirroring: true, ConsulPartition: "non-default", - ConsulAddress: "10.0.0.0", - ConsulConfig: &consul.Config{HTTPPort: 8500, GRPCPort: 8502, APITimeout: 5 * time.Second}, + ConsulAPITimeout: 5 * time.Second, }, - `/bin/sh -ec consul-k8s-control-plane connect-init -pod-name=${POD_NAME} -pod-namespace=${POD_NAMESPACE} \ - -consul-node-name=k8s-service-mesh \ - -log-level=info \ - -log-json=false \ + `/bin/sh -ec +export CONSUL_HTTP_ADDR="${HOST_IP}:8500" +export CONSUL_GRPC_ADDR="${HOST_IP}:8502" +consul-k8s-control-plane connect-init -pod-name=${POD_NAME} -pod-namespace=${POD_NAMESPACE} \ + -consul-api-timeout=5s \ + -acl-auth-method="auth-method" \ -service-account-name="web" \ - -service-name="" \`, - []corev1.EnvVar{ - { - Name: "CONSUL_ADDRESSES", - Value: "10.0.0.0", - }, - { - Name: "CONSUL_GRPC_PORT", - Value: "8502", - }, - { - Name: "CONSUL_HTTP_PORT", - Value: "8500", - }, - { - Name: "CONSUL_API_TIMEOUT", - Value: "5s", - }, - { - Name: "CONSUL_LOGIN_AUTH_METHOD", - Value: "auth-method", - }, - { - Name: "CONSUL_LOGIN_BEARER_TOKEN_FILE", - Value: "/var/run/secrets/kubernetes.io/serviceaccount/token", - }, - { - Name: "CONSUL_LOGIN_META", - Value: "pod=$(POD_NAMESPACE)/$(POD_NAME)", - }, - { - Name: "CONSUL_LOGIN_NAMESPACE", - Value: "default", - }, - { - Name: "CONSUL_LOGIN_PARTITION", - Value: "non-default", - }, - { - Name: "CONSUL_NAMESPACE", - Value: "k8snamespace", - }, - { - Name: "CONSUL_PARTITION", - Value: "non-default", - }, + -service-name="" \ + -bearer-token-file=/var/run/secrets/kubernetes.io/serviceaccount/token \ + -auth-method-namespace="default" \ + -partition="non-default" \ + -consul-service-namespace="k8snamespace" \ + +# Generate the envoy bootstrap code +/consul/connect-inject/consul connect envoy \ + -proxy-id="$(cat /consul/connect-inject/proxyid)" \ + -token-file="/consul/connect-inject/acl-token" \ + -partition="non-default" \ + -namespace="k8snamespace" \ + -bootstrap > /consul/connect-inject/envoy-bootstrap.yaml`, + }, + { + "whole template, default namespace, tproxy enabled, no partition", + func(pod *corev1.Pod) *corev1.Pod { + pod.Annotations[annotationService] = "web" + return pod }, + MeshWebhook{ + EnableNamespaces: true, + ConsulDestinationNamespace: "default", + ConsulPartition: "", + EnableTransparentProxy: true, + ConsulAPITimeout: 5 * time.Second, + }, + `/bin/sh -ec +export CONSUL_HTTP_ADDR="${HOST_IP}:8500" +export CONSUL_GRPC_ADDR="${HOST_IP}:8502" +consul-k8s-control-plane connect-init -pod-name=${POD_NAME} -pod-namespace=${POD_NAMESPACE} \ + -consul-api-timeout=5s \ + -consul-service-namespace="default" \ + +# Generate the envoy bootstrap code +/consul/connect-inject/consul connect envoy \ + -proxy-id="$(cat /consul/connect-inject/proxyid)" \ + -namespace="default" \ + -bootstrap > /consul/connect-inject/envoy-bootstrap.yaml + +# Apply traffic redirection rules. 
+/consul/connect-inject/consul connect redirect-traffic \ + -namespace="default" \ + -proxy-id="$(cat /consul/connect-inject/proxyid)" \ + -proxy-uid=5995`, + }, + { + "whole template, non-default namespace, tproxy enabled, default partition", + func(pod *corev1.Pod) *corev1.Pod { + pod.Annotations[annotationService] = "web" + return pod + }, + MeshWebhook{ + EnableNamespaces: true, + ConsulPartition: "default", + ConsulDestinationNamespace: "non-default", + EnableTransparentProxy: true, + ConsulAPITimeout: 5 * time.Second, + }, + `/bin/sh -ec +export CONSUL_HTTP_ADDR="${HOST_IP}:8500" +export CONSUL_GRPC_ADDR="${HOST_IP}:8502" +consul-k8s-control-plane connect-init -pod-name=${POD_NAME} -pod-namespace=${POD_NAMESPACE} \ + -consul-api-timeout=5s \ + -partition="default" \ + -consul-service-namespace="non-default" \ + +# Generate the envoy bootstrap code +/consul/connect-inject/consul connect envoy \ + -proxy-id="$(cat /consul/connect-inject/proxyid)" \ + -partition="default" \ + -namespace="non-default" \ + -bootstrap > /consul/connect-inject/envoy-bootstrap.yaml + +# Apply traffic redirection rules. +/consul/connect-inject/consul connect redirect-traffic \ + -partition="default" \ + -namespace="non-default" \ + -proxy-id="$(cat /consul/connect-inject/proxyid)" \ + -proxy-uid=5995`, + }, + + { + "Whole template, auth method, non-default namespace, mirroring enabled, tproxy enabled, non-default partition", + func(pod *corev1.Pod) *corev1.Pod { + pod.Annotations[annotationService] = "web" + return pod + }, + MeshWebhook{ + AuthMethod: "auth-method", + EnableNamespaces: true, + ConsulPartition: "non-default", + ConsulDestinationNamespace: "non-default", // Overridden by mirroring + EnableK8SNSMirroring: true, + EnableTransparentProxy: true, + ConsulAPITimeout: 5 * time.Second, + }, + `/bin/sh -ec +export CONSUL_HTTP_ADDR="${HOST_IP}:8500" +export CONSUL_GRPC_ADDR="${HOST_IP}:8502" +consul-k8s-control-plane connect-init -pod-name=${POD_NAME} -pod-namespace=${POD_NAMESPACE} \ + -consul-api-timeout=5s \ + -acl-auth-method="auth-method" \ + -service-account-name="web" \ + -service-name="web" \ + -bearer-token-file=/var/run/secrets/kubernetes.io/serviceaccount/token \ + -auth-method-namespace="default" \ + -partition="non-default" \ + -consul-service-namespace="k8snamespace" \ + +# Generate the envoy bootstrap code +/consul/connect-inject/consul connect envoy \ + -proxy-id="$(cat /consul/connect-inject/proxyid)" \ + -token-file="/consul/connect-inject/acl-token" \ + -partition="non-default" \ + -namespace="k8snamespace" \ + -bootstrap > /consul/connect-inject/envoy-bootstrap.yaml + +# Apply traffic redirection rules. 
+/consul/connect-inject/consul connect redirect-traffic \ + -token-file="/consul/connect-inject/acl-token" \ + -partition="non-default" \ + -namespace="k8snamespace" \ + -proxy-id="$(cat /consul/connect-inject/proxyid)" \ + -proxy-uid=5995`, }, } for _, tt := range cases { t.Run(tt.Name, func(t *testing.T) { - h := tt.Webhook - h.LogLevel = "info" - container, err := h.containerInit(testNS, *tt.Pod(minimal()), multiPortInfo{}) - require.NoError(t, err) + require := require.New(t) + + w := tt.Webhook + container, err := w.containerInit(testNS, *tt.Pod(minimal()), multiPortInfo{}) + require.NoError(err) actual := strings.Join(container.Command, " ") - require.Equal(t, tt.Cmd, actual) - if tt.ExpEnv != nil { - require.Equal(t, tt.ExpEnv, container.Env[2:]) - } + require.Equal(tt.Cmd, actual) }) } } @@ -698,18 +948,13 @@ func TestHandlerContainerInit_Multiport(t *testing.T) { NumInitContainers int MultiPortInfos []multiPortInfo Cmd []string // Strings.Contains test - ExpEnvVars []corev1.EnvVar }{ { "Whole template, multiport", func(pod *corev1.Pod) *corev1.Pod { return pod }, - MeshWebhook{ - LogLevel: "info", - ConsulAddress: "10.0.0.0", - ConsulConfig: &consul.Config{HTTPPort: 8500, GRPCPort: 8502}, - }, + MeshWebhook{ConsulAPITimeout: 5 * time.Second}, 2, []multiPortInfo{ { @@ -721,23 +966,37 @@ func TestHandlerContainerInit_Multiport(t *testing.T) { serviceName: "web-admin", }, }, - []string{`/bin/sh -ec consul-k8s-control-plane connect-init -pod-name=${POD_NAME} -pod-namespace=${POD_NAMESPACE} \ - -consul-node-name=k8s-service-mesh \ - -log-level=info \ - -log-json=false \ + []string{ + `/bin/sh -ec +export CONSUL_HTTP_ADDR="${HOST_IP}:8500" +export CONSUL_GRPC_ADDR="${HOST_IP}:8502" +consul-k8s-control-plane connect-init -pod-name=${POD_NAME} -pod-namespace=${POD_NAMESPACE} \ + -consul-api-timeout=5s \ -multiport=true \ -proxy-id-file=/consul/connect-inject/proxyid-web \ - -service-name="web" \`, + -service-name="web" \ + +# Generate the envoy bootstrap code +/consul/connect-inject/consul connect envoy \ + -proxy-id="$(cat /consul/connect-inject/proxyid-web)" \ + -admin-bind=127.0.0.1:19000 \ + -bootstrap > /consul/connect-inject/envoy-bootstrap-web.yaml`, - `/bin/sh -ec consul-k8s-control-plane connect-init -pod-name=${POD_NAME} -pod-namespace=${POD_NAMESPACE} \ - -consul-node-name=k8s-service-mesh \ - -log-level=info \ - -log-json=false \ + `/bin/sh -ec +export CONSUL_HTTP_ADDR="${HOST_IP}:8500" +export CONSUL_GRPC_ADDR="${HOST_IP}:8502" +consul-k8s-control-plane connect-init -pod-name=${POD_NAME} -pod-namespace=${POD_NAMESPACE} \ + -consul-api-timeout=5s \ -multiport=true \ -proxy-id-file=/consul/connect-inject/proxyid-web-admin \ - -service-name="web-admin" \`, + -service-name="web-admin" \ + +# Generate the envoy bootstrap code +/consul/connect-inject/consul connect envoy \ + -proxy-id="$(cat /consul/connect-inject/proxyid-web-admin)" \ + -admin-bind=127.0.0.1:19001 \ + -bootstrap > /consul/connect-inject/envoy-bootstrap-web-admin.yaml`, }, - nil, }, { "Whole template, multiport, auth method", @@ -745,10 +1004,8 @@ func TestHandlerContainerInit_Multiport(t *testing.T) { return pod }, MeshWebhook{ - AuthMethod: "auth-method", - ConsulAddress: "10.0.0.0", - ConsulConfig: &consul.Config{HTTPPort: 8500, GRPCPort: 8502, APITimeout: 5 * time.Second}, - LogLevel: "info", + AuthMethod: "auth-method", + ConsulAPITimeout: 5 * time.Second, }, 2, []multiPortInfo{ @@ -761,113 +1018,150 @@ func TestHandlerContainerInit_Multiport(t *testing.T) { serviceName: "web-admin", }, }, - []string{`/bin/sh 
-ec consul-k8s-control-plane connect-init -pod-name=${POD_NAME} -pod-namespace=${POD_NAMESPACE} \ - -consul-node-name=k8s-service-mesh \ - -log-level=info \ - -log-json=false \ + []string{ + `/bin/sh -ec +export CONSUL_HTTP_ADDR="${HOST_IP}:8500" +export CONSUL_GRPC_ADDR="${HOST_IP}:8502" +consul-k8s-control-plane connect-init -pod-name=${POD_NAME} -pod-namespace=${POD_NAMESPACE} \ + -consul-api-timeout=5s \ + -acl-auth-method="auth-method" \ -service-account-name="web" \ -service-name="web" \ + -bearer-token-file=/var/run/secrets/kubernetes.io/serviceaccount/token \ + -acl-token-sink=/consul/connect-inject/acl-token-web \ -multiport=true \ - -proxy-id-file=/consul/connect-inject/proxyid-web \`, + -proxy-id-file=/consul/connect-inject/proxyid-web \ - `/bin/sh -ec consul-k8s-control-plane connect-init -pod-name=${POD_NAME} -pod-namespace=${POD_NAMESPACE} \ - -consul-node-name=k8s-service-mesh \ - -log-level=info \ - -log-json=false \ +# Generate the envoy bootstrap code +/consul/connect-inject/consul connect envoy \ + -proxy-id="$(cat /consul/connect-inject/proxyid-web)" \ + -token-file="/consul/connect-inject/acl-token-web" \ + -admin-bind=127.0.0.1:19000 \ + -bootstrap > /consul/connect-inject/envoy-bootstrap-web.yaml`, + + `/bin/sh -ec +export CONSUL_HTTP_ADDR="${HOST_IP}:8500" +export CONSUL_GRPC_ADDR="${HOST_IP}:8502" +consul-k8s-control-plane connect-init -pod-name=${POD_NAME} -pod-namespace=${POD_NAMESPACE} \ + -consul-api-timeout=5s \ + -acl-auth-method="auth-method" \ -service-account-name="web-admin" \ -service-name="web-admin" \ + -bearer-token-file=/consul/serviceaccount-web-admin/token \ + -acl-token-sink=/consul/connect-inject/acl-token-web-admin \ -multiport=true \ - -proxy-id-file=/consul/connect-inject/proxyid-web-admin \`, - }, - []corev1.EnvVar{ - { - Name: "CONSUL_LOGIN_BEARER_TOKEN_FILE", - Value: "/var/run/secrets/kubernetes.io/serviceaccount/token", - }, - { - Name: "CONSUL_LOGIN_BEARER_TOKEN_FILE", - Value: "/consul/serviceaccount-web-admin/token", - }, + -proxy-id-file=/consul/connect-inject/proxyid-web-admin \ + +# Generate the envoy bootstrap code +/consul/connect-inject/consul connect envoy \ + -proxy-id="$(cat /consul/connect-inject/proxyid-web-admin)" \ + -token-file="/consul/connect-inject/acl-token-web-admin" \ + -admin-bind=127.0.0.1:19001 \ + -bootstrap > /consul/connect-inject/envoy-bootstrap-web-admin.yaml`, }, }, } for _, tt := range cases { t.Run(tt.Name, func(t *testing.T) { - h := tt.Webhook + require := require.New(t) + + w := tt.Webhook for i := 0; i < tt.NumInitContainers; i++ { - container, err := h.containerInit(testNS, *tt.Pod(minimal()), tt.MultiPortInfos[i]) - require.NoError(t, err) + container, err := w.containerInit(testNS, *tt.Pod(minimal()), tt.MultiPortInfos[i]) + require.NoError(err) actual := strings.Join(container.Command, " ") - require.Equal(t, tt.Cmd[i], actual) - if tt.ExpEnvVars != nil { - require.Contains(t, container.Env, tt.ExpEnvVars[i]) - } + require.Equal(tt.Cmd[i], actual) } }) } } -// If TLSEnabled is set, -// Consul addresses should use HTTPS -// and CA cert should be set as env variable if provided. -// Additionally, test that the init container is correctly configured -// when http or gRPC ports are different from defaults. 
-func TestHandlerContainerInit_WithTLSAndCustomPorts(t *testing.T) { - for _, caProvided := range []bool{true, false} { - name := fmt.Sprintf("ca provided: %t", caProvided) - t.Run(name, func(t *testing.T) { - w := MeshWebhook{ - ConsulAddress: "10.0.0.0", - TLSEnabled: true, - ConsulConfig: &consul.Config{HTTPPort: 443, GRPCPort: 8503}, - } - if caProvided { - w.ConsulCACert = "consul-ca-cert" - } - pod := &corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Annotations: map[string]string{ - annotationService: "foo", - }, - }, +func TestHandlerContainerInit_authMethod(t *testing.T) { + require := require.New(t) + w := MeshWebhook{ + AuthMethod: "release-name-consul-k8s-auth-method", + ConsulAPITimeout: 5 * time.Second, + } + pod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + annotationService: "foo", + }, + }, - Spec: corev1.PodSpec{ - Containers: []corev1.Container{ + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "web", + VolumeMounts: []corev1.VolumeMount{ { - Name: "web", + Name: "default-token-podid", + ReadOnly: true, + MountPath: "/var/run/secrets/kubernetes.io/serviceaccount", }, }, }, - } - container, err := w.containerInit(testNS, *pod, multiPortInfo{}) - require.NoError(t, err) - require.Equal(t, "CONSUL_ADDRESSES", container.Env[2].Name) - require.Equal(t, w.ConsulAddress, container.Env[2].Value) - require.Equal(t, "CONSUL_GRPC_PORT", container.Env[3].Name) - require.Equal(t, fmt.Sprintf("%d", w.ConsulConfig.GRPCPort), container.Env[3].Value) - require.Equal(t, "CONSUL_HTTP_PORT", container.Env[4].Name) - require.Equal(t, fmt.Sprintf("%d", w.ConsulConfig.HTTPPort), container.Env[4].Value) - if w.TLSEnabled { - require.Equal(t, "CONSUL_USE_TLS", container.Env[6].Name) - require.Equal(t, "true", container.Env[6].Value) - if caProvided { - require.Equal(t, "CONSUL_CACERT_PEM", container.Env[7].Name) - require.Equal(t, "consul-ca-cert", container.Env[7].Value) - } else { - for _, ev := range container.Env { - if ev.Name == "CONSUL_CACERT_PEM" { - require.Empty(t, ev.Value) - } - } - } - } + }, + ServiceAccountName: "foo", + }, + } + container, err := w.containerInit(testNS, *pod, multiPortInfo{}) + require.NoError(err) + actual := strings.Join(container.Command, " ") + require.Contains(actual, ` +consul-k8s-control-plane connect-init -pod-name=${POD_NAME} -pod-namespace=${POD_NAMESPACE} \ + -consul-api-timeout=5s \ + -acl-auth-method="release-name-consul-k8s-auth-method"`) + require.Contains(actual, ` +# Generate the envoy bootstrap code +/consul/connect-inject/consul connect envoy \ + -proxy-id="$(cat /consul/connect-inject/proxyid)" \ + -token-file="/consul/connect-inject/acl-token" \ + -bootstrap > /consul/connect-inject/envoy-bootstrap.yaml`) +} - }) +// If Consul CA cert is set, +// Consul addresses should use HTTPS +// and CA cert should be set as env variable. 
+func TestHandlerContainerInit_WithTLS(t *testing.T) { + require := require.New(t) + w := MeshWebhook{ + ConsulCACert: "consul-ca-cert", + ConsulAPITimeout: 5 * time.Second, + } + pod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + annotationService: "foo", + }, + }, + + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "web", + }, + }, + }, } + container, err := w.containerInit(testNS, *pod, multiPortInfo{}) + require.NoError(err) + actual := strings.Join(container.Command, " ") + require.Contains(actual, ` +export CONSUL_HTTP_ADDR="https://${HOST_IP}:8501" +export CONSUL_GRPC_ADDR="https://${HOST_IP}:8502" +export CONSUL_CACERT=/consul/connect-inject/consul-ca.pem +cat <<EOF >/consul/connect-inject/consul-ca.pem +consul-ca-cert +EOF`) + require.NotContains(actual, ` +export CONSUL_HTTP_ADDR="${HOST_IP}:8500" +export CONSUL_GRPC_ADDR="${HOST_IP}:8502"`) } func TestHandlerContainerInit_Resources(t *testing.T) { + require := require.New(t) w := MeshWebhook{ InitContainerResources: corev1.ResourceRequirements{ Requests: corev1.ResourceList{ @@ -879,7 +1173,7 @@ func TestHandlerContainerInit_Resources(t *testing.T) { corev1.ResourceMemory: resource.MustParse("25Mi"), }, }, - ConsulConfig: &consul.Config{HTTPPort: 8500, APITimeout: 5 * time.Second}, + ConsulAPITimeout: 5 * time.Second, } pod := &corev1.Pod{ ObjectMeta: metav1.ObjectMeta{ @@ -897,8 +1191,8 @@ func TestHandlerContainerInit_Resources(t *testing.T) { }, } container, err := w.containerInit(testNS, *pod, multiPortInfo{}) - require.NoError(t, err) - require.Equal(t, corev1.ResourceRequirements{ + require.NoError(err) + require.Equal(corev1.ResourceRequirements{ Limits: corev1.ResourceList{ corev1.ResourceCPU: resource.MustParse("20m"), corev1.ResourceMemory: resource.MustParse("25Mi"), @@ -910,6 +1204,34 @@ func TestHandlerContainerInit_Resources(t *testing.T) { }, container.Resources) } +// Test that the init copy container has the correct command and SecurityContext.
+func TestHandlerInitCopyContainer(t *testing.T) { + openShiftEnabledCases := []bool{false, true} + + for _, openShiftEnabled := range openShiftEnabledCases { + t.Run(fmt.Sprintf("openshift enabled: %t", openShiftEnabled), func(t *testing.T) { + w := MeshWebhook{EnableOpenShift: openShiftEnabled, ConsulAPITimeout: 5 * time.Second} + + container := w.initCopyContainer() + + if openShiftEnabled { + require.Nil(t, container.SecurityContext) + } else { + expectedSecurityContext := &corev1.SecurityContext{ + RunAsUser: pointer.Int64(initContainersUserAndGroupID), + RunAsGroup: pointer.Int64(initContainersUserAndGroupID), + RunAsNonRoot: pointer.Bool(true), + ReadOnlyRootFilesystem: pointer.Bool(true), + } + require.Equal(t, expectedSecurityContext, container.SecurityContext) + } + + actual := strings.Join(container.Command, " ") + require.Contains(t, actual, `cp /bin/consul /consul/connect-inject/consul`) + }) + } +} + var testNS = corev1.Namespace{ ObjectMeta: metav1.ObjectMeta{ Name: k8sNamespace, diff --git a/control-plane/connect-inject/container_volume.go b/control-plane/connect-inject/container_volume.go index 2cb086c29a..53ba985f3e 100644 --- a/control-plane/connect-inject/container_volume.go +++ b/control-plane/connect-inject/container_volume.go @@ -14,7 +14,7 @@ func (w *MeshWebhook) containerVolume() corev1.Volume { return corev1.Volume{ Name: volumeName, VolumeSource: corev1.VolumeSource{ - EmptyDir: &corev1.EmptyDirVolumeSource{Medium: corev1.StorageMediumMemory}, + EmptyDir: &corev1.EmptyDirVolumeSource{}, }, } } diff --git a/control-plane/connect-inject/dns.go b/control-plane/connect-inject/dns.go deleted file mode 100644 index fd8f9b1ec3..0000000000 --- a/control-plane/connect-inject/dns.go +++ /dev/null @@ -1,90 +0,0 @@ -package connectinject - -import ( - "fmt" - "strconv" - - "github.com/miekg/dns" - corev1 "k8s.io/api/core/v1" - "k8s.io/utils/pointer" -) - -const ( - // These defaults are taken from the /etc/resolv.conf man page - // and are used by the dns library. - defaultDNSOptionNdots = 1 - defaultDNSOptionTimeout = 5 - defaultDNSOptionAttempts = 2 - - // defaultEtcResolvConfFile is the default location of the /etc/resolv.conf file. - defaultEtcResolvConfFile = "/etc/resolv.conf" -) - -func (w *MeshWebhook) configureDNS(pod *corev1.Pod, k8sNS string) error { - // First, we need to determine the nameservers configured in this cluster from /etc/resolv.conf. - etcResolvConf := defaultEtcResolvConfFile - if w.etcResolvFile != "" { - etcResolvConf = w.etcResolvFile - } - cfg, err := dns.ClientConfigFromFile(etcResolvConf) - if err != nil { - return err - } - - // Set DNS policy on the pod to None because we want DNS to work according to the config we will provide. - pod.Spec.DNSPolicy = corev1.DNSNone - - // Set the consul-dataplane's DNS server as the first server in the list (i.e. localhost). - // We want to do that so that when consul cannot resolve the record, we will fall back to the nameservers - // configured in our /etc/resolv.conf. It's important to add Consul DNS as the first nameserver because - // if we put kube DNS first, it will return NXDOMAIN response and a DNS client will not fall back to other nameservers. - if pod.Spec.DNSConfig == nil { - nameservers := []string{ConsulDataplaneDNSBindHost} - nameservers = append(nameservers, cfg.Servers...) 
- var options []corev1.PodDNSConfigOption - if cfg.Ndots != defaultDNSOptionNdots { - ndots := strconv.Itoa(cfg.Ndots) - options = append(options, corev1.PodDNSConfigOption{ - Name: "ndots", - Value: &ndots, - }) - } - if cfg.Timeout != defaultDNSOptionTimeout { - options = append(options, corev1.PodDNSConfigOption{ - Name: "timeout", - Value: pointer.String(strconv.Itoa(cfg.Timeout)), - }) - } - if cfg.Attempts != defaultDNSOptionAttempts { - options = append(options, corev1.PodDNSConfigOption{ - Name: "attempts", - Value: pointer.String(strconv.Itoa(cfg.Attempts)), - }) - } - - // Replace release namespace in the searches with the pod namespace. - // This is so that the searches we generate will be for the pod's namespace - // instead of the namespace of the connect-injector. E.g. instead of - // consul.svc.cluster.local it should be .svc.cluster.local. - var searches []string - // Kubernetes will add a search domain for .svc.cluster.local so we can always - // expect it to be there. See https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#namespaces-of-services. - consulReleaseNSSearchDomain := fmt.Sprintf("%s.svc.cluster.local", w.ReleaseNamespace) - for _, search := range cfg.Search { - if search == consulReleaseNSSearchDomain { - searches = append(searches, fmt.Sprintf("%s.svc.cluster.local", k8sNS)) - } else { - searches = append(searches, search) - } - } - - pod.Spec.DNSConfig = &corev1.PodDNSConfig{ - Nameservers: nameservers, - Searches: searches, - Options: options, - } - } else { - return fmt.Errorf("DNS redirection to Consul is not supported with an already defined DNSConfig on the pod") - } - return nil -} diff --git a/control-plane/connect-inject/dns_test.go b/control-plane/connect-inject/dns_test.go deleted file mode 100644 index 99ca5e1c03..0000000000 --- a/control-plane/connect-inject/dns_test.go +++ /dev/null @@ -1,102 +0,0 @@ -package connectinject - -import ( - "os" - "testing" - - "github.com/stretchr/testify/require" - corev1 "k8s.io/api/core/v1" - "k8s.io/utils/pointer" -) - -func TestMeshWebhook_configureDNS(t *testing.T) { - cases := map[string]struct { - etcResolv string - expDNSConfig *corev1.PodDNSConfig - }{ - "empty /etc/resolv.conf file": { - expDNSConfig: &corev1.PodDNSConfig{ - Nameservers: []string{"127.0.0.1"}, - }, - }, - "one nameserver": { - etcResolv: `nameserver 1.1.1.1`, - expDNSConfig: &corev1.PodDNSConfig{ - Nameservers: []string{"127.0.0.1", "1.1.1.1"}, - }, - }, - "mutiple nameservers, searches, and options": { - etcResolv: ` -nameserver 1.1.1.1 -nameserver 2.2.2.2 -search foo.bar bar.baz -options ndots:5 timeout:6 attempts:3`, - expDNSConfig: &corev1.PodDNSConfig{ - Nameservers: []string{"127.0.0.1", "1.1.1.1", "2.2.2.2"}, - Searches: []string{"foo.bar", "bar.baz"}, - Options: []corev1.PodDNSConfigOption{ - { - Name: "ndots", - Value: pointer.String("5"), - }, - { - Name: "timeout", - Value: pointer.String("6"), - }, - { - Name: "attempts", - Value: pointer.String("3"), - }, - }, - }, - }, - "replaces release specific search domains": { - etcResolv: ` -nameserver 1.1.1.1 -nameserver 2.2.2.2 -search consul.svc.cluster.local svc.cluster.local cluster.local -options ndots:5`, - expDNSConfig: &corev1.PodDNSConfig{ - Nameservers: []string{"127.0.0.1", "1.1.1.1", "2.2.2.2"}, - Searches: []string{"default.svc.cluster.local", "svc.cluster.local", "cluster.local"}, - Options: []corev1.PodDNSConfigOption{ - { - Name: "ndots", - Value: pointer.String("5"), - }, - }, - }, - }, - } - - for name, c := range cases { - t.Run(name, func(t 
*testing.T) { - etcResolvFile, err := os.CreateTemp("", "") - require.NoError(t, err) - t.Cleanup(func() { - _ = os.Remove(etcResolvFile.Name()) - }) - _, err = etcResolvFile.WriteString(c.etcResolv) - require.NoError(t, err) - w := MeshWebhook{ - etcResolvFile: etcResolvFile.Name(), - ReleaseNamespace: "consul", - } - - pod := minimal() - err = w.configureDNS(pod, "default") - require.NoError(t, err) - require.Equal(t, corev1.DNSNone, pod.Spec.DNSPolicy) - require.Equal(t, c.expDNSConfig, pod.Spec.DNSConfig) - }) - } -} - -func TestMeshWebhook_configureDNS_error(t *testing.T) { - w := MeshWebhook{} - - pod := minimal() - pod.Spec.DNSConfig = &corev1.PodDNSConfig{Nameservers: []string{"1.1.1.1"}} - err := w.configureDNS(pod, "default") - require.EqualError(t, err, "DNS redirection to Consul is not supported with an already defined DNSConfig on the pod") -} diff --git a/control-plane/connect-inject/endpoints_controller.go b/control-plane/connect-inject/endpoints_controller.go index bb707223b1..9b37ca3bcf 100644 --- a/control-plane/connect-inject/endpoints_controller.go +++ b/control-plane/connect-inject/endpoints_controller.go @@ -8,6 +8,7 @@ import ( "regexp" "strconv" "strings" + "time" mapset "github.com/deckarep/golang-set" "github.com/go-logr/logr" @@ -19,11 +20,17 @@ import ( corev1 "k8s.io/api/core/v1" k8serrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/intstr" ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/builder" "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/handler" + "sigs.k8s.io/controller-runtime/pkg/predicate" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + "sigs.k8s.io/controller-runtime/pkg/source" ) const ( @@ -31,19 +38,10 @@ const ( MetaKeyKubeServiceName = "k8s-service-name" MetaKeyKubeNS = "k8s-namespace" MetaKeyManagedBy = "managed-by" - MetaKeySyntheticNode = "synthetic-node" - MetaKeyConsulWANFederation = "consul-wan-federation" TokenMetaPodNameKey = "pod" - - // Gateway types for registration. - MeshGateway = "mesh-gateway" - TerminatingGateway = "terminating-gateway" - IngressGateway = "ingress-gateway" - kubernetesSuccessReasonMsg = "Kubernetes health checks passing" envoyPrometheusBindAddr = "envoy_prometheus_bind_addr" - sidecarContainer = "consul-dataplane" - defaultNS = "default" + envoySidecarContainer = "envoy-sidecar" // clusterIPTaggedAddressName is the key for the tagged address to store the service's cluster IP and service port // in Consul. Note: This value should not be changed without a corresponding change in Consul. @@ -61,33 +59,21 @@ const ( // the ListenerPort for the Expose configuration of the proxy registration for a startup probe. exposedPathsStartupPortsRangeStart = 20500 - // ConsulNodeName is the node name that we'll use to register and deregister services. - ConsulNodeName = "k8s-service-mesh" - - // ConsulNodeAddress is the address of the consul node (defined by ConsulNodeName). - // This address does not need to be routable as this node is ephemeral, and we're only providing it because - // Consul's API currently requires node address to be provided when registering a node. - ConsulNodeAddress = "127.0.0.1" - - // ConsulKubernetesCheckType is the type of health check in Consul for Kubernetes readiness status. 
- ConsulKubernetesCheckType = "kubernetes-readiness" - - // ConsulKubernetesCheckName is the name of health check in Consul for Kubernetes readiness status. - ConsulKubernetesCheckName = "Kubernetes Readiness Check" - - // EnvoyInboundListenerPort is the port where envoy's inbound listener is listening. - EnvoyInboundListenerPort = 20000 - // proxyDefaultInboundPort is the default inbound port for the proxy. proxyDefaultInboundPort = 20000 ) type EndpointsController struct { client.Client - // ConsulClientConfig is the config for the Consul API client. - ConsulClientConfig *consul.Config - // ConsulServerConnMgr is the watcher for the Consul server addresses. - ConsulServerConnMgr consul.ServerConnectionManager + // ConsulClient points at the agent local to the connect-inject deployment pod. + ConsulClient *api.Client + // ConsulClientCfg is the client config used by the ConsulClient when calling NewClient(). + ConsulClientCfg *api.Config + // ConsulScheme is the scheme to use when making API calls to Consul, + // i.e. "http" or "https". + ConsulScheme string + // ConsulPort is the port to make HTTP API calls to Consul agents on. + ConsulPort string // Only endpoints in the AllowK8sNamespacesSet are reconciled. AllowK8sNamespacesSet mapset.Set // Endpoints in the DenyK8sNamespacesSet are ignored. @@ -98,9 +84,6 @@ type EndpointsController struct { // EnableConsulNamespaces indicates that a user is running Consul Enterprise // with version 1.7+ which supports namespaces. EnableConsulNamespaces bool - // EnableWANFederation indicates that a user is running Consul with - // WAN Federation enabled. - EnableWANFederation bool // ConsulDestinationNamespace is the name of the Consul namespace to create // all config entries in. If EnableNSMirroring is true this is ignored. ConsulDestinationNamespace string @@ -132,6 +115,9 @@ type EndpointsController struct { // will delete any tokens associated with this auth method // whenever service instances are deregistered. AuthMethod string + // ConsulAPITimeout is the duration that the consul API client will + // wait for a response from the API before cancelling the request. + ConsulAPITimeout time.Duration MetricsConfig MetricsConfig Log logr.Logger @@ -151,19 +137,7 @@ func (r *EndpointsController) Reconcile(ctx context.Context, req ctrl.Request) ( return ctrl.Result{}, nil } - // Create Consul client for this reconcile. - serverState, err := r.ConsulServerConnMgr.State() - if err != nil { - r.Log.Error(err, "failed to get Consul server state", "name", req.Name, "ns", req.Namespace) - return ctrl.Result{}, err - } - apiClient, err := consul.NewClientFromConnMgrState(r.ConsulClientConfig, serverState) - if err != nil { - r.Log.Error(err, "failed to create Consul API client", "name", req.Name, "ns", req.Namespace) - return ctrl.Result{}, err - } - - err = r.Client.Get(ctx, req.NamespacedName, &serviceEndpoints) + err := r.Client.Get(ctx, req.NamespacedName, &serviceEndpoints) // endpointPods holds a set of all pods this endpoints object is currently pointing to. // We use this later when we reconcile ACL tokens to decide whether an ACL token in Consul @@ -173,9 +147,9 @@ func (r *EndpointsController) Reconcile(ctx context.Context, req ctrl.Request) ( // If the endpoints object has been deleted (and we get an IsNotFound // error), we need to deregister all instances in Consul for that service. if k8serrors.IsNotFound(err) { - // Deregister all instances in Consul for this service. 
The function deregisterService handles + // Deregister all instances in Consul for this service. The function deregisterServiceOnAllAgents handles // the case where the Consul service name is different from the Kubernetes service name. - err = r.deregisterService(apiClient, req.Name, req.Namespace, nil) + err = r.deregisterServiceOnAllAgents(ctx, req.Name, req.Namespace, nil) return ctrl.Result{}, err } else if err != nil { r.Log.Error(err, "failed to get Endpoints", "name", req.Name, "ns", req.Namespace) @@ -189,7 +163,7 @@ func (r *EndpointsController) Reconcile(ctx context.Context, req ctrl.Request) ( if isLabeledIgnore(serviceEndpoints.Labels) { // We always deregister the service to handle the case where a user has registered the service, then added the label later. r.Log.Info("Ignoring endpoint labeled with `consul.hashicorp.com/service-ignore: \"true\"`", "name", req.Name, "namespace", req.Namespace) - err = r.deregisterService(apiClient, req.Name, req.Namespace, nil) + err = r.deregisterServiceOnAllAgents(ctx, req.Name, req.Namespace, nil) return ctrl.Result{}, err } @@ -203,14 +177,14 @@ func (r *EndpointsController) Reconcile(ctx context.Context, req ctrl.Request) ( if address.TargetRef != nil && address.TargetRef.Kind == "Pod" { var pod corev1.Pod objectKey := types.NamespacedName{Name: address.TargetRef.Name, Namespace: address.TargetRef.Namespace} - if err = r.Client.Get(ctx, objectKey, &pod); err != nil { + if err := r.Client.Get(ctx, objectKey, &pod); err != nil { r.Log.Error(err, "failed to get pod", "name", address.TargetRef.Name) errs = multierror.Append(errs, err) continue } - svcName, ok := pod.Annotations[annotationKubernetesService] - if ok && serviceEndpoints.Name != svcName { + serviceName, ok := pod.Annotations[annotationKubernetesService] + if ok && serviceEndpoints.Name != serviceName { r.Log.Info("ignoring endpoint because it doesn't match explicit service annotation", "name", serviceEndpoints.Name, "ns", serviceEndpoints.Namespace) // deregistration for service instances that don't match the annotation happens // later because we don't add this pod to the endpointAddressMap. @@ -219,18 +193,11 @@ func (r *EndpointsController) Reconcile(ctx context.Context, req ctrl.Request) ( if hasBeenInjected(pod) { endpointPods.Add(address.TargetRef.Name) - if err = r.registerServicesAndHealthCheck(apiClient, pod, serviceEndpoints, healthStatus, endpointAddressMap); err != nil { + if err := r.registerServicesAndHealthCheck(pod, serviceEndpoints, healthStatus, endpointAddressMap); err != nil { r.Log.Error(err, "failed to register services or health check", "name", serviceEndpoints.Name, "ns", serviceEndpoints.Namespace) errs = multierror.Append(errs, err) } } - if isGateway(pod) { - endpointPods.Add(address.TargetRef.Name) - if err = r.registerGateway(apiClient, pod, serviceEndpoints, healthStatus, endpointAddressMap); err != nil { - r.Log.Error(err, "failed to register gateway or health check", "name", serviceEndpoints.Name, "ns", serviceEndpoints.Namespace) - errs = multierror.Append(errs, err) - } - } } } } @@ -238,8 +205,8 @@ func (r *EndpointsController) Reconcile(ctx context.Context, req ctrl.Request) ( // Compare service instances in Consul with addresses in Endpoints. If an address is not in Endpoints, deregister // from Consul. This uses endpointAddressMap which is populated with the addresses in the Endpoints object during // the registration codepath. 
- if err = r.deregisterService(apiClient, serviceEndpoints.Name, serviceEndpoints.Namespace, endpointAddressMap); err != nil { - r.Log.Error(err, "failed to deregister endpoints", "name", serviceEndpoints.Name, "ns", serviceEndpoints.Namespace) + if err = r.deregisterServiceOnAllAgents(ctx, serviceEndpoints.Name, serviceEndpoints.Namespace, endpointAddressMap); err != nil { + r.Log.Error(err, "failed to deregister endpoints on all agents", "name", serviceEndpoints.Name, "ns", serviceEndpoints.Namespace) errs = multierror.Append(errs, err) } @@ -253,117 +220,193 @@ func (r *EndpointsController) Logger(name types.NamespacedName) logr.Logger { func (r *EndpointsController) SetupWithManager(mgr ctrl.Manager) error { return ctrl.NewControllerManagedBy(mgr). For(&corev1.Endpoints{}). - Complete(r) + Watches( + &source.Kind{Type: &corev1.Pod{}}, + handler.EnqueueRequestsFromMapFunc(r.requestsForRunningAgentPods), + builder.WithPredicates(predicate.NewPredicateFuncs(r.filterAgentPods)), + ).Complete(r) } // registerServicesAndHealthCheck creates Consul registrations for the service and proxy and registers them with Consul. // It also upserts a Kubernetes health check for the service based on whether the endpoint address is ready. -func (r *EndpointsController) registerServicesAndHealthCheck(apiClient *api.Client, pod corev1.Pod, serviceEndpoints corev1.Endpoints, healthStatus string, endpointAddressMap map[string]bool) error { - // Build the endpointAddressMap up for deregistering service instances later. - endpointAddressMap[pod.Status.PodIP] = true - - var managedByEndpointsController bool - if raw, ok := pod.Labels[keyManagedBy]; ok && raw == managedByValue { - managedByEndpointsController = true - } - // For pods managed by this controller, create and register the service instance. - if managedByEndpointsController { - // Get information from the pod to create service instance registrations. - serviceRegistration, proxyServiceRegistration, err := r.createServiceRegistrations(apiClient, pod, serviceEndpoints, healthStatus) +func (r *EndpointsController) registerServicesAndHealthCheck(pod corev1.Pod, serviceEndpoints corev1.Endpoints, healthStatus string, endpointAddressMap map[string]bool) error { + podHostIP := pod.Status.HostIP + + if hasBeenInjected(pod) { + // Build the endpointAddressMap up for deregistering service instances later. + endpointAddressMap[pod.Status.PodIP] = true + // Create client for Consul agent local to the pod. + client, err := r.remoteConsulClient(podHostIP, r.consulNamespace(pod.Namespace)) if err != nil { - r.Log.Error(err, "failed to create service registrations for endpoints", "name", serviceEndpoints.Name, "ns", serviceEndpoints.Namespace) + r.Log.Error(err, "failed to create a new Consul client", "address", podHostIP) return err } - // Register the service instance with Consul. - r.Log.Info("registering service with Consul", "name", serviceRegistration.Service.Service, - "id", serviceRegistration.ID) - _, err = apiClient.Catalog().Register(serviceRegistration, nil) - if err != nil { - r.Log.Error(err, "failed to register service", "name", serviceRegistration.Service.Service) - return err + var managedByEndpointsController bool + if raw, ok := pod.Labels[keyManagedBy]; ok && raw == managedByValue { + managedByEndpointsController = true } + // For pods managed by this controller, create and register the service instance. + if managedByEndpointsController { + // Get information from the pod to create service instance registrations. 
+ serviceRegistration, proxyServiceRegistration, err := r.createServiceRegistrations(pod, serviceEndpoints) + if err != nil { + r.Log.Error(err, "failed to create service registrations for endpoints", "name", serviceEndpoints.Name, "ns", serviceEndpoints.Namespace) + return err + } - // Register the proxy service instance with Consul. - r.Log.Info("registering proxy service with Consul", "name", proxyServiceRegistration.Service.Service) - _, err = apiClient.Catalog().Register(proxyServiceRegistration, nil) + // Register the service instance with the local agent. + // Note: the order of how we register services is important, + // and the connect-proxy service should come after the "main" service + // because its alias health check depends on the main service existing. + r.Log.Info("registering service with Consul", "name", serviceRegistration.Name, + "id", serviceRegistration.ID, "agentIP", podHostIP) + err = client.Agent().ServiceRegister(serviceRegistration) + if err != nil { + r.Log.Error(err, "failed to register service", "name", serviceRegistration.Name) + return err + } + + // Register the proxy service instance with the local agent. + r.Log.Info("registering proxy service with Consul", "name", proxyServiceRegistration.Name) + err = client.Agent().ServiceRegister(proxyServiceRegistration) + if err != nil { + r.Log.Error(err, "failed to register proxy service", "name", proxyServiceRegistration.Name) + return err + } + } + + // Update the service TTL health check for both legacy services and services managed by endpoints + // controller. The proxy health checks are registered separately by endpoints controller and + // lifecycle sidecar for legacy services. Here, we always update the health check for legacy and + // newer services idempotently since the service health check is not added as part of the service + // registration. + reason := getHealthCheckStatusReason(healthStatus, pod.Name, pod.Namespace) + serviceName := getServiceName(pod, serviceEndpoints) + r.Log.Info("updating health check status for service", "name", serviceName, "reason", reason, "status", healthStatus) + serviceID := getServiceID(pod, serviceEndpoints) + healthCheckID := getConsulHealthCheckID(pod, serviceID) + err = r.upsertHealthCheck(pod, client, serviceID, healthCheckID, healthStatus) if err != nil { - r.Log.Error(err, "failed to register proxy service", "name", proxyServiceRegistration.Service.Service) + r.Log.Error(err, "failed to update health check status for service", "name", serviceName) return err } } return nil } -// registerGateway creates Consul registrations for the Connect Gateways and registers them with Consul. -// It also upserts a Kubernetes health check for the service based on whether the endpoint address is ready. -func (r *EndpointsController) registerGateway(apiClient *api.Client, pod corev1.Pod, serviceEndpoints corev1.Endpoints, healthStatus string, endpointAddressMap map[string]bool) error { - // Build the endpointAddressMap up for deregistering service instances later. - endpointAddressMap[pod.Status.PodIP] = true - - var managedByEndpointsController bool - if raw, ok := pod.Labels[keyManagedBy]; ok && raw == managedByValue { - managedByEndpointsController = true - } - // For pods managed by this controller, create and register the service instance. - if managedByEndpointsController { - // Get information from the pod to create service instance registrations. 
- serviceRegistration, err := r.createGatewayRegistrations(pod, serviceEndpoints, healthStatus) +// getServiceCheck will return the health check for this pod and service if it exists. +func getServiceCheck(client *api.Client, healthCheckID string) (*api.AgentCheck, error) { + filter := fmt.Sprintf("CheckID == `%s`", healthCheckID) + checks, err := client.Agent().ChecksWithFilter(filter) + if err != nil { + return nil, err + } + // This will be nil (does not exist) or an actual check. + return checks[healthCheckID], nil +} + +// registerConsulHealthCheck registers a TTL health check for the service on this Agent local to the Pod. This will add +// the Pod's readiness status, which will mark the service instance healthy/unhealthy for Consul service mesh +// traffic. +func registerConsulHealthCheck(client *api.Client, consulHealthCheckID, serviceID, status string) error { + // Create a TTL health check in Consul associated with this service and pod. + // The TTL time is 100000h which should ensure that the check never fails due to timeout + // of the TTL check. + err := client.Agent().CheckRegister(&api.AgentCheckRegistration{ + ID: consulHealthCheckID, + Name: "Kubernetes Health Check", + ServiceID: serviceID, + AgentServiceCheck: api.AgentServiceCheck{ + TTL: "100000h", + Status: status, + SuccessBeforePassing: 1, + FailuresBeforeCritical: 1, + }, + }) + if err != nil { + // Full error looks like: + // Unexpected response code: 500 (ServiceID "consulnamespace/svc-id" does not exist) + if strings.Contains(err.Error(), fmt.Sprintf("%s\" does not exist", serviceID)) { + return fmt.Errorf("service %q not found in Consul: unable to register health check", serviceID) + } + return fmt.Errorf("registering health check for service %q: %w", serviceID, err) + } + + return nil +} + +// updateConsulHealthCheckStatus updates the consul health check status. +func (r *EndpointsController) updateConsulHealthCheckStatus(client *api.Client, consulHealthCheckID, status, reason string) error { + r.Log.Info("updating health check", "id", consulHealthCheckID) + err := client.Agent().UpdateTTL(consulHealthCheckID, reason, status) + if err != nil { + return fmt.Errorf("error updating health check: %w", err) + } + return nil +} + +// upsertHealthCheck checks if the healthcheck exists for the service, and creates it if it doesn't exist, or updates it +// if it does. +func (r *EndpointsController) upsertHealthCheck(pod corev1.Pod, client *api.Client, serviceID, healthCheckID, status string) error { + reason := getHealthCheckStatusReason(status, pod.Name, pod.Namespace) + // Retrieve the health check that would exist if the service had one registered for this pod. + serviceCheck, err := getServiceCheck(client, healthCheckID) + if err != nil { + return fmt.Errorf("unable to get agent health checks: serviceID=%s, checkID=%s, %s", serviceID, healthCheckID, err) + } + if serviceCheck == nil { + // Create a new health check. 
+ err = registerConsulHealthCheck(client, healthCheckID, serviceID, status) if err != nil { - r.Log.Error(err, "failed to create service registrations for endpoints", "name", serviceEndpoints.Name, "ns", serviceEndpoints.Namespace) return err } - if r.EnableConsulNamespaces { - if _, err := namespaces.EnsureExists(apiClient, serviceRegistration.Service.Namespace, r.CrossNSACLPolicy); err != nil { - r.Log.Error(err, "failed to ensure Consul namespace exists", "name", serviceEndpoints.Name, "ns", serviceEndpoints.Namespace, "consul ns", serviceRegistration.Service.Namespace) - return err - } + // Also update it, the reason this is separate is there is no way to set the Output field of the health check + // at creation time, and this is what is displayed on the UI as opposed to the Notes field. + err = r.updateConsulHealthCheckStatus(client, healthCheckID, status, reason) + if err != nil { + return err } - - // Register the service instance with Consul. - r.Log.Info("registering gateway with Consul", "name", serviceRegistration.Service.Service, - "id", serviceRegistration.ID) - _, err = apiClient.Catalog().Register(serviceRegistration, nil) + } else if serviceCheck.Status != status { + err = r.updateConsulHealthCheckStatus(client, healthCheckID, status, reason) if err != nil { - r.Log.Error(err, "failed to register gateway", "name", serviceRegistration.Service.Service) return err } } - return nil } -// serviceName computes the service name to register with Consul from the pod and endpoints object. In a single port +// getServiceName computes the service name to register with Consul from the pod and endpoints object. In a single port // service, it defaults to the endpoints name, but can be overridden by a pod annotation. In a multi port service, the // endpoints name is always used since the pod annotation will have multiple service names listed (one per port). // Changing the Consul service name via annotations is not supported for multi port services. -func serviceName(pod corev1.Pod, serviceEndpoints corev1.Endpoints) string { - svcName := serviceEndpoints.Name +func getServiceName(pod corev1.Pod, serviceEndpoints corev1.Endpoints) string { + serviceName := serviceEndpoints.Name // If the annotation has a comma, it is a multi port Pod. In that case we always use the name of the endpoint. 
if serviceNameFromAnnotation, ok := pod.Annotations[annotationService]; ok && serviceNameFromAnnotation != "" && !strings.Contains(serviceNameFromAnnotation, ",") { - svcName = serviceNameFromAnnotation + serviceName = serviceNameFromAnnotation } - return svcName + return serviceName } -func serviceID(pod corev1.Pod, serviceEndpoints corev1.Endpoints) string { - return fmt.Sprintf("%s-%s", pod.Name, serviceName(pod, serviceEndpoints)) +func getServiceID(pod corev1.Pod, serviceEndpoints corev1.Endpoints) string { + return fmt.Sprintf("%s-%s", pod.Name, getServiceName(pod, serviceEndpoints)) } -func proxyServiceName(pod corev1.Pod, serviceEndpoints corev1.Endpoints) string { - serviceName := serviceName(pod, serviceEndpoints) +func getProxyServiceName(pod corev1.Pod, serviceEndpoints corev1.Endpoints) string { + serviceName := getServiceName(pod, serviceEndpoints) return fmt.Sprintf("%s-sidecar-proxy", serviceName) } -func proxyServiceID(pod corev1.Pod, serviceEndpoints corev1.Endpoints) string { - proxyServiceName := proxyServiceName(pod, serviceEndpoints) +func getProxyServiceID(pod corev1.Pod, serviceEndpoints corev1.Endpoints) string { + proxyServiceName := getProxyServiceName(pod, serviceEndpoints) return fmt.Sprintf("%s-%s", pod.Name, proxyServiceName) } // createServiceRegistrations creates the service and proxy service instance registrations with the information from the // Pod. -func (r *EndpointsController) createServiceRegistrations(apiClient *api.Client, pod corev1.Pod, serviceEndpoints corev1.Endpoints, healthStatus string) (*api.CatalogRegistration, *api.CatalogRegistration, error) { +func (r *EndpointsController) createServiceRegistrations(pod corev1.Pod, serviceEndpoints corev1.Endpoints) (*api.AgentServiceRegistration, *api.AgentServiceRegistration, error) { // If a port is specified, then we determine the value of that port // and register that port for the host service. // The meshWebhook will always set the port annotation if one is not provided on the pod. @@ -386,16 +429,15 @@ func (r *EndpointsController) createServiceRegistrations(apiClient *api.Client, // Otherwise, the Consul service name should equal the Kubernetes Service name. // The service name in Consul defaults to the Endpoints object name, and is overridden by the pod // annotation consul.hashicorp.com/connect-service.. 
- svcName := serviceName(pod, serviceEndpoints) + serviceName := getServiceName(pod, serviceEndpoints) - svcID := serviceID(pod, serviceEndpoints) + serviceID := getServiceID(pod, serviceEndpoints) meta := map[string]string{ MetaKeyPodName: pod.Name, MetaKeyKubeServiceName: serviceEndpoints.Name, MetaKeyKubeNS: serviceEndpoints.Namespace, MetaKeyManagedBy: managedByValue, - MetaKeySyntheticNode: "true", } for k, v := range pod.Annotations { if strings.HasPrefix(k, annotationMeta) && strings.TrimPrefix(k, annotationMeta) != "" { @@ -408,40 +450,21 @@ func (r *EndpointsController) createServiceRegistrations(apiClient *api.Client, } tags := consulTags(pod) - consulNS := r.consulNamespace(pod.Namespace) - service := &api.AgentService{ - ID: svcID, - Service: svcName, + service := &api.AgentServiceRegistration{ + ID: serviceID, + Name: serviceName, Port: consulServicePort, Address: pod.Status.PodIP, Meta: meta, - Namespace: consulNS, + Namespace: r.consulNamespace(pod.Namespace), Tags: tags, } - serviceRegistration := &api.CatalogRegistration{ - Node: ConsulNodeName, - Address: ConsulNodeAddress, - NodeMeta: map[string]string{ - MetaKeySyntheticNode: "true", - }, - Service: service, - Check: &api.AgentCheck{ - CheckID: consulHealthCheckID(pod.Namespace, svcID), - Name: ConsulKubernetesCheckName, - Type: ConsulKubernetesCheckType, - Status: healthStatus, - ServiceID: svcID, - Output: getHealthCheckStatusReason(healthStatus, pod.Name, pod.Namespace), - Namespace: consulNS, - }, - SkipNodeUpdate: true, - } - proxySvcName := proxyServiceName(pod, serviceEndpoints) - proxySvcID := proxyServiceID(pod, serviceEndpoints) + proxyServiceName := getProxyServiceName(pod, serviceEndpoints) + proxyServiceID := getProxyServiceID(pod, serviceEndpoints) proxyConfig := &api.AgentServiceConnectProxyConfig{ - DestinationServiceName: svcName, - DestinationServiceID: svcID, + DestinationServiceName: serviceName, + DestinationServiceID: serviceID, Config: make(map[string]interface{}), } @@ -478,16 +501,28 @@ func (r *EndpointsController) createServiceRegistrations(apiClient *api.Client, if idx := getMultiPortIdx(pod, serviceEndpoints); idx >= 0 { proxyPort += idx } - proxyService := &api.AgentService{ + proxyService := &api.AgentServiceRegistration{ Kind: api.ServiceKindConnectProxy, - ID: proxySvcID, - Service: proxySvcName, + ID: proxyServiceID, + Name: proxyServiceName, Port: proxyPort, Address: pod.Status.PodIP, Meta: meta, - Namespace: consulNS, + Namespace: r.consulNamespace(pod.Namespace), Proxy: proxyConfig, - Tags: tags, + Checks: api.AgentServiceChecks{ + { + Name: "Proxy Public Listener", + TCP: fmt.Sprintf("%s:%d", pod.Status.PodIP, proxyPort), + Interval: "10s", + DeregisterCriticalServiceAfter: "10m", + }, + { + Name: "Destination Alias", + AliasService: serviceID, + }, + }, + Tags: tags, } // A user can enable/disable tproxy for an entire namespace. 
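The comments in the hunk above stress that agent-local registration order matters: the sidecar proxy carries a "Destination Alias" check against the main service, so the main service must be registered first. A minimal stand-alone sketch of that ordering against the agent API, assuming a local agent reachable via api.DefaultConfig(); the IDs and ports here are illustrative, not the controller's exact values:

package main

import (
	"fmt"

	"github.com/hashicorp/consul/api"
)

// registerPair registers a service and then its connect-proxy sidecar with the
// local Consul agent. Registering the proxy first would leave its alias health
// check unresolvable until the main service appears, which is why the order
// below mirrors the ordering noted in the controller comments.
func registerPair(client *api.Client, svc, proxy *api.AgentServiceRegistration) error {
	if err := client.Agent().ServiceRegister(svc); err != nil {
		return fmt.Errorf("registering service %q: %w", svc.ID, err)
	}
	if err := client.Agent().ServiceRegister(proxy); err != nil {
		return fmt.Errorf("registering proxy %q: %w", proxy.ID, err)
	}
	return nil
}

func main() {
	// Assumes an agent at the default address (127.0.0.1:8500).
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		panic(err)
	}
	svc := &api.AgentServiceRegistration{ID: "pod1-web", Name: "web", Port: 8080}
	proxy := &api.AgentServiceRegistration{
		Kind: api.ServiceKindConnectProxy,
		ID:   "pod1-web-sidecar-proxy",
		Name: "web-sidecar-proxy",
		Port: 20000,
		Proxy: &api.AgentServiceConnectProxyConfig{
			DestinationServiceName: "web",
			DestinationServiceID:   "pod1-web",
		},
		Checks: api.AgentServiceChecks{
			{Name: "Destination Alias", AliasService: "pod1-web"},
		},
	}
	if err := registerPair(client, svc, proxy); err != nil {
		panic(err)
	}
	fmt.Println("registered web and its sidecar proxy")
}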
@@ -605,216 +640,7 @@ func (r *EndpointsController) createServiceRegistrations(apiClient *api.Client, } } } - - proxyServiceRegistration := &api.CatalogRegistration{ - Node: ConsulNodeName, - Address: ConsulNodeAddress, - NodeMeta: map[string]string{ - MetaKeySyntheticNode: "true", - }, - Service: proxyService, - Check: &api.AgentCheck{ - CheckID: consulHealthCheckID(pod.Namespace, proxySvcID), - Name: ConsulKubernetesCheckName, - Type: ConsulKubernetesCheckType, - Status: healthStatus, - ServiceID: proxySvcID, - Output: getHealthCheckStatusReason(healthStatus, pod.Name, pod.Namespace), - Namespace: consulNS, - }, - SkipNodeUpdate: true, - } - - return serviceRegistration, proxyServiceRegistration, nil -} - -// createGatewayRegistrations creates the gateway service registrations with the information from the Pod. -func (r *EndpointsController) createGatewayRegistrations(pod corev1.Pod, serviceEndpoints corev1.Endpoints, healthStatus string) (*api.CatalogRegistration, error) { - meta := map[string]string{ - MetaKeyPodName: pod.Name, - MetaKeyKubeServiceName: serviceEndpoints.Name, - MetaKeyKubeNS: serviceEndpoints.Namespace, - MetaKeyManagedBy: managedByValue, - MetaKeySyntheticNode: "true", - } - - service := &api.AgentService{ - ID: pod.Name, - Address: pod.Status.PodIP, - Meta: meta, - } - - gatewayServiceName, ok := pod.Annotations[annotationGatewayConsulServiceName] - if !ok { - return nil, fmt.Errorf("failed to read annontation %s from pod %s/%s", annotationGatewayConsulServiceName, pod.Namespace, pod.Name) - } - service.Service = gatewayServiceName - - var consulNS string - - // Set the service values. - switch pod.Annotations[annotationGatewayKind] { - case MeshGateway: - service.Kind = api.ServiceKindMeshGateway - if r.EnableConsulNamespaces { - service.Namespace = defaultNS - consulNS = defaultNS - } - - port, err := strconv.Atoi(pod.Annotations[annotationMeshGatewayContainerPort]) - if err != nil { - return nil, err - } - service.Port = port - - if r.EnableWANFederation { - meta[MetaKeyConsulWANFederation] = "1" - } - - wanAddr, wanPort, err := r.getWanData(pod, serviceEndpoints) - if err != nil { - return nil, err - } - service.TaggedAddresses = map[string]api.ServiceAddress{ - "lan": { - Address: pod.Status.PodIP, - Port: port, - }, - "wan": { - Address: wanAddr, - Port: wanPort, - }, - } - case TerminatingGateway: - service.Kind = api.ServiceKindTerminatingGateway - service.Port = 8443 - if ns, ok := pod.Annotations[annotationGatewayNamespace]; ok && r.EnableConsulNamespaces { - service.Namespace = ns - consulNS = ns - } - case IngressGateway: - service.Kind = api.ServiceKindIngressGateway - if ns, ok := pod.Annotations[annotationGatewayNamespace]; ok && r.EnableConsulNamespaces { - service.Namespace = ns - consulNS = ns - } - - wanAddr, wanPort, err := r.getWanData(pod, serviceEndpoints) - if err != nil { - return nil, err - } - service.Port = 21000 - service.TaggedAddresses = map[string]api.ServiceAddress{ - "lan": { - Address: pod.Status.PodIP, - Port: 21000, - }, - "wan": { - Address: wanAddr, - Port: wanPort, - }, - } - service.Proxy = &api.AgentServiceConnectProxyConfig{ - Config: map[string]interface{}{ - "envoy_gateway_no_default_bind": true, - "envoy_gateway_bind_addresses": map[string]interface{}{ - "all-interfaces": map[string]interface{}{ - "address": "0.0.0.0", - }, - }, - }, - } - - default: - return nil, fmt.Errorf("%s must be one of %s, %s, or %s", annotationGatewayKind, MeshGateway, TerminatingGateway, IngressGateway) - } - - if 
r.MetricsConfig.DefaultEnableMetrics && r.MetricsConfig.EnableGatewayMetrics { - if pod.Annotations[annotationGatewayKind] == IngressGateway { - service.Proxy.Config["envoy_prometheus_bind_addr"] = fmt.Sprintf("%s:20200", pod.Status.PodIP) - } else { - service.Proxy = &api.AgentServiceConnectProxyConfig{ - Config: map[string]interface{}{ - "envoy_prometheus_bind_addr": fmt.Sprintf("%s:20200", pod.Status.PodIP), - }, - } - } - } - - serviceRegistration := &api.CatalogRegistration{ - Node: ConsulNodeName, - Address: ConsulNodeAddress, - NodeMeta: map[string]string{ - MetaKeySyntheticNode: "true", - }, - Service: service, - Check: &api.AgentCheck{ - CheckID: consulHealthCheckID(pod.Namespace, pod.Name), - Name: ConsulKubernetesCheckName, - Type: ConsulKubernetesCheckType, - Status: healthStatus, - ServiceID: pod.Name, - Namespace: consulNS, - Output: getHealthCheckStatusReason(healthStatus, pod.Name, pod.Namespace), - }, - SkipNodeUpdate: true, - } - - return serviceRegistration, nil -} - -func (r *EndpointsController) getWanData(pod corev1.Pod, endpoints corev1.Endpoints) (string, int, error) { - var wanAddr string - source, ok := pod.Annotations[annotationGatewayWANSource] - if !ok { - return "", 0, fmt.Errorf("failed to read annotation %s", annotationGatewayWANSource) - } - switch source { - case "NodeName": - wanAddr = pod.Spec.NodeName - case "NodeIP": - wanAddr = pod.Status.HostIP - case "Static": - wanAddr = pod.Annotations[annotationGatewayWANAddress] - case "Service": - svc, err := r.getService(endpoints) - if err != nil { - return "", 0, fmt.Errorf("failed to read service %s in namespace %s", endpoints.Name, endpoints.Namespace) - } - switch svc.Spec.Type { - case corev1.ServiceTypeNodePort: - wanAddr = pod.Status.HostIP - case corev1.ServiceTypeClusterIP: - wanAddr = svc.Spec.ClusterIP - case corev1.ServiceTypeLoadBalancer: - if len(svc.Status.LoadBalancer.Ingress) == 0 { - return "", 0, fmt.Errorf("failed to read ingress config for loadbalancer for service %s in namespace %s", endpoints.Name, endpoints.Namespace) - } - for _, ingr := range svc.Status.LoadBalancer.Ingress { - if ingr.IP != "" { - wanAddr = ingr.IP - break - } else if ingr.Hostname != "" { - wanAddr = ingr.Hostname - break - } - } - } - } - - wanPort, err := strconv.Atoi(pod.Annotations[annotationGatewayWANPort]) - if err != nil { - return "", 0, fmt.Errorf("failed to parse WAN port from value %s", pod.Annotations[annotationGatewayWANPort]) - } - return wanAddr, wanPort, nil -} - -func (r *EndpointsController) getService(endpoints corev1.Endpoints) (*corev1.Service, error) { - var svc corev1.Service - if err := r.Client.Get(r.Context, types.NamespacedName{Namespace: endpoints.Namespace, Name: endpoints.Name}, &svc); err != nil { - return nil, err - } - return &svc, nil + return service, proxyService, nil } // portValueFromIntOrString returns the integer port value from the port that can be @@ -833,9 +659,10 @@ func portValueFromIntOrString(pod corev1.Pod, port intstr.IntOrString) (int, err return int(portVal), nil } -// consulHealthCheckID deterministically generates a health check ID based on service ID and Kubernetes namespace. -func consulHealthCheckID(k8sNS string, serviceID string) string { - return fmt.Sprintf("%s/%s", k8sNS, serviceID) +// getConsulHealthCheckID deterministically generates a health check ID that will be unique to the Agent +// where the health check is registered and deregistered. 
+func getConsulHealthCheckID(pod corev1.Pod, serviceID string) string {
+	return fmt.Sprintf("%s/%s/kubernetes-health-check", pod.Namespace, serviceID)
 }

 // getHealthCheckStatusReason takes a Consul health check status (either passing or critical)
@@ -848,63 +675,90 @@ func getHealthCheckStatusReason(healthCheckStatus, podName, podNamespace string)
 	return fmt.Sprintf("Pod \"%s/%s\" is not ready", podNamespace, podName)
 }

-// deregisterService queries all services on the node for service instances that have the metadata
+// deregisterServiceOnAllAgents queries all agents for service instances that have the metadata
 // "k8s-service-name"=k8sSvcName and "k8s-namespace"=k8sSvcNamespace. The k8s service name may or may not match the
 // consul service name, but the k8s service name will always match the metadata on the Consul service
-// "k8s-service-name". So, we query Consul services by "k8s-service-name" metadata.
-// When querying by the k8s service name and namespace, the request will return service instances and
+// "k8s-service-name". So, we query Consul services by "k8s-service-name" metadata, which is only exposed on the agent
+// API. Therefore, we need to query all agents that have services matching that metadata, and deregister each service
+// instance. When querying by the k8s service name and namespace, the request will return service instances and
 // associated proxy service instances.
 // The argument endpointsAddressesMap decides whether to deregister *all* service instances or selectively deregister
 // them only if they are not in endpointsAddressesMap. If the map is nil, it will deregister all instances. If the map
 // has addresses, it will only deregister instances not in the map.
-func (r *EndpointsController) deregisterService(apiClient *api.Client, k8sSvcName, k8sSvcNamespace string, endpointsAddressesMap map[string]bool) error {
-	// Get services matching metadata.
-	svcs, err := r.serviceInstancesForK8SServiceNameAndNamespace(apiClient, k8sSvcName, k8sSvcNamespace)
-	if err != nil {
-		r.Log.Error(err, "failed to get service instances", "name", k8sSvcName)
+func (r *EndpointsController) deregisterServiceOnAllAgents(ctx context.Context, k8sSvcName, k8sSvcNamespace string, endpointsAddressesMap map[string]bool) error {
+	// Get all agents by getting pods with label component=client, app=consul and release=
+	agents := corev1.PodList{}
+	listOptions := client.ListOptions{
+		Namespace: r.ReleaseNamespace,
+		LabelSelector: labels.SelectorFromSet(map[string]string{
+			"component": "client",
+			"app":       "consul",
+			"release":   r.ReleaseName,
+		}),
+	}
+	if err := r.Client.List(ctx, &agents, &listOptions); err != nil {
+		r.Log.Error(err, "failed to get Consul client agent pods")
 		return err
 	}

-	// Deregister each service instance that matches the metadata.
-	for _, svc := range svcs.Services {
-		// We need to get services matching "k8s-service-name" and "k8s-namespace" metadata.
-		// If we selectively deregister, only deregister if the address is not in the map. Otherwise, deregister
-		// every service instance.
-		var serviceDeregistered bool
-		if endpointsAddressesMap != nil {
-			if _, ok := endpointsAddressesMap[svc.Address]; !ok {
-				// If the service address is not in the Endpoints addresses, deregister it.
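On each ready agent, the deregistration loop reduces to a metadata filter query followed by per-instance deregistration. A sketch against a single agent, assuming the github.com/hashicorp/consul/api client; the service and namespace values are illustrative:

    package main

    import (
    	"fmt"
    	"log"

    	"github.com/hashicorp/consul/api"
    )

    func main() {
    	// Client pointed at one agent; the controller repeats this per client pod.
    	client, err := api.NewClient(api.DefaultConfig())
    	if err != nil {
    		log.Fatal(err)
    	}
    	// Match instances registered for a hypothetical k8s Service "web" in "default".
    	filter := fmt.Sprintf(`Meta[%q] == %q and Meta[%q] == %q`,
    		"k8s-service-name", "web", "k8s-namespace", "default")
    	svcs, err := client.Agent().ServicesWithFilter(filter)
    	if err != nil {
    		log.Fatal(err)
    	}
    	for svcID := range svcs {
    		// Deregistration is agent-local, which is why every agent must be visited.
    		if err := client.Agent().ServiceDeregister(svcID); err != nil {
    			log.Fatal(err)
    		}
    	}
    }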
- r.Log.Info("deregistering service from consul", "svc", svc.ID) - _, err = apiClient.Catalog().Deregister(&api.CatalogDeregistration{ - Node: ConsulNodeName, - ServiceID: svc.ID, - Namespace: svc.Namespace, - }, nil) - if err != nil { - r.Log.Error(err, "failed to deregister service instance", "id", svc.ID) + // On each agent, we need to get services matching "k8s-service-name" and "k8s-namespace" metadata. + for _, agent := range agents.Items { + ready := false + for _, status := range agent.Status.Conditions { + if status.Type == corev1.PodReady { + ready = status.Status == corev1.ConditionTrue + } + } + if !ready { + // We can ignore this client agent here because once it switches its status from not-ready to ready, + // we will reconcile all services as part of that event. + r.Log.Info("Consul client agent is not ready, skipping deregistration", "consul-agent", agent.Name, "svc", k8sSvcName) + continue + } + client, err := r.remoteConsulClient(agent.Status.PodIP, r.consulNamespace(k8sSvcNamespace)) + if err != nil { + r.Log.Error(err, "failed to create a new Consul client", "address", agent.Status.PodIP) + return err + } + + // Get services matching metadata. + svcs, err := serviceInstancesForK8SServiceNameAndNamespace(k8sSvcName, k8sSvcNamespace, client) + if err != nil { + r.Log.Error(err, "failed to get service instances", "name", k8sSvcName) + return err + } + + // Deregister each service instance that matches the metadata. + for svcID, serviceRegistration := range svcs { + // If we selectively deregister, only deregister if the address is not in the map. Otherwise, deregister + // every service instance. + var serviceDeregistered bool + if endpointsAddressesMap != nil { + if _, ok := endpointsAddressesMap[serviceRegistration.Address]; !ok { + // If the service address is not in the Endpoints addresses, deregister it. 
+ r.Log.Info("deregistering service from consul", "svc", svcID) + if err = client.Agent().ServiceDeregister(svcID); err != nil { + r.Log.Error(err, "failed to deregister service instance", "id", svcID) + return err + } + serviceDeregistered = true + } + } else { + r.Log.Info("deregistering service from consul", "svc", svcID) + if err = client.Agent().ServiceDeregister(svcID); err != nil { + r.Log.Error(err, "failed to deregister service instance", "id", svcID) return err } serviceDeregistered = true } - } else { - r.Log.Info("deregistering service from consul", "svc", svc.ID) - if _, err = apiClient.Catalog().Deregister(&api.CatalogDeregistration{ - Node: ConsulNodeName, - ServiceID: svc.ID, - Namespace: svc.Namespace, - }, nil); err != nil { - r.Log.Error(err, "failed to deregister service instance", "id", svc.ID) - return err - } - serviceDeregistered = true - } - if r.AuthMethod != "" && serviceDeregistered { - r.Log.Info("reconciling ACL tokens for service", "svc", svc.Service) - err = r.deleteACLTokensForServiceInstance(apiClient, svc, k8sSvcNamespace, svc.Meta[MetaKeyPodName]) - if err != nil { - r.Log.Error(err, "failed to reconcile ACL tokens for service", "svc", svc.Service) - return err + if r.AuthMethod != "" && serviceDeregistered { + r.Log.Info("reconciling ACL tokens for service", "svc", serviceRegistration.Service) + err = r.deleteACLTokensForServiceInstance(client, serviceRegistration.Service, k8sSvcNamespace, serviceRegistration.Meta[MetaKeyPodName]) + if err != nil { + r.Log.Error(err, "failed to reconcile ACL tokens for service", "svc", serviceRegistration.Service) + return err + } } } } @@ -915,15 +769,13 @@ func (r *EndpointsController) deregisterService(apiClient *api.Client, k8sSvcNam // deleteACLTokensForServiceInstance finds the ACL tokens that belongs to the service instance and deletes it from Consul. // It will only check for ACL tokens that have been created with the auth method this controller // has been configured with and will only delete tokens for the provided podName. -func (r *EndpointsController) deleteACLTokensForServiceInstance(apiClient *api.Client, svc *api.AgentService, k8sNS, podName string) error { +func (r *EndpointsController) deleteACLTokensForServiceInstance(client *api.Client, serviceName, k8sNS, podName string) error { // Skip if podName is empty. if podName == "" { return nil } - tokens, _, err := apiClient.ACL().TokenList(&api.QueryOptions{ - Namespace: svc.Namespace, - }) + tokens, _, err := client.ACL().TokenList(nil) if err != nil { return fmt.Errorf("failed to get a list of tokens from Consul: %s", err) } @@ -931,10 +783,10 @@ func (r *EndpointsController) deleteACLTokensForServiceInstance(apiClient *api.C for _, token := range tokens { // Only delete tokens that: // * have been created with the auth method configured for this endpoints controller - // * have a single service identity whose service name is the same as 'svc.Service' + // * have a single service identity whose service name is the same as 'serviceName' if token.AuthMethod == r.AuthMethod && len(token.ServiceIdentities) == 1 && - token.ServiceIdentities[0].ServiceName == svc.Service { + token.ServiceIdentities[0].ServiceName == serviceName { tokenMeta, err := getTokenMetaFromDescription(token.Description) if err != nil { return fmt.Errorf("failed to parse token metadata: %s", err) @@ -945,12 +797,16 @@ func (r *EndpointsController) deleteACLTokensForServiceInstance(apiClient *api.C // If we can't find token's pod, delete it. 
if tokenPodName == podName { r.Log.Info("deleting ACL token for pod", "name", podName) - if _, err := apiClient.ACL().TokenDelete(token.AccessorID, &api.WriteOptions{Namespace: svc.Namespace}); err != nil { + _, err = client.ACL().TokenDelete(token.AccessorID, nil) + if err != nil { return fmt.Errorf("failed to delete token from Consul: %s", err) } + } else if err != nil { + return err } } } + return nil } @@ -1031,19 +887,10 @@ func getTokenMetaFromDescription(description string) (map[string]string, error) // serviceInstancesForK8SServiceNameAndNamespace calls Consul's ServicesWithFilter to get the list // of services instances that have the provided k8sServiceName and k8sServiceNamespace in their metadata. -func (r *EndpointsController) serviceInstancesForK8SServiceNameAndNamespace(apiClient *api.Client, k8sServiceName, k8sServiceNamespace string) (*api.CatalogNodeServiceList, error) { - var ( - serviceList *api.CatalogNodeServiceList - err error - ) - filter := fmt.Sprintf(`Meta[%q] == %q and Meta[%q] == %q and Meta[%q] == %q`, - MetaKeyKubeServiceName, k8sServiceName, MetaKeyKubeNS, k8sServiceNamespace, MetaKeyManagedBy, managedByValue) - if r.EnableConsulNamespaces { - serviceList, _, err = apiClient.Catalog().NodeServiceList(ConsulNodeName, &api.QueryOptions{Filter: filter, Namespace: namespaces.WildcardNamespace}) - } else { - serviceList, _, err = apiClient.Catalog().NodeServiceList(ConsulNodeName, &api.QueryOptions{Filter: filter}) - } - return serviceList, err +func serviceInstancesForK8SServiceNameAndNamespace(k8sServiceName, k8sServiceNamespace string, client *api.Client) (map[string]*api.AgentService, error) { + return client.Agent().ServicesWithFilter( + fmt.Sprintf(`Meta[%q] == %q and Meta[%q] == %q and Meta[%q] == %q`, + MetaKeyKubeServiceName, k8sServiceName, MetaKeyKubeNS, k8sServiceNamespace, MetaKeyManagedBy, managedByValue)) } // processPreparedQueryUpstream processes an upstream in the format: @@ -1098,6 +945,24 @@ func (r *EndpointsController) processUnlabeledUpstream(pod corev1.Pod, rawUpstre // parse the optional datacenter if len(parts) > 2 { datacenter = strings.TrimSpace(parts[2]) + + // Check if there's a proxy defaults config with mesh gateway + // mode set to local or remote. This helps users from + // accidentally forgetting to set a mesh gateway mode + // and then being confused as to why their traffic isn't + // routing. + entry, _, err := r.ConsulClient.ConfigEntries().Get(api.ProxyDefaults, api.ProxyConfigGlobal, nil) + if err != nil && strings.Contains(err.Error(), "Unexpected response code: 404") { + return api.Upstream{}, fmt.Errorf("upstream %q is invalid: there is no ProxyDefaults config to set mesh gateway mode", rawUpstream) + } else if err == nil { + mode := entry.(*api.ProxyConfigEntry).MeshGateway.Mode + if mode != api.MeshGatewayModeLocal && mode != api.MeshGatewayModeRemote { + return api.Upstream{}, fmt.Errorf("upstream %q is invalid: ProxyDefaults mesh gateway mode is neither %q nor %q", rawUpstream, api.MeshGatewayModeLocal, api.MeshGatewayModeRemote) + } + } + // NOTE: If we can't reach Consul we don't error out because + // that would fail the pod scheduling and this is a nice-to-have + // check, not something that should block during a Consul hiccup. } if port > 0 { upstream = api.Upstream{ @@ -1193,6 +1058,15 @@ func (r *EndpointsController) processLabeledUpstream(pod corev1.Pod, rawUpstream return upstream, nil } +// remoteConsulClient returns an *api.Client that points at the consul agent local to the pod for a provided namespace. 
+func (r *EndpointsController) remoteConsulClient(ip string, namespace string) (*api.Client, error) {
+	newAddr := fmt.Sprintf("%s://%s:%s", r.ConsulScheme, ip, r.ConsulPort)
+	localConfig := r.ConsulClientCfg
+	localConfig.Address = newAddr
+	localConfig.Namespace = namespace
+	return consul.NewClient(localConfig, r.ConsulAPITimeout)
+}
+
 // shouldIgnore ignores namespaces where we don't connect-inject.
 func shouldIgnore(namespace string, denySet, allowSet mapset.Set) bool {
 	// Ignores system namespaces.
@@ -1213,6 +1087,92 @@ func shouldIgnore(namespace string, denySet, allowSet mapset.Set) bool {
 	return false
 }

+// filterAgentPods receives meta and object information for Kubernetes resources that are being watched,
+// which in this case are Pods. It returns true only if the Pod is a Consul client agent Pod. It reads the labels
+// from the meta of the resource and uses the values of the "app", "component", and "release" labels to validate
+// that the Pod is a Consul client agent.
+func (r *EndpointsController) filterAgentPods(object client.Object) bool {
+	podLabels := object.GetLabels()
+	app, ok := podLabels["app"]
+	if !ok {
+		return false
+	}
+	component, ok := podLabels["component"]
+	if !ok {
+		return false
+	}
+
+	release, ok := podLabels["release"]
+	if !ok {
+		return false
+	}
+
+	if app == "consul" && component == "client" && release == r.ReleaseName {
+		return true
+	}
+	return false
+}
+
+// requestsForRunningAgentPods creates a slice of requests for the endpoints controller.
+// It enqueues a request for each endpoint that needs to be reconciled. It iterates through
+// the list of endpoints and creates a request for each endpoint that has an address on the
+// same node as the new Consul agent pod. It receives a Pod object that has already been
+// filtered by filterAgentPods, and it only enqueues endpoints for client agent pods whose
+// Ready condition is true.
+func (r *EndpointsController) requestsForRunningAgentPods(object client.Object) []ctrl.Request {
+	var consulClientPod corev1.Pod
+	r.Log.Info("received update for Consul client pod", "name", object.GetName())
+	err := r.Client.Get(r.Context, types.NamespacedName{Name: object.GetName(), Namespace: object.GetNamespace()}, &consulClientPod)
+	if k8serrors.IsNotFound(err) {
+		// Ignore if consulClientPod is not found.
+		return []ctrl.Request{}
+	}
+	if err != nil {
+		r.Log.Error(err, "failed to get Consul client pod", "name", consulClientPod.Name)
+		return []ctrl.Request{}
+	}
+	// We can ignore the agent pod if it's not running, since
+	// we can't reconcile and register/deregister services against that agent.
+	if consulClientPod.Status.Phase != corev1.PodRunning {
+		r.Log.Info("ignoring Consul client pod because it's not running", "name", consulClientPod.Name)
+		return []ctrl.Request{}
+	}
+	// We can ignore the agent pod if it's not yet ready, since
+	// we can't reconcile and register/deregister services against that agent.
+	for _, cond := range consulClientPod.Status.Conditions {
+		if cond.Type == corev1.PodReady && cond.Status != corev1.ConditionTrue {
+			// Ignore if consulClientPod is not ready.
+			r.Log.Info("ignoring Consul client pod because it's not ready", "name", consulClientPod.Name)
+			return []ctrl.Request{}
+		}
+	}
+
+	// Get the list of all endpoints.
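filterAgentPods above amounts to a three-label predicate. An equivalent standalone check, with the release name as an assumed input:

    package main

    import "fmt"

    // isConsulClientAgent mirrors the label checks in filterAgentPods: a missing
    // key yields "" from the map, which fails the equality test just like the
    // explicit ok-checks do.
    func isConsulClientAgent(podLabels map[string]string, releaseName string) bool {
    	return podLabels["app"] == "consul" &&
    		podLabels["component"] == "client" &&
    		podLabels["release"] == releaseName
    }

    func main() {
    	labels := map[string]string{"app": "consul", "component": "client", "release": "consul"}
    	fmt.Println(isConsulClientAgent(labels, "consul")) // true
    }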
+ var endpointsList corev1.EndpointsList + err = r.Client.List(r.Context, &endpointsList) + if err != nil { + r.Log.Error(err, "failed to list endpoints") + return []ctrl.Request{} + } + + // Enqueue requests for endpoints that are on the same node + // as the client agent. + var requests []reconcile.Request + for _, ep := range endpointsList.Items { + for _, subset := range ep.Subsets { + allAddresses := subset.Addresses + allAddresses = append(allAddresses, subset.NotReadyAddresses...) + for _, address := range allAddresses { + // Only add requests for the address that is on the same node as the consul client pod. + if address.NodeName != nil && *address.NodeName == consulClientPod.Spec.NodeName { + requests = append(requests, reconcile.Request{NamespacedName: types.NamespacedName{Name: ep.Name, Namespace: ep.Namespace}}) + } + } + } + } + return requests +} + // consulNamespace returns the Consul destination namespace for a provided Kubernetes namespace // depending on Consul Namespaces being enabled and the value of namespace mirroring. func (r *EndpointsController) consulNamespace(namespace string) string { @@ -1227,12 +1187,6 @@ func hasBeenInjected(pod corev1.Pod) bool { return false } -// isGateway checks the value of the gateway annotation and returns true if the Pod represents a Gateway. -func isGateway(pod corev1.Pod) bool { - anno, ok := pod.Annotations[annotationGatewayKind] - return ok && anno != "" -} - // mapAddresses combines all addresses to a mapping of address to its health status. func mapAddresses(addresses corev1.EndpointSubset) map[corev1.EndpointAddress]string { m := make(map[corev1.EndpointAddress]string) @@ -1284,7 +1238,7 @@ func consulTags(pod corev1.Pod) []string { func getMultiPortIdx(pod corev1.Pod, serviceEndpoints corev1.Endpoints) int { for i, name := range strings.Split(pod.Annotations[annotationService], ",") { - if name == serviceName(pod, serviceEndpoints) { + if name == getServiceName(pod, serviceEndpoints) { return i } } diff --git a/control-plane/connect-inject/endpoints_controller_ent_test.go b/control-plane/connect-inject/endpoints_controller_ent_test.go index 0d13b6a8ac..5859bd9206 100644 --- a/control-plane/connect-inject/endpoints_controller_ent_test.go +++ b/control-plane/connect-inject/endpoints_controller_ent_test.go @@ -5,6 +5,7 @@ package connectinject import ( "context" "fmt" + "strings" "testing" mapset "github.com/deckarep/golang-set" @@ -30,6 +31,7 @@ import ( // This test covers EndpointsController.createServiceRegistrations. 
func TestReconcileCreateEndpointWithNamespaces(t *testing.T) { t.Parallel() + nodeName := "test-node" cases := map[string]struct { Mirror bool MirrorPrefix string @@ -74,64 +76,70 @@ func TestReconcileCreateEndpointWithNamespaces(t *testing.T) { ExpConsulNS: "prefix-default", }, } - for name, testCase := range cases { + for name, test := range cases { setup := struct { consulSvcName string k8sObjects func() []runtime.Object + initialConsulSvcs []*api.AgentServiceRegistration + expectedNumSvcInstances int expectedConsulSvcInstances []*api.CatalogService expectedProxySvcInstances []*api.CatalogService - expectedHealthChecks []*api.HealthCheck + expectedAgentHealthChecks []*api.AgentCheck }{ consulSvcName: "service-created", k8sObjects: func() []runtime.Object { - pod1 := createPodWithNamespace("pod1", testCase.SourceKubeNS, "1.2.3.4", true, true) - pod2 := createPodWithNamespace("pod2", testCase.SourceKubeNS, "2.2.3.4", true, true) - endpoints := &corev1.Endpoints{ + pod1 := createPodWithNamespace("pod1", test.SourceKubeNS, "1.2.3.4", true, true) + pod2 := createPodWithNamespace("pod2", test.SourceKubeNS, "2.2.3.4", true, true) + endpointWithTwoAddresses := &corev1.Endpoints{ ObjectMeta: metav1.ObjectMeta{ Name: "service-created", - Namespace: testCase.SourceKubeNS, + Namespace: test.SourceKubeNS, }, Subsets: []corev1.EndpointSubset{ { Addresses: []corev1.EndpointAddress{ { - IP: "1.2.3.4", + IP: "1.2.3.4", + NodeName: &nodeName, TargetRef: &corev1.ObjectReference{ Kind: "Pod", Name: "pod1", - Namespace: testCase.SourceKubeNS, + Namespace: test.SourceKubeNS, }, }, { - IP: "2.2.3.4", + IP: "2.2.3.4", + NodeName: &nodeName, TargetRef: &corev1.ObjectReference{ Kind: "Pod", Name: "pod2", - Namespace: testCase.SourceKubeNS, + Namespace: test.SourceKubeNS, }, }, }, }, }, } - return []runtime.Object{pod1, pod2, endpoints} + return []runtime.Object{pod1, pod2, endpointWithTwoAddresses} }, + initialConsulSvcs: []*api.AgentServiceRegistration{}, + expectedNumSvcInstances: 2, expectedConsulSvcInstances: []*api.CatalogService{ { ServiceID: "pod1-service-created", ServiceName: "service-created", ServiceAddress: "1.2.3.4", - ServiceMeta: map[string]string{MetaKeyPodName: "pod1", MetaKeyKubeServiceName: "service-created", MetaKeyKubeNS: testCase.SourceKubeNS, MetaKeyManagedBy: managedByValue, MetaKeySyntheticNode: "true"}, + ServiceMeta: map[string]string{MetaKeyPodName: "pod1", MetaKeyKubeServiceName: "service-created", MetaKeyKubeNS: test.SourceKubeNS, MetaKeyManagedBy: managedByValue}, ServiceTags: []string{}, - Namespace: testCase.ExpConsulNS, + Namespace: test.ExpConsulNS, }, { ServiceID: "pod2-service-created", ServiceName: "service-created", ServiceAddress: "2.2.3.4", - ServiceMeta: map[string]string{MetaKeyPodName: "pod2", MetaKeyKubeServiceName: "service-created", MetaKeyKubeNS: testCase.SourceKubeNS, MetaKeyManagedBy: managedByValue, MetaKeySyntheticNode: "true"}, + ServiceMeta: map[string]string{MetaKeyPodName: "pod2", MetaKeyKubeServiceName: "service-created", MetaKeyKubeNS: test.SourceKubeNS, MetaKeyManagedBy: managedByValue}, ServiceTags: []string{}, - Namespace: testCase.ExpConsulNS, + Namespace: test.ExpConsulNS, }, }, expectedProxySvcInstances: []*api.CatalogService{ @@ -144,9 +152,9 @@ func TestReconcileCreateEndpointWithNamespaces(t *testing.T) { DestinationServiceName: "service-created", DestinationServiceID: "pod1-service-created", }, - ServiceMeta: map[string]string{MetaKeyPodName: "pod1", MetaKeyKubeServiceName: "service-created", MetaKeyKubeNS: testCase.SourceKubeNS, 
MetaKeyManagedBy: managedByValue, MetaKeySyntheticNode: "true"}, + ServiceMeta: map[string]string{MetaKeyPodName: "pod1", MetaKeyKubeServiceName: "service-created", MetaKeyKubeNS: test.SourceKubeNS, MetaKeyManagedBy: managedByValue}, ServiceTags: []string{}, - Namespace: testCase.ExpConsulNS, + Namespace: test.ExpConsulNS, }, { ServiceID: "pod2-service-created-sidecar-proxy", @@ -157,84 +165,93 @@ func TestReconcileCreateEndpointWithNamespaces(t *testing.T) { DestinationServiceName: "service-created", DestinationServiceID: "pod2-service-created", }, - ServiceMeta: map[string]string{MetaKeyPodName: "pod2", MetaKeyKubeServiceName: "service-created", MetaKeyKubeNS: testCase.SourceKubeNS, MetaKeyManagedBy: managedByValue, MetaKeySyntheticNode: "true"}, + ServiceMeta: map[string]string{MetaKeyPodName: "pod2", MetaKeyKubeServiceName: "service-created", MetaKeyKubeNS: test.SourceKubeNS, MetaKeyManagedBy: managedByValue}, ServiceTags: []string{}, - Namespace: testCase.ExpConsulNS, + Namespace: test.ExpConsulNS, }, }, - expectedHealthChecks: []*api.HealthCheck{ + expectedAgentHealthChecks: []*api.AgentCheck{ { - CheckID: fmt.Sprintf("%s/pod1-service-created", testCase.SourceKubeNS), + CheckID: fmt.Sprintf("%s/pod1-service-created/kubernetes-health-check", test.SourceKubeNS), ServiceName: "service-created", ServiceID: "pod1-service-created", - Name: ConsulKubernetesCheckName, + Name: "Kubernetes Health Check", Status: api.HealthPassing, Output: kubernetesSuccessReasonMsg, - Type: ConsulKubernetesCheckType, - Namespace: testCase.ExpConsulNS, + Type: ttl, + Namespace: test.ExpConsulNS, }, { - CheckID: fmt.Sprintf("%s/pod1-service-created-sidecar-proxy", testCase.SourceKubeNS), - ServiceName: "service-created-sidecar-proxy", - ServiceID: "pod1-service-created-sidecar-proxy", - Name: ConsulKubernetesCheckName, - Status: api.HealthPassing, - Output: kubernetesSuccessReasonMsg, - Type: ConsulKubernetesCheckType, - Namespace: testCase.ExpConsulNS, - }, - { - CheckID: fmt.Sprintf("%s/pod2-service-created", testCase.SourceKubeNS), + CheckID: fmt.Sprintf("%s/pod2-service-created/kubernetes-health-check", test.SourceKubeNS), ServiceName: "service-created", ServiceID: "pod2-service-created", - Name: ConsulKubernetesCheckName, - Status: api.HealthPassing, - Output: kubernetesSuccessReasonMsg, - Type: ConsulKubernetesCheckType, - Namespace: testCase.ExpConsulNS, - }, - { - CheckID: fmt.Sprintf("%s/pod2-service-created-sidecar-proxy", testCase.SourceKubeNS), - ServiceName: "service-created-sidecar-proxy", - ServiceID: "pod2-service-created-sidecar-proxy", - Name: ConsulKubernetesCheckName, + Name: "Kubernetes Health Check", Status: api.HealthPassing, Output: kubernetesSuccessReasonMsg, - Type: ConsulKubernetesCheckType, - Namespace: testCase.ExpConsulNS, + Type: ttl, + Namespace: test.ExpConsulNS, }, }, } t.Run(name, func(t *testing.T) { + // The agent pod needs to have the address 127.0.0.1 so when the + // code gets the agent pods via the label component=client, and + // makes requests against the agent API, it will actually hit the + // test server we have on localhost. + fakeClientPod := createPod("fake-consul-client", "127.0.0.1", false, false) + fakeClientPod.Labels = map[string]string{"component": "client", "app": "consul", "release": "consul"} + // Add the pods namespace. - ns := corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: testCase.SourceKubeNS}} + ns := corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: test.SourceKubeNS}} // Create fake k8s client. 
- k8sObjects := append(setup.k8sObjects(), &ns) + k8sObjects := append(setup.k8sObjects(), fakeClientPod, &ns) fakeClient := fake.NewClientBuilder().WithRuntimeObjects(k8sObjects...).Build() - // Create test consulServer server - testClient := test.TestServerWithMockConnMgrWatcher(t, nil) + // Create test Consul server. + consul, err := testutil.NewTestServerConfigT(t, func(c *testutil.TestServerConfig) { + c.NodeName = nodeName + }) + require.NoError(t, err) + defer consul.Stop() + consul.WaitForLeader(t) + + cfg := &api.Config{ + Address: consul.HTTPAddr, + Namespace: test.ExpConsulNS, + } + consulClient, err := api.NewClient(cfg) + require.NoError(t, err) + addr := strings.Split(consul.HTTPAddr, ":") + consulPort := addr[1] - _, err := namespaces.EnsureExists(testClient.APIClient, testCase.ExpConsulNS, "") + _, err = namespaces.EnsureExists(consulClient, test.ExpConsulNS, "") require.NoError(t, err) + // Register service and proxy in Consul. + for _, svc := range setup.initialConsulSvcs { + err = consulClient.Agent().ServiceRegister(svc) + require.NoError(t, err) + } + // Create the endpoints controller. ep := &EndpointsController{ Client: fakeClient, Log: logrtest.TestLogger{T: t}, - ConsulClientConfig: testClient.Cfg, - ConsulServerConnMgr: testClient.Watcher, + ConsulClient: consulClient, + ConsulPort: consulPort, + ConsulScheme: "http", AllowK8sNamespacesSet: mapset.NewSetWith("*"), DenyK8sNamespacesSet: mapset.NewSetWith(), ReleaseName: "consul", ReleaseNamespace: "default", + ConsulClientCfg: cfg, EnableConsulNamespaces: true, - ConsulDestinationNamespace: testCase.DestConsulNS, - EnableNSMirroring: testCase.Mirror, - NSMirroringPrefix: testCase.MirrorPrefix, + ConsulDestinationNamespace: test.DestConsulNS, + EnableNSMirroring: test.Mirror, + NSMirroringPrefix: test.MirrorPrefix, } namespacedName := types.NamespacedName{ - Namespace: testCase.SourceKubeNS, + Namespace: test.SourceKubeNS, Name: "service-created", } @@ -244,15 +261,10 @@ func TestReconcileCreateEndpointWithNamespaces(t *testing.T) { require.NoError(t, err) require.False(t, resp.Requeue) - consulConfig := testClient.Cfg - consulConfig.APIClientConfig.Namespace = testCase.ExpConsulNS - consulClient, err := api.NewClient(consulConfig.APIClientConfig) - require.NoError(t, err) - // After reconciliation, Consul should have the service with the correct number of instances. 
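The assertions that follow read registrations back through the catalog API. A standalone equivalent, assuming the github.com/hashicorp/consul/api client and an illustrative service and namespace:

    package main

    import (
    	"fmt"
    	"log"

    	"github.com/hashicorp/consul/api"
    )

    func main() {
    	client, err := api.NewClient(api.DefaultConfig())
    	if err != nil {
    		log.Fatal(err)
    	}
    	// List catalog instances of a hypothetical service in a Consul namespace.
    	instances, _, err := client.Catalog().Service("service-created", "",
    		&api.QueryOptions{Namespace: "ns1"})
    	if err != nil {
    		log.Fatal(err)
    	}
    	for _, inst := range instances {
    		fmt.Println(inst.ServiceID, inst.ServiceAddress, inst.ServiceMeta)
    	}
    }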
- serviceInstances, _, err := consulClient.Catalog().Service(setup.consulSvcName, "", &api.QueryOptions{Namespace: testCase.ExpConsulNS}) + serviceInstances, _, err := consulClient.Catalog().Service(setup.consulSvcName, "", &api.QueryOptions{Namespace: test.ExpConsulNS}) require.NoError(t, err) - require.Len(t, serviceInstances, len(setup.expectedConsulSvcInstances)) + require.Len(t, serviceInstances, setup.expectedNumSvcInstances) for i, instance := range serviceInstances { require.Equal(t, setup.expectedConsulSvcInstances[i].ServiceID, instance.ServiceID) require.Equal(t, setup.expectedConsulSvcInstances[i].ServiceName, instance.ServiceName) @@ -260,13 +272,12 @@ func TestReconcileCreateEndpointWithNamespaces(t *testing.T) { require.Equal(t, setup.expectedConsulSvcInstances[i].ServicePort, instance.ServicePort) require.Equal(t, setup.expectedConsulSvcInstances[i].ServiceMeta, instance.ServiceMeta) require.Equal(t, setup.expectedConsulSvcInstances[i].ServiceTags, instance.ServiceTags) - require.Equal(t, setup.expectedConsulSvcInstances[i].ServiceTaggedAddresses, instance.ServiceTaggedAddresses) } proxyServiceInstances, _, err := consulClient.Catalog().Service(fmt.Sprintf("%s-sidecar-proxy", setup.consulSvcName), "", &api.QueryOptions{ - Namespace: testCase.ExpConsulNS, + Namespace: test.ExpConsulNS, }) require.NoError(t, err) - require.Len(t, proxyServiceInstances, len(setup.expectedProxySvcInstances)) + require.Len(t, proxyServiceInstances, setup.expectedNumSvcInstances) for i, instance := range proxyServiceInstances { require.Equal(t, setup.expectedProxySvcInstances[i].ServiceID, instance.ServiceID) require.Equal(t, setup.expectedProxySvcInstances[i].ServiceName, instance.ServiceName) @@ -277,258 +288,29 @@ func TestReconcileCreateEndpointWithNamespaces(t *testing.T) { require.Equal(t, setup.expectedProxySvcInstances[i].ServiceTags, instance.ServiceTags) } - // Check that the Consul health checks was created for the k8s pod. - for _, expectedCheck := range setup.expectedHealthChecks { - var checks api.HealthChecks - filter := fmt.Sprintf("CheckID == `%s`", expectedCheck.CheckID) - checks, _, err := consulClient.Health().Checks(expectedCheck.ServiceName, &api.QueryOptions{Filter: filter}) - require.NoError(t, err) - require.Equal(t, len(checks), 1) - var ignoredFields = []string{"Node", "Definition", "Partition", "CreateIndex", "ModifyIndex", "ServiceTags"} - require.True(t, cmp.Equal(checks[0], expectedCheck, cmpopts.IgnoreFields(api.HealthCheck{}, ignoredFields...))) - } - }) - } -} - -// TestReconcileCreateGatewayWithNamespaces verifies that gateways created using -// the Endpoints Controller with Consul namespaces are correct. 
-func TestReconcileCreateGatewayWithNamespaces(t *testing.T) { - t.Parallel() - cases := map[string]struct { - ConsulNS string - }{ - "default Consul namespace": { - ConsulNS: "default", - }, - "other Consul namespace": { - ConsulNS: "other", - }, - } - for name, testCase := range cases { - setup := struct { - k8sObjects func() []runtime.Object - expectedConsulSvcInstances []*api.CatalogService - expectedProxySvcInstances []*api.CatalogService - expectedHealthChecks []*api.HealthCheck - }{ - k8sObjects: func() []runtime.Object { - meshGateway := createGatewayWithNamespace("mesh-gateway", "default", "3.3.3.3", map[string]string{ - annotationGatewayWANSource: "Static", - annotationGatewayWANAddress: "2.3.4.5", - annotationGatewayWANPort: "443", - annotationMeshGatewayContainerPort: "8443", - annotationGatewayKind: MeshGateway, - annotationGatewayConsulServiceName: "mesh-gateway"}) - terminatingGateway := createGatewayWithNamespace("terminating-gateway", "default", "4.4.4.4", map[string]string{ - annotationGatewayKind: TerminatingGateway, - annotationGatewayNamespace: testCase.ConsulNS, - annotationGatewayConsulServiceName: "terminating-gateway"}) - ingressGateway := createGatewayWithNamespace("ingress-gateway", "default", "5.5.5.5", map[string]string{ - annotationGatewayWANSource: "Service", - annotationGatewayWANPort: "8443", - annotationGatewayNamespace: testCase.ConsulNS, - annotationGatewayKind: IngressGateway, - annotationGatewayConsulServiceName: "ingress-gateway"}) - svc := &corev1.Service{ - ObjectMeta: metav1.ObjectMeta{ - Name: "gateway", - Namespace: "default", - }, - Spec: corev1.ServiceSpec{ - Type: corev1.ServiceTypeLoadBalancer, - }, - Status: corev1.ServiceStatus{ - LoadBalancer: corev1.LoadBalancerStatus{ - Ingress: []corev1.LoadBalancerIngress{ - { - IP: "5.6.7.8", - }, - }, - }, - }, - } - endpoints := &corev1.Endpoints{ - ObjectMeta: metav1.ObjectMeta{ - Name: "gateway", - Namespace: "default", - }, - Subsets: []corev1.EndpointSubset{ - { - Addresses: []corev1.EndpointAddress{ - { - IP: "3.3.3.3", - TargetRef: &corev1.ObjectReference{ - Kind: "Pod", - Name: "mesh-gateway", - Namespace: "default", - }, - }, - { - IP: "4.4.4.4", - TargetRef: &corev1.ObjectReference{ - Kind: "Pod", - Name: "terminating-gateway", - Namespace: "default", - }, - }, - { - IP: "5.5.5.5", - TargetRef: &corev1.ObjectReference{ - Kind: "Pod", - Name: "ingress-gateway", - Namespace: "default", - }, - }, - }, - }, - }, - } - return []runtime.Object{meshGateway, terminatingGateway, ingressGateway, svc, endpoints} - }, - expectedConsulSvcInstances: []*api.CatalogService{ - { - ServiceID: "mesh-gateway", - ServiceName: "mesh-gateway", - ServiceAddress: "3.3.3.3", - ServiceMeta: map[string]string{MetaKeyPodName: "mesh-gateway", MetaKeyKubeServiceName: "gateway", MetaKeyKubeNS: "default", MetaKeyManagedBy: managedByValue, MetaKeySyntheticNode: "true"}, - ServiceTags: []string{}, - ServicePort: 8443, - ServiceTaggedAddresses: map[string]api.ServiceAddress{ - "lan": { - Address: "3.3.3.3", - Port: 8443, - }, - "wan": { - Address: "2.3.4.5", - Port: 443, - }, - }, - Namespace: "default", - }, - { - ServiceID: "terminating-gateway", - ServiceName: "terminating-gateway", - ServiceAddress: "4.4.4.4", - ServiceMeta: map[string]string{MetaKeyPodName: "terminating-gateway", MetaKeyKubeServiceName: "gateway", MetaKeyKubeNS: "default", MetaKeyManagedBy: managedByValue, MetaKeySyntheticNode: "true"}, - ServiceTags: []string{}, - ServicePort: 8443, - Namespace: testCase.ConsulNS, - }, - { - ServiceID: 
"ingress-gateway", - ServiceName: "ingress-gateway", - ServiceAddress: "5.5.5.5", - ServiceMeta: map[string]string{MetaKeyPodName: "ingress-gateway", MetaKeyKubeServiceName: "gateway", MetaKeyKubeNS: "default", MetaKeyManagedBy: managedByValue, MetaKeySyntheticNode: "true"}, - ServiceTags: []string{}, - ServicePort: 21000, - ServiceTaggedAddresses: map[string]api.ServiceAddress{ - "lan": { - Address: "5.5.5.5", - Port: 21000, - }, - "wan": { - Address: "5.6.7.8", - Port: 8443, - }, - }, - Namespace: testCase.ConsulNS, - }, - }, - expectedHealthChecks: []*api.HealthCheck{ - { - CheckID: "default/mesh-gateway", - ServiceName: "mesh-gateway", - ServiceID: "mesh-gateway", - Name: ConsulKubernetesCheckName, - Status: api.HealthPassing, - Output: kubernetesSuccessReasonMsg, - Type: ConsulKubernetesCheckType, - Namespace: "default", - }, - { - CheckID: "default/terminating-gateway", - ServiceName: "terminating-gateway", - ServiceID: "terminating-gateway", - Name: ConsulKubernetesCheckName, - Status: api.HealthPassing, - Output: kubernetesSuccessReasonMsg, - Type: ConsulKubernetesCheckType, - Namespace: testCase.ConsulNS, - }, - { - CheckID: "default/ingress-gateway", - ServiceName: "ingress-gateway", - ServiceID: "ingress-gateway", - Name: ConsulKubernetesCheckName, - Status: api.HealthPassing, - Output: kubernetesSuccessReasonMsg, - Type: ConsulKubernetesCheckType, - Namespace: testCase.ConsulNS, - }, - }, - } - t.Run(name, func(t *testing.T) { - // Create fake k8s client. - fakeClient := fake.NewClientBuilder().WithRuntimeObjects(setup.k8sObjects()...).Build() - - // Create testCase Consul server. - testClient := test.TestServerWithMockConnMgrWatcher(t, nil) - consulClient := testClient.APIClient - _, err := namespaces.EnsureExists(consulClient, testCase.ConsulNS, "") - require.NoError(t, err) - - // Create the endpoints controller. - ep := &EndpointsController{ - Client: fakeClient, - Log: logrtest.TestLogger{T: t}, - ConsulClientConfig: testClient.Cfg, - ConsulServerConnMgr: testClient.Watcher, - AllowK8sNamespacesSet: mapset.NewSetWith("*"), - DenyK8sNamespacesSet: mapset.NewSetWith(), - ReleaseName: "consul", - ReleaseNamespace: "default", - EnableConsulNamespaces: true, - } - namespacedName := types.NamespacedName{ - Namespace: "default", - Name: "gateway", - } - - resp, err := ep.Reconcile(context.Background(), ctrl.Request{ - NamespacedName: namespacedName, - }) + _, checkInfos, err := consulClient.Agent().AgentHealthServiceByName(fmt.Sprintf("%s-sidecar-proxy", setup.consulSvcName)) + expectedChecks := []string{"Proxy Public Listener", "Destination Alias"} require.NoError(t, err) - require.False(t, resp.Requeue) - - // After reconciliation, Consul should have the service with the correct number of instances. - var serviceInstances []*api.CatalogService - for _, expected := range setup.expectedConsulSvcInstances { - serviceInstance, _, err := consulClient.Catalog().Service(expected.ServiceName, "", &api.QueryOptions{Namespace: expected.Namespace}) - require.NoError(t, err) - serviceInstances = append(serviceInstances, serviceInstance...) 
+ require.Len(t, checkInfos, setup.expectedNumSvcInstances) + for _, checkInfo := range checkInfos { + checks := checkInfo.Checks + require.Contains(t, expectedChecks, checks[0].Name) + require.Contains(t, expectedChecks, checks[1].Name) + require.Equal(t, test.ExpConsulNS, checks[0].Namespace) + require.Equal(t, test.ExpConsulNS, checks[1].Namespace) } - require.Len(t, serviceInstances, len(setup.expectedConsulSvcInstances)) - for i, instance := range serviceInstances { - require.Equal(t, setup.expectedConsulSvcInstances[i].ServiceID, instance.ServiceID) - require.Equal(t, setup.expectedConsulSvcInstances[i].ServiceName, instance.ServiceName) - require.Equal(t, setup.expectedConsulSvcInstances[i].ServiceAddress, instance.ServiceAddress) - require.Equal(t, setup.expectedConsulSvcInstances[i].ServicePort, instance.ServicePort) - require.Equal(t, setup.expectedConsulSvcInstances[i].ServiceMeta, instance.ServiceMeta) - require.Equal(t, setup.expectedConsulSvcInstances[i].ServiceTags, instance.ServiceTags) - require.Equal(t, setup.expectedConsulSvcInstances[i].ServiceTaggedAddresses, instance.ServiceTaggedAddresses) - } - - // Check that the Consul health checks was created for the k8s pod. - for _, expectedCheck := range setup.expectedHealthChecks { - var checks api.HealthChecks - filter := fmt.Sprintf("CheckID == `%s`", expectedCheck.CheckID) - checks, _, err := consulClient.Health().Checks(expectedCheck.ServiceName, &api.QueryOptions{Filter: filter, Namespace: expectedCheck.Namespace}) - require.NoError(t, err) - require.Equal(t, len(checks), 1) - var ignoredFields = []string{"Node", "Definition", "Partition", "CreateIndex", "ModifyIndex", "ServiceTags"} - require.True(t, cmp.Equal(checks[0], expectedCheck, cmpopts.IgnoreFields(api.HealthCheck{}, ignoredFields...))) + // Check that the Consul health check was created for the k8s pod. + if setup.expectedAgentHealthChecks != nil { + for i := range setup.expectedConsulSvcInstances { + filter := fmt.Sprintf("CheckID == `%s`", setup.expectedAgentHealthChecks[i].CheckID) + check, err := consulClient.Agent().ChecksWithFilter(filter) + require.NoError(t, err) + require.EqualValues(t, 1, len(check)) + // Ignoring Namespace because the response from ENT includes it and OSS does not. + var ignoredFields = []string{"Node", "Definition", "Namespace", "Partition"} + require.True(t, cmp.Equal(check[setup.expectedAgentHealthChecks[i].CheckID], setup.expectedAgentHealthChecks[i], cmpopts.IgnoreFields(api.AgentCheck{}, ignoredFields...))) + } } }) } @@ -536,20 +318,20 @@ func TestReconcileCreateGatewayWithNamespaces(t *testing.T) { // Tests updating an Endpoints object when Consul namespaces are enabled. // - Tests updates via the register codepath: -// - When an address in an Endpoint is updated, that the corresponding service instance in Consul is updated in the correct Consul namespace. -// - When an address is added to an Endpoint, an additional service instance in Consul is registered in the correct Consul namespace. +// - When an address in an Endpoint is updated, that the corresponding service instance in Consul is updated in the correct Consul namespace. +// - When an address is added to an Endpoint, an additional service instance in Consul is registered in the correct Consul namespace. // - Tests updates via the deregister codepath: -// - When an address is removed from an Endpoint, the corresponding service instance in Consul is deregistered. 
-// - When an address is removed from an Endpoint *and there are no addresses left in the Endpoint*, the +// - When an address is removed from an Endpoint, the corresponding service instance in Consul is deregistered. +// - When an address is removed from an Endpoint *and there are no addresses left in the Endpoint*, the // corresponding service instance in Consul is deregistered. -// // For the register and deregister codepath, this also tests that they work when the Consul service name is different // from the K8s service name. -// This test covers EndpointsController.deregisterService when services should be selectively deregistered +// This test covers EndpointsController.deregisterServiceOnAllAgents when services should be selectively deregistered // since the map will not be nil. func TestReconcileUpdateEndpointWithNamespaces(t *testing.T) { t.Parallel() - nsCases := map[string]struct { + nodeName := "test-node" + cases := map[string]struct { Mirror bool MirrorPrefix string SourceKubeNS string @@ -593,21 +375,22 @@ func TestReconcileUpdateEndpointWithNamespaces(t *testing.T) { ExpConsulNS: "prefix-default", }, } - for name, ts := range nsCases { + for name, ts := range cases { cases := []struct { name string consulSvcName string k8sObjects func() []runtime.Object - initialConsulSvcs []*api.CatalogRegistration + initialConsulSvcs []*api.AgentServiceRegistration expectedConsulSvcInstances []*api.CatalogService expectedProxySvcInstances []*api.CatalogService + expectedAgentHealthChecks []*api.AgentCheck enableACLs bool }{ { - name: "Endpoints has an updated address (pod IP change).", + name: "Legacy service: Health check is added to the correct namespace", consulSvcName: "service-updated", k8sObjects: func() []runtime.Object { - pod1 := createPodWithNamespace("pod1", ts.SourceKubeNS, "4.4.4.4", true, true) + pod1 := createPodWithNamespace("pod1", ts.SourceKubeNS, "1.2.3.4", true, false) endpoint := &corev1.Endpoints{ ObjectMeta: metav1.ObjectMeta{ Name: "service-updated", @@ -617,7 +400,8 @@ func TestReconcileUpdateEndpointWithNamespaces(t *testing.T) { { Addresses: []corev1.EndpointAddress{ { - IP: "4.4.4.4", + IP: "1.2.3.4", + NodeName: &nodeName, TargetRef: &corev1.ObjectReference{ Kind: "Pod", Name: "pod1", @@ -630,34 +414,102 @@ func TestReconcileUpdateEndpointWithNamespaces(t *testing.T) { } return []runtime.Object{pod1, endpoint} }, - initialConsulSvcs: []*api.CatalogRegistration{ + initialConsulSvcs: []*api.AgentServiceRegistration{ { - Node: ConsulNodeName, - Address: ConsulNodeAddress, - Service: &api.AgentService{ - ID: "pod1-service-updated", - Service: "service-updated", - Port: 80, - Address: "1.2.3.4", - Meta: map[string]string{MetaKeyManagedBy: managedByValue}, - Namespace: ts.ExpConsulNS, + ID: "pod1-service-updated", + Name: "service-updated", + Port: 80, + Address: "1.2.3.4", + Namespace: ts.ExpConsulNS, + }, + { + Kind: api.ServiceKindConnectProxy, + ID: "pod1-service-updated-sidecar-proxy", + Name: "service-updated-sidecar-proxy", + Port: 20000, + Address: "1.2.3.4", + Proxy: &api.AgentServiceConnectProxyConfig{ + DestinationServiceName: "service-updated", + DestinationServiceID: "pod1-service-updated", }, + Namespace: ts.ExpConsulNS, + }, + }, + expectedConsulSvcInstances: []*api.CatalogService{ + { + ServiceID: "pod1-service-updated", + ServiceAddress: "1.2.3.4", + Namespace: ts.ExpConsulNS, + }, + }, + expectedProxySvcInstances: []*api.CatalogService{ + { + ServiceID: "pod1-service-updated-sidecar-proxy", + ServiceAddress: "1.2.3.4", + Namespace: ts.ExpConsulNS, 
}, + }, + expectedAgentHealthChecks: []*api.AgentCheck{ { - Node: ConsulNodeName, - Address: "127.0.0.1", - Service: &api.AgentService{ - Kind: api.ServiceKindConnectProxy, - ID: "pod1-service-updated-sidecar-proxy", - Service: "service-updated-sidecar-proxy", - Port: 20000, - Address: "1.2.3.4", - Proxy: &api.AgentServiceConnectProxyConfig{ - DestinationServiceName: "service-updated", - DestinationServiceID: "pod1-service-updated", + CheckID: fmt.Sprintf("%s/pod1-service-updated/kubernetes-health-check", ts.SourceKubeNS), + ServiceName: "service-updated", + ServiceID: "pod1-service-updated", + Name: "Kubernetes Health Check", + Status: api.HealthPassing, + Output: kubernetesSuccessReasonMsg, + Type: ttl, + Namespace: ts.ExpConsulNS, + }, + }, + }, + { + name: "Endpoints has an updated address (pod IP change).", + consulSvcName: "service-updated", + k8sObjects: func() []runtime.Object { + pod1 := createPodWithNamespace("pod1", ts.SourceKubeNS, "4.4.4.4", true, true) + endpoint := &corev1.Endpoints{ + ObjectMeta: metav1.ObjectMeta{ + Name: "service-updated", + Namespace: ts.SourceKubeNS, + }, + Subsets: []corev1.EndpointSubset{ + { + Addresses: []corev1.EndpointAddress{ + { + IP: "4.4.4.4", + NodeName: &nodeName, + TargetRef: &corev1.ObjectReference{ + Kind: "Pod", + Name: "pod1", + Namespace: ts.SourceKubeNS, + }, + }, + }, }, - Namespace: ts.ExpConsulNS, }, + } + return []runtime.Object{pod1, endpoint} + }, + initialConsulSvcs: []*api.AgentServiceRegistration{ + { + ID: "pod1-service-updated", + Name: "service-updated", + Port: 80, + Address: "1.2.3.4", + Meta: map[string]string{MetaKeyManagedBy: managedByValue}, + Namespace: ts.ExpConsulNS, + }, + { + Kind: api.ServiceKindConnectProxy, + ID: "pod1-service-updated-sidecar-proxy", + Name: "service-updated-sidecar-proxy", + Port: 20000, + Address: "1.2.3.4", + Proxy: &api.AgentServiceConnectProxyConfig{ + DestinationServiceName: "service-updated", + DestinationServiceID: "pod1-service-updated", + }, + Namespace: ts.ExpConsulNS, }, }, expectedConsulSvcInstances: []*api.CatalogService{ @@ -690,7 +542,8 @@ func TestReconcileUpdateEndpointWithNamespaces(t *testing.T) { { Addresses: []corev1.EndpointAddress{ { - IP: "4.4.4.4", + IP: "4.4.4.4", + NodeName: &nodeName, TargetRef: &corev1.ObjectReference{ Kind: "Pod", Name: "pod1", @@ -703,34 +556,26 @@ func TestReconcileUpdateEndpointWithNamespaces(t *testing.T) { } return []runtime.Object{pod1, endpoint} }, - initialConsulSvcs: []*api.CatalogRegistration{ + initialConsulSvcs: []*api.AgentServiceRegistration{ { - Node: ConsulNodeName, - Address: ConsulNodeAddress, - Service: &api.AgentService{ - ID: "pod1-different-consul-svc-name", - Service: "different-consul-svc-name", - Port: 80, - Address: "1.2.3.4", - Meta: map[string]string{MetaKeyManagedBy: managedByValue}, - Namespace: ts.ExpConsulNS, - }, + ID: "pod1-different-consul-svc-name", + Name: "different-consul-svc-name", + Port: 80, + Address: "1.2.3.4", + Meta: map[string]string{MetaKeyManagedBy: managedByValue}, + Namespace: ts.ExpConsulNS, }, { - Node: ConsulNodeName, - Address: ConsulNodeAddress, - Service: &api.AgentService{ - Kind: api.ServiceKindConnectProxy, - ID: "pod1-different-consul-svc-name-sidecar-proxy", - Service: "different-consul-svc-name-sidecar-proxy", - Port: 20000, - Address: "1.2.3.4", - Proxy: &api.AgentServiceConnectProxyConfig{ - DestinationServiceName: "different-consul-svc-name", - DestinationServiceID: "pod1-different-consul-svc-name", - }, - Namespace: ts.ExpConsulNS, + Kind: api.ServiceKindConnectProxy, + ID: 
"pod1-different-consul-svc-name-sidecar-proxy", + Name: "different-consul-svc-name-sidecar-proxy", + Port: 20000, + Address: "1.2.3.4", + Proxy: &api.AgentServiceConnectProxyConfig{ + DestinationServiceName: "different-consul-svc-name", + DestinationServiceID: "pod1-different-consul-svc-name", }, + Namespace: ts.ExpConsulNS, }, }, expectedConsulSvcInstances: []*api.CatalogService{ @@ -763,7 +608,8 @@ func TestReconcileUpdateEndpointWithNamespaces(t *testing.T) { { Addresses: []corev1.EndpointAddress{ { - IP: "1.2.3.4", + IP: "1.2.3.4", + NodeName: &nodeName, TargetRef: &corev1.ObjectReference{ Kind: "Pod", Name: "pod1", @@ -771,7 +617,8 @@ func TestReconcileUpdateEndpointWithNamespaces(t *testing.T) { }, }, { - IP: "2.2.3.4", + IP: "2.2.3.4", + NodeName: &nodeName, TargetRef: &corev1.ObjectReference{ Kind: "Pod", Name: "pod2", @@ -784,34 +631,26 @@ func TestReconcileUpdateEndpointWithNamespaces(t *testing.T) { } return []runtime.Object{pod1, pod2, endpointWithTwoAddresses} }, - initialConsulSvcs: []*api.CatalogRegistration{ + initialConsulSvcs: []*api.AgentServiceRegistration{ { - Node: ConsulNodeName, - Address: "127.0.0.1", - Service: &api.AgentService{ - ID: "pod1-service-updated", - Service: "service-updated", - Port: 80, - Address: "1.2.3.4", - Meta: map[string]string{MetaKeyManagedBy: managedByValue}, - Namespace: ts.ExpConsulNS, - }, + ID: "pod1-service-updated", + Name: "service-updated", + Port: 80, + Address: "1.2.3.4", + Meta: map[string]string{MetaKeyManagedBy: managedByValue}, + Namespace: ts.ExpConsulNS, }, { - Node: ConsulNodeName, - Address: ConsulNodeAddress, - Service: &api.AgentService{ - Kind: api.ServiceKindConnectProxy, - ID: "pod1-service-updated-sidecar-proxy", - Service: "service-updated-sidecar-proxy", - Port: 20000, - Address: "1.2.3.4", - Proxy: &api.AgentServiceConnectProxyConfig{ - DestinationServiceName: "service-updated", - DestinationServiceID: "pod1-service-updated", - }, - Namespace: ts.ExpConsulNS, + Kind: api.ServiceKindConnectProxy, + ID: "pod1-service-updated-sidecar-proxy", + Name: "service-updated-sidecar-proxy", + Port: 20000, + Address: "1.2.3.4", + Proxy: &api.AgentServiceConnectProxyConfig{ + DestinationServiceName: "service-updated", + DestinationServiceID: "pod1-service-updated", }, + Namespace: ts.ExpConsulNS, }, }, expectedConsulSvcInstances: []*api.CatalogService{ @@ -853,7 +692,8 @@ func TestReconcileUpdateEndpointWithNamespaces(t *testing.T) { { Addresses: []corev1.EndpointAddress{ { - IP: "1.2.3.4", + IP: "1.2.3.4", + NodeName: &nodeName, TargetRef: &corev1.ObjectReference{ Kind: "Pod", Name: "pod1", @@ -866,64 +706,48 @@ func TestReconcileUpdateEndpointWithNamespaces(t *testing.T) { } return []runtime.Object{pod1, endpoint} }, - initialConsulSvcs: []*api.CatalogRegistration{ + initialConsulSvcs: []*api.AgentServiceRegistration{ { - Node: ConsulNodeName, - Address: "127.0.0.1", - Service: &api.AgentService{ - ID: "pod1-service-updated", - Service: "service-updated", - Port: 80, - Address: "1.2.3.4", - Meta: map[string]string{"k8s-service-name": "service-updated", "k8s-namespace": ts.SourceKubeNS, MetaKeyManagedBy: managedByValue}, - Namespace: ts.ExpConsulNS, - }, + ID: "pod1-service-updated", + Name: "service-updated", + Port: 80, + Address: "1.2.3.4", + Meta: map[string]string{"k8s-service-name": "service-updated", "k8s-namespace": ts.SourceKubeNS, MetaKeyManagedBy: managedByValue}, + Namespace: ts.ExpConsulNS, }, { - Node: ConsulNodeName, - Address: ConsulNodeAddress, - Service: &api.AgentService{ - Kind: api.ServiceKindConnectProxy, 
- ID: "pod1-service-updated-sidecar-proxy", - Service: "service-updated-sidecar-proxy", - Port: 20000, - Address: "1.2.3.4", - Proxy: &api.AgentServiceConnectProxyConfig{ - DestinationServiceName: "service-updated", - DestinationServiceID: "pod1-service-updated", - }, - Meta: map[string]string{"k8s-service-name": "service-updated", "k8s-namespace": ts.SourceKubeNS, MetaKeyManagedBy: managedByValue}, - Namespace: ts.ExpConsulNS, + Kind: api.ServiceKindConnectProxy, + ID: "pod1-service-updated-sidecar-proxy", + Name: "service-updated-sidecar-proxy", + Port: 20000, + Address: "1.2.3.4", + Proxy: &api.AgentServiceConnectProxyConfig{ + DestinationServiceName: "service-updated", + DestinationServiceID: "pod1-service-updated", }, + Meta: map[string]string{"k8s-service-name": "service-updated", "k8s-namespace": ts.SourceKubeNS, MetaKeyManagedBy: managedByValue}, + Namespace: ts.ExpConsulNS, }, { - Node: ConsulNodeName, - Address: "127.0.0.1", - Service: &api.AgentService{ - ID: "pod2-service-updated", - Service: "service-updated", - Port: 80, - Address: "2.2.3.4", - Meta: map[string]string{"k8s-service-name": "service-updated", "k8s-namespace": ts.SourceKubeNS, MetaKeyManagedBy: managedByValue}, - Namespace: ts.ExpConsulNS, - }, + ID: "pod2-service-updated", + Name: "service-updated", + Port: 80, + Address: "2.2.3.4", + Meta: map[string]string{"k8s-service-name": "service-updated", "k8s-namespace": ts.SourceKubeNS, MetaKeyManagedBy: managedByValue}, + Namespace: ts.ExpConsulNS, }, { - Node: ConsulNodeName, - Address: ConsulNodeAddress, - Service: &api.AgentService{ - Kind: api.ServiceKindConnectProxy, - ID: "pod2-service-updated-sidecar-proxy", - Service: "service-updated-sidecar-proxy", - Port: 20000, - Address: "2.2.3.4", - Proxy: &api.AgentServiceConnectProxyConfig{ - DestinationServiceName: "service-updated", - DestinationServiceID: "pod2-service-updated", - }, - Meta: map[string]string{"k8s-service-name": "service-updated", "k8s-namespace": ts.SourceKubeNS, MetaKeyManagedBy: managedByValue}, - Namespace: ts.ExpConsulNS, + Kind: api.ServiceKindConnectProxy, + ID: "pod2-service-updated-sidecar-proxy", + Name: "service-updated-sidecar-proxy", + Port: 20000, + Address: "2.2.3.4", + Proxy: &api.AgentServiceConnectProxyConfig{ + DestinationServiceName: "service-updated", + DestinationServiceID: "pod2-service-updated", }, + Meta: map[string]string{"k8s-service-name": "service-updated", "k8s-namespace": ts.SourceKubeNS, MetaKeyManagedBy: managedByValue}, + Namespace: ts.ExpConsulNS, }, }, expectedConsulSvcInstances: []*api.CatalogService{ @@ -956,7 +780,8 @@ func TestReconcileUpdateEndpointWithNamespaces(t *testing.T) { { Addresses: []corev1.EndpointAddress{ { - IP: "1.2.3.4", + IP: "1.2.3.4", + NodeName: &nodeName, TargetRef: &corev1.ObjectReference{ Kind: "Pod", Name: "pod1", @@ -969,64 +794,48 @@ func TestReconcileUpdateEndpointWithNamespaces(t *testing.T) { } return []runtime.Object{pod1, endpoint} }, - initialConsulSvcs: []*api.CatalogRegistration{ + initialConsulSvcs: []*api.AgentServiceRegistration{ { - Node: ConsulNodeName, - Address: "127.0.0.1", - Service: &api.AgentService{ - ID: "pod1-different-consul-svc-name", - Service: "different-consul-svc-name", - Port: 80, - Address: "1.2.3.4", - Meta: map[string]string{"k8s-service-name": "service-updated", "k8s-namespace": ts.SourceKubeNS, MetaKeyManagedBy: managedByValue}, - Namespace: ts.ExpConsulNS, - }, + ID: "pod1-different-consul-svc-name", + Name: "different-consul-svc-name", + Port: 80, + Address: "1.2.3.4", + Meta: 
map[string]string{"k8s-service-name": "service-updated", "k8s-namespace": ts.SourceKubeNS, MetaKeyManagedBy: managedByValue}, + Namespace: ts.ExpConsulNS, }, { - Node: ConsulNodeName, - Address: ConsulNodeAddress, - Service: &api.AgentService{ - Kind: api.ServiceKindConnectProxy, - ID: "pod1-different-consul-svc-name-sidecar-proxy", - Service: "different-consul-svc-name-sidecar-proxy", - Port: 20000, - Address: "1.2.3.4", - Proxy: &api.AgentServiceConnectProxyConfig{ - DestinationServiceName: "different-consul-svc-name", - DestinationServiceID: "pod1-different-consul-svc-name", - }, - Meta: map[string]string{"k8s-service-name": "service-updated", "k8s-namespace": ts.SourceKubeNS, MetaKeyManagedBy: managedByValue}, - Namespace: ts.ExpConsulNS, + Kind: api.ServiceKindConnectProxy, + ID: "pod1-different-consul-svc-name-sidecar-proxy", + Name: "different-consul-svc-name-sidecar-proxy", + Port: 20000, + Address: "1.2.3.4", + Proxy: &api.AgentServiceConnectProxyConfig{ + DestinationServiceName: "different-consul-svc-name", + DestinationServiceID: "pod1-different-consul-svc-name", }, + Meta: map[string]string{"k8s-service-name": "service-updated", "k8s-namespace": ts.SourceKubeNS, MetaKeyManagedBy: managedByValue}, + Namespace: ts.ExpConsulNS, }, { - Node: ConsulNodeName, - Address: "127.0.0.1", - Service: &api.AgentService{ - ID: "pod2-different-consul-svc-name", - Service: "different-consul-svc-name", - Port: 80, - Address: "2.2.3.4", - Meta: map[string]string{"k8s-service-name": "service-updated", "k8s-namespace": ts.SourceKubeNS, MetaKeyManagedBy: managedByValue}, - Namespace: ts.ExpConsulNS, - }, + ID: "pod2-different-consul-svc-name", + Name: "different-consul-svc-name", + Port: 80, + Address: "2.2.3.4", + Meta: map[string]string{"k8s-service-name": "service-updated", "k8s-namespace": ts.SourceKubeNS, MetaKeyManagedBy: managedByValue}, + Namespace: ts.ExpConsulNS, }, { - Node: ConsulNodeName, - Address: ConsulNodeAddress, - Service: &api.AgentService{ - Kind: api.ServiceKindConnectProxy, - ID: "pod2-different-consul-svc-name-sidecar-proxy", - Service: "different-consul-svc-name-sidecar-proxy", - Port: 20000, - Address: "2.2.3.4", - Proxy: &api.AgentServiceConnectProxyConfig{ - DestinationServiceName: "different-consul-svc-name", - DestinationServiceID: "pod2-different-consul-svc-name", - }, - Meta: map[string]string{"k8s-service-name": "service-updated", "k8s-namespace": ts.SourceKubeNS, MetaKeyManagedBy: managedByValue}, - Namespace: ts.ExpConsulNS, + Kind: api.ServiceKindConnectProxy, + ID: "pod2-different-consul-svc-name-sidecar-proxy", + Name: "different-consul-svc-name-sidecar-proxy", + Port: 20000, + Address: "2.2.3.4", + Proxy: &api.AgentServiceConnectProxyConfig{ + DestinationServiceName: "different-consul-svc-name", + DestinationServiceID: "pod2-different-consul-svc-name", }, + Meta: map[string]string{"k8s-service-name": "service-updated", "k8s-namespace": ts.SourceKubeNS, MetaKeyManagedBy: managedByValue}, + Namespace: ts.ExpConsulNS, }, }, expectedConsulSvcInstances: []*api.CatalogService{ @@ -1058,64 +867,48 @@ func TestReconcileUpdateEndpointWithNamespaces(t *testing.T) { } return []runtime.Object{endpoint} }, - initialConsulSvcs: []*api.CatalogRegistration{ + initialConsulSvcs: []*api.AgentServiceRegistration{ { - Node: ConsulNodeName, - Address: "127.0.0.1", - Service: &api.AgentService{ - ID: "pod1-service-updated", - Service: "service-updated", - Port: 80, - Address: "1.2.3.4", - Meta: map[string]string{"k8s-service-name": "service-updated", "k8s-namespace": 
ts.SourceKubeNS, MetaKeyManagedBy: managedByValue}, - Namespace: ts.ExpConsulNS, - }, + ID: "pod1-service-updated", + Name: "service-updated", + Port: 80, + Address: "1.2.3.4", + Meta: map[string]string{"k8s-service-name": "service-updated", "k8s-namespace": ts.SourceKubeNS, MetaKeyManagedBy: managedByValue}, + Namespace: ts.ExpConsulNS, }, { - Node: ConsulNodeName, - Address: ConsulNodeAddress, - Service: &api.AgentService{ - Kind: api.ServiceKindConnectProxy, - ID: "pod1-service-updated-sidecar-proxy", - Service: "service-updated-sidecar-proxy", - Port: 20000, - Address: "1.2.3.4", - Proxy: &api.AgentServiceConnectProxyConfig{ - DestinationServiceName: "service-updated", - DestinationServiceID: "pod1-service-updated", - }, - Meta: map[string]string{"k8s-service-name": "service-updated", "k8s-namespace": ts.SourceKubeNS, MetaKeyManagedBy: managedByValue}, - Namespace: ts.ExpConsulNS, + Kind: api.ServiceKindConnectProxy, + ID: "pod1-service-updated-sidecar-proxy", + Name: "service-updated-sidecar-proxy", + Port: 20000, + Address: "1.2.3.4", + Proxy: &api.AgentServiceConnectProxyConfig{ + DestinationServiceName: "service-updated", + DestinationServiceID: "pod1-service-updated", }, + Meta: map[string]string{"k8s-service-name": "service-updated", "k8s-namespace": ts.SourceKubeNS, MetaKeyManagedBy: managedByValue}, + Namespace: ts.ExpConsulNS, }, { - Node: ConsulNodeName, - Address: "127.0.0.1", - Service: &api.AgentService{ - ID: "pod2-service-updated", - Service: "service-updated", - Port: 80, - Address: "2.2.3.4", - Meta: map[string]string{"k8s-service-name": "service-updated", "k8s-namespace": ts.SourceKubeNS, MetaKeyManagedBy: managedByValue}, - Namespace: ts.ExpConsulNS, - }, + ID: "pod2-service-updated", + Name: "service-updated", + Port: 80, + Address: "2.2.3.4", + Meta: map[string]string{"k8s-service-name": "service-updated", "k8s-namespace": ts.SourceKubeNS, MetaKeyManagedBy: managedByValue}, + Namespace: ts.ExpConsulNS, }, { - Node: ConsulNodeName, - Address: ConsulNodeAddress, - Service: &api.AgentService{ - Kind: api.ServiceKindConnectProxy, - ID: "pod2-service-updated-sidecar-proxy", - Service: "service-updated-sidecar-proxy", - Port: 20000, - Address: "2.2.3.4", - Proxy: &api.AgentServiceConnectProxyConfig{ - DestinationServiceName: "service-updated", - DestinationServiceID: "pod2-service-updated", - }, - Meta: map[string]string{"k8s-service-name": "service-updated", "k8s-namespace": ts.SourceKubeNS, MetaKeyManagedBy: managedByValue}, - Namespace: ts.ExpConsulNS, + Kind: api.ServiceKindConnectProxy, + ID: "pod2-service-updated-sidecar-proxy", + Name: "service-updated-sidecar-proxy", + Port: 20000, + Address: "2.2.3.4", + Proxy: &api.AgentServiceConnectProxyConfig{ + DestinationServiceName: "service-updated", + DestinationServiceID: "pod2-service-updated", }, + Meta: map[string]string{"k8s-service-name": "service-updated", "k8s-namespace": ts.SourceKubeNS, MetaKeyManagedBy: managedByValue}, + Namespace: ts.ExpConsulNS, }, }, expectedConsulSvcInstances: []*api.CatalogService{}, @@ -1135,64 +928,48 @@ func TestReconcileUpdateEndpointWithNamespaces(t *testing.T) { } return []runtime.Object{endpoint} }, - initialConsulSvcs: []*api.CatalogRegistration{ + initialConsulSvcs: []*api.AgentServiceRegistration{ { - Node: ConsulNodeName, - Address: "127.0.0.1", - Service: &api.AgentService{ - ID: "pod1-different-consul-svc-name", - Service: "different-consul-svc-name", - Port: 80, - Address: "1.2.3.4", - Meta: map[string]string{"k8s-service-name": "service-updated", "k8s-namespace": 
ts.SourceKubeNS, MetaKeyManagedBy: managedByValue}, - Namespace: ts.ExpConsulNS, - }, + ID: "pod1-different-consul-svc-name", + Name: "different-consul-svc-name", + Port: 80, + Address: "1.2.3.4", + Meta: map[string]string{"k8s-service-name": "service-updated", "k8s-namespace": ts.SourceKubeNS, MetaKeyManagedBy: managedByValue}, + Namespace: ts.ExpConsulNS, }, { - Node: ConsulNodeName, - Address: ConsulNodeAddress, - Service: &api.AgentService{ - Kind: api.ServiceKindConnectProxy, - ID: "pod1-different-consul-svc-name-sidecar-proxy", - Service: "different-consul-svc-name-sidecar-proxy", - Port: 20000, - Address: "1.2.3.4", - Proxy: &api.AgentServiceConnectProxyConfig{ - DestinationServiceName: "different-consul-svc-name", - DestinationServiceID: "pod1-different-consul-svc-name", - }, - Meta: map[string]string{"k8s-service-name": "service-updated", "k8s-namespace": ts.SourceKubeNS, MetaKeyManagedBy: managedByValue}, - Namespace: ts.ExpConsulNS, + Kind: api.ServiceKindConnectProxy, + ID: "pod1-different-consul-svc-name-sidecar-proxy", + Name: "different-consul-svc-name-sidecar-proxy", + Port: 20000, + Address: "1.2.3.4", + Proxy: &api.AgentServiceConnectProxyConfig{ + DestinationServiceName: "different-consul-svc-name", + DestinationServiceID: "pod1-different-consul-svc-name", }, + Meta: map[string]string{"k8s-service-name": "service-updated", "k8s-namespace": ts.SourceKubeNS, MetaKeyManagedBy: managedByValue}, + Namespace: ts.ExpConsulNS, }, { - Node: ConsulNodeName, - Address: "127.0.0.1", - Service: &api.AgentService{ - ID: "pod2-different-consul-svc-name", - Service: "different-consul-svc-name", - Port: 80, - Address: "2.2.3.4", - Meta: map[string]string{"k8s-service-name": "service-updated", "k8s-namespace": ts.SourceKubeNS, MetaKeyManagedBy: managedByValue}, - Namespace: ts.ExpConsulNS, - }, + ID: "pod2-different-consul-svc-name", + Name: "different-consul-svc-name", + Port: 80, + Address: "2.2.3.4", + Meta: map[string]string{"k8s-service-name": "service-updated", "k8s-namespace": ts.SourceKubeNS, MetaKeyManagedBy: managedByValue}, + Namespace: ts.ExpConsulNS, }, { - Node: ConsulNodeName, - Address: ConsulNodeAddress, - Service: &api.AgentService{ - Kind: api.ServiceKindConnectProxy, - ID: "pod2-different-consul-svc-name-sidecar-proxy", - Service: "different-consul-svc-name-sidecar-proxy", - Port: 20000, - Address: "2.2.3.4", - Proxy: &api.AgentServiceConnectProxyConfig{ - DestinationServiceName: "different-consul-svc-name", - DestinationServiceID: "pod2-different-consul-svc-name", - }, - Meta: map[string]string{"k8s-service-name": "service-updated", "k8s-namespace": ts.SourceKubeNS, MetaKeyManagedBy: managedByValue}, - Namespace: ts.ExpConsulNS, + Kind: api.ServiceKindConnectProxy, + ID: "pod2-different-consul-svc-name-sidecar-proxy", + Name: "different-consul-svc-name-sidecar-proxy", + Port: 20000, + Address: "2.2.3.4", + Proxy: &api.AgentServiceConnectProxyConfig{ + DestinationServiceName: "different-consul-svc-name", + DestinationServiceID: "pod2-different-consul-svc-name", }, + Meta: map[string]string{"k8s-service-name": "service-updated", "k8s-namespace": ts.SourceKubeNS, MetaKeyManagedBy: managedByValue}, + Namespace: ts.ExpConsulNS, }, }, expectedConsulSvcInstances: []*api.CatalogService{}, @@ -1212,7 +989,8 @@ func TestReconcileUpdateEndpointWithNamespaces(t *testing.T) { { Addresses: []corev1.EndpointAddress{ { - IP: "4.4.4.4", + IP: "4.4.4.4", + NodeName: &nodeName, TargetRef: &corev1.ObjectReference{ Kind: "Pod", Name: "pod2", @@ -1225,47 +1003,37 @@ func 
TestReconcileUpdateEndpointWithNamespaces(t *testing.T) { } return []runtime.Object{pod2, endpoint} }, - initialConsulSvcs: []*api.CatalogRegistration{ + initialConsulSvcs: []*api.AgentServiceRegistration{ { - Node: ConsulNodeName, - Address: "127.0.0.1", - Service: &api.AgentService{ - ID: "pod1-service-updated", - Service: "service-updated", - Port: 80, - Address: "1.2.3.4", - Meta: map[string]string{ - MetaKeyManagedBy: managedByValue, - MetaKeyKubeServiceName: "service-updated", - MetaKeyPodName: "pod1", - MetaKeyKubeNS: ts.SourceKubeNS, - MetaKeySyntheticNode: "true", - }, - Namespace: ts.ExpConsulNS, + ID: "pod1-service-updated", + Name: "service-updated", + Port: 80, + Address: "1.2.3.4", + Meta: map[string]string{ + MetaKeyManagedBy: managedByValue, + MetaKeyKubeServiceName: "service-updated", + MetaKeyPodName: "pod1", + MetaKeyKubeNS: ts.SourceKubeNS, }, + Namespace: ts.ExpConsulNS, }, { - Node: ConsulNodeName, - Address: ConsulNodeAddress, - Service: &api.AgentService{ - Kind: api.ServiceKindConnectProxy, - ID: "pod1-service-updated-sidecar-proxy", - Service: "service-updated-sidecar-proxy", - Port: 20000, - Address: "1.2.3.4", - Proxy: &api.AgentServiceConnectProxyConfig{ - DestinationServiceName: "service-updated", - DestinationServiceID: "pod1-service-updated", - }, - Meta: map[string]string{ - MetaKeyManagedBy: managedByValue, - MetaKeyKubeServiceName: "service-updated", - MetaKeyPodName: "pod1", - MetaKeyKubeNS: ts.SourceKubeNS, - MetaKeySyntheticNode: "true", - }, - Namespace: ts.ExpConsulNS, + Kind: api.ServiceKindConnectProxy, + ID: "pod1-service-updated-sidecar-proxy", + Name: "service-updated-sidecar-proxy", + Port: 20000, + Address: "1.2.3.4", + Proxy: &api.AgentServiceConnectProxyConfig{ + DestinationServiceName: "service-updated", + DestinationServiceID: "pod1-service-updated", + }, + Meta: map[string]string{ + MetaKeyManagedBy: managedByValue, + MetaKeyKubeServiceName: "service-updated", + MetaKeyPodName: "pod1", + MetaKeyKubeNS: ts.SourceKubeNS, }, + Namespace: ts.ExpConsulNS, }, }, expectedConsulSvcInstances: []*api.CatalogService{ @@ -1298,7 +1066,8 @@ func TestReconcileUpdateEndpointWithNamespaces(t *testing.T) { { Addresses: []corev1.EndpointAddress{ { - IP: "1.2.3.4", + IP: "1.2.3.4", + NodeName: &nodeName, TargetRef: &corev1.ObjectReference{ Kind: "Pod", Name: "pod1", @@ -1311,88 +1080,68 @@ func TestReconcileUpdateEndpointWithNamespaces(t *testing.T) { } return []runtime.Object{pod1, endpoint} }, - initialConsulSvcs: []*api.CatalogRegistration{ + initialConsulSvcs: []*api.AgentServiceRegistration{ { - Node: ConsulNodeName, - Address: "127.0.0.1", - Service: &api.AgentService{ - ID: "pod1-service-updated", - Service: "service-updated", - Port: 80, - Address: "1.2.3.4", - Meta: map[string]string{ - MetaKeyKubeServiceName: "service-updated", - MetaKeyKubeNS: ts.SourceKubeNS, - MetaKeyManagedBy: managedByValue, - MetaKeyPodName: "pod1", - MetaKeySyntheticNode: "true", - }, - Namespace: ts.ExpConsulNS, + ID: "pod1-service-updated", + Name: "service-updated", + Port: 80, + Address: "1.2.3.4", + Meta: map[string]string{ + MetaKeyKubeServiceName: "service-updated", + MetaKeyKubeNS: ts.SourceKubeNS, + MetaKeyManagedBy: managedByValue, + MetaKeyPodName: "pod1", }, + Namespace: ts.ExpConsulNS, }, { - Node: ConsulNodeName, - Address: ConsulNodeAddress, - Service: &api.AgentService{ - Kind: api.ServiceKindConnectProxy, - ID: "pod1-service-updated-sidecar-proxy", - Service: "service-updated-sidecar-proxy", - Port: 20000, - Address: "1.2.3.4", - Proxy: 
&api.AgentServiceConnectProxyConfig{ - DestinationServiceName: "service-updated", - DestinationServiceID: "pod1-service-updated", - }, - Meta: map[string]string{ - MetaKeyKubeServiceName: "service-updated", - MetaKeyKubeNS: ts.SourceKubeNS, - MetaKeyManagedBy: managedByValue, - MetaKeyPodName: "pod1", - MetaKeySyntheticNode: "true", - }, - Namespace: ts.ExpConsulNS, + Kind: api.ServiceKindConnectProxy, + ID: "pod1-service-updated-sidecar-proxy", + Name: "service-updated-sidecar-proxy", + Port: 20000, + Address: "1.2.3.4", + Proxy: &api.AgentServiceConnectProxyConfig{ + DestinationServiceName: "service-updated", + DestinationServiceID: "pod1-service-updated", + }, + Meta: map[string]string{ + MetaKeyKubeServiceName: "service-updated", + MetaKeyKubeNS: ts.SourceKubeNS, + MetaKeyManagedBy: managedByValue, + MetaKeyPodName: "pod1", }, + Namespace: ts.ExpConsulNS, }, { - Node: ConsulNodeName, - Address: "127.0.0.1", - Service: &api.AgentService{ - ID: "pod2-service-updated", - Service: "service-updated", - Port: 80, - Address: "2.2.3.4", - Meta: map[string]string{ - MetaKeyKubeServiceName: "service-updated", - MetaKeyKubeNS: ts.SourceKubeNS, - MetaKeyManagedBy: managedByValue, - MetaKeyPodName: "pod2", - MetaKeySyntheticNode: "true", - }, - Namespace: ts.ExpConsulNS, + ID: "pod2-service-updated", + Name: "service-updated", + Port: 80, + Address: "2.2.3.4", + Meta: map[string]string{ + MetaKeyKubeServiceName: "service-updated", + MetaKeyKubeNS: ts.SourceKubeNS, + MetaKeyManagedBy: managedByValue, + MetaKeyPodName: "pod2", }, + Namespace: ts.ExpConsulNS, }, { - Node: ConsulNodeName, - Address: ConsulNodeAddress, - Service: &api.AgentService{ - Kind: api.ServiceKindConnectProxy, - ID: "pod2-service-updated-sidecar-proxy", - Service: "service-updated-sidecar-proxy", - Port: 20000, - Address: "2.2.3.4", - Proxy: &api.AgentServiceConnectProxyConfig{ - DestinationServiceName: "service-updated", - DestinationServiceID: "pod2-service-updated", - }, - Meta: map[string]string{ - MetaKeyKubeServiceName: "service-updated", - MetaKeyKubeNS: ts.SourceKubeNS, - MetaKeyManagedBy: managedByValue, - MetaKeyPodName: "pod2", - MetaKeySyntheticNode: "true", - }, - Namespace: ts.ExpConsulNS, + Kind: api.ServiceKindConnectProxy, + ID: "pod2-service-updated-sidecar-proxy", + Name: "service-updated-sidecar-proxy", + Port: 20000, + Address: "2.2.3.4", + Proxy: &api.AgentServiceConnectProxyConfig{ + DestinationServiceName: "service-updated", + DestinationServiceID: "pod2-service-updated", + }, + Meta: map[string]string{ + MetaKeyKubeServiceName: "service-updated", + MetaKeyKubeNS: ts.SourceKubeNS, + MetaKeyManagedBy: managedByValue, + MetaKeyPodName: "pod2", }, + Namespace: ts.ExpConsulNS, }, }, expectedConsulSvcInstances: []*api.CatalogService{ @@ -1414,24 +1163,46 @@ func TestReconcileUpdateEndpointWithNamespaces(t *testing.T) { } for _, tt := range cases { t.Run(fmt.Sprintf("%s: %s", name, tt.name), func(t *testing.T) { + // The agent pod needs to have the address 127.0.0.1 so when the + // code gets the agent pods via the label component=client, and + // makes requests against the agent API, it will actually hit the + // test server we have on localhost. + fakeClientPod := createPod("fake-consul-client", "127.0.0.1", false, true) + fakeClientPod.Labels = map[string]string{"component": "client", "app": "consul", "release": "consul"} + // Add the pods namespace. ns := corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: ts.SourceKubeNS}} // Create fake k8s client. 
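The fake client pod created above matters because the controller discovers Consul client agents by listing pods that carry the component=client, app=consul, and release labels, then issues agent API calls against each pod's IP; pointing that IP at 127.0.0.1 redirects those calls to the local test server. A minimal sketch of that label-based lookup, assuming the controller-runtime client and the label values used in these tests (agentPodIPs is an illustrative helper, not a method of the real controller):

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// agentPodIPs returns the pod IPs of Consul client agents, selected by
// the same labels the tests put on fakeClientPod.
func agentPodIPs(ctx context.Context, c client.Client, releaseNS string) ([]string, error) {
	var pods corev1.PodList
	if err := c.List(ctx, &pods,
		client.InNamespace(releaseNS),
		client.MatchingLabels{"component": "client", "app": "consul", "release": "consul"}); err != nil {
		return nil, err
	}
	ips := make([]string, 0, len(pods.Items))
	for _, p := range pods.Items {
		ips = append(ips, p.Status.PodIP) // 127.0.0.1 in these tests
	}
	return ips, nil
}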
- k8sObjects := append(tt.k8sObjects(), &ns) + k8sObjects := append(tt.k8sObjects(), fakeClientPod, &ns) fakeClient := fake.NewClientBuilder().WithRuntimeObjects(k8sObjects...).Build() - // Create test consulServer server adminToken := "123e4567-e89b-12d3-a456-426614174000" - testClient := test.TestServerWithMockConnMgrWatcher(t, func(c *testutil.TestServerConfig) { + consul, err := testutil.NewTestServerConfigT(t, func(c *testutil.TestServerConfig) { if tt.enableACLs { - c.ACL.Enabled = tt.enableACLs + c.ACL.Enabled = true c.ACL.Tokens.InitialManagement = adminToken } + c.NodeName = nodeName }) + require.NoError(t, err) + defer consul.Stop() + consul.WaitForSerfCheck(t) + + cfg := &api.Config{ + Scheme: "http", + Address: consul.HTTPAddr, + Namespace: ts.ExpConsulNS, + } + if tt.enableACLs { + cfg.Token = adminToken + } - consulClient := testClient.APIClient + consulClient, err := api.NewClient(cfg) + require.NoError(t, err) + addr := strings.Split(cfg.Address, ":") + consulPort := addr[1] - _, err := namespaces.EnsureExists(consulClient, ts.ExpConsulNS, "") + _, err = namespaces.EnsureExists(consulClient, ts.ExpConsulNS, "") require.NoError(t, err) // Holds token accessorID for each service ID. @@ -1439,24 +1210,22 @@ func TestReconcileUpdateEndpointWithNamespaces(t *testing.T) { // Register service and proxy in Consul. for _, svc := range tt.initialConsulSvcs { - _, err = consulClient.Catalog().Register(svc, nil) + err = consulClient.Agent().ServiceRegister(svc) require.NoError(t, err) // Create a token for this service if ACLs are enabled. if tt.enableACLs { - if svc.Service.Kind != api.ServiceKindConnectProxy { + if svc.Kind != api.ServiceKindConnectProxy { var writeOpts api.WriteOptions // When mirroring is enabled, the auth method will be created in the "default" Consul namespace. 
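The ts.ExpConsulNS values driving these cases follow directly from the controller's namespace settings: with mirroring enabled, each Kubernetes namespace maps to a Consul namespace of the same name (optionally prefixed); otherwise all services land in a single destination namespace. A minimal sketch of that mapping, with consulNamespace as an illustrative name rather than the controller's actual helper:

// consulNamespace mirrors the expectation encoded in ts.ExpConsulNS.
func consulNamespace(kubeNS string, mirror bool, prefix, destNS string) string {
	if mirror {
		// e.g. prefix "k8s-" and kube namespace "default" -> "k8s-default"
		return prefix + kubeNS
	}
	return destNS
}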
if ts.Mirror { writeOpts.Namespace = "default" - } else { - writeOpts.Namespace = ts.ExpConsulNS } - test.SetupK8sAuthMethodWithNamespaces(t, consulClient, svc.Service.Service, svc.Service.Meta[MetaKeyKubeNS], ts.ExpConsulNS, ts.Mirror, ts.MirrorPrefix) + test.SetupK8sAuthMethodWithNamespaces(t, consulClient, svc.Name, svc.Meta[MetaKeyKubeNS], ts.ExpConsulNS, ts.Mirror, ts.MirrorPrefix) token, _, err := consulClient.ACL().Login(&api.ACLLoginParams{ AuthMethod: test.AuthMethod, BearerToken: test.ServiceAccountJWTToken, Meta: map[string]string{ - TokenMetaPodNameKey: fmt.Sprintf("%s/%s", svc.Service.Meta[MetaKeyKubeNS], svc.Service.Meta[MetaKeyPodName]), + TokenMetaPodNameKey: fmt.Sprintf("%s/%s", svc.Meta[MetaKeyKubeNS], svc.Meta[MetaKeyPodName]), }, }, &writeOpts) @@ -1475,11 +1244,11 @@ func TestReconcileUpdateEndpointWithNamespaces(t *testing.T) { AuthMethod: test.AuthMethod, BearerToken: test.ServiceAccountJWTToken, Meta: map[string]string{ - TokenMetaPodNameKey: fmt.Sprintf("%s/%s", svc.Service.Meta[MetaKeyKubeNS], "does-not-exist"), + TokenMetaPodNameKey: fmt.Sprintf("%s/%s", svc.Meta[MetaKeyKubeNS], "does-not-exist"), }, }, &writeOpts) require.NoError(t, err) - tokensForServices["does-not-exist"+svc.Service.Service] = token.AccessorID + tokensForServices["does-not-exist"+svc.Name] = token.AccessorID } } } @@ -1488,12 +1257,14 @@ func TestReconcileUpdateEndpointWithNamespaces(t *testing.T) { ep := &EndpointsController{ Client: fakeClient, Log: logrtest.TestLogger{T: t}, - ConsulClientConfig: testClient.Cfg, - ConsulServerConnMgr: testClient.Watcher, + ConsulClient: consulClient, + ConsulPort: consulPort, + ConsulScheme: cfg.Scheme, AllowK8sNamespacesSet: mapset.NewSetWith("*"), DenyK8sNamespacesSet: mapset.NewSetWith(), ReleaseName: "consul", ReleaseNamespace: "default", + ConsulClientCfg: cfg, EnableConsulNamespaces: true, EnableNSMirroring: ts.Mirror, NSMirroringPrefix: ts.MirrorPrefix, @@ -1513,12 +1284,6 @@ func TestReconcileUpdateEndpointWithNamespaces(t *testing.T) { require.NoError(t, err) require.False(t, resp.Requeue) - // Create new consul client with the expected consul ns so we can make calls for assertions. - consulConfig := testClient.Cfg - consulConfig.APIClientConfig.Namespace = ts.ExpConsulNS - consulClient, err = api.NewClient(consulConfig.APIClientConfig) - require.NoError(t, err) - // After reconciliation, Consul should have service-updated with the correct number of instances. serviceInstances, _, err := consulClient.Catalog().Service(tt.consulSvcName, "", &api.QueryOptions{Namespace: ts.ExpConsulNS}) require.NoError(t, err) @@ -1535,6 +1300,19 @@ func TestReconcileUpdateEndpointWithNamespaces(t *testing.T) { require.Equal(t, tt.expectedProxySvcInstances[i].ServiceAddress, instance.ServiceAddress) } + // Check that the Consul health check was created for the k8s pod. + if tt.expectedAgentHealthChecks != nil { + for i := range tt.expectedConsulSvcInstances { + filter := fmt.Sprintf("CheckID == `%s`", tt.expectedAgentHealthChecks[i].CheckID) + check, err := consulClient.Agent().ChecksWithFilter(filter) + require.NoError(t, err) + require.EqualValues(t, 1, len(check)) + // Ignoring Namespace because the response from ENT includes it and OSS does not. 
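The health-check assertion that follows compares api.AgentCheck values with go-cmp while skipping fields whose values depend on the environment (Enterprise responses include Namespace and Partition, OSS responses do not). The same pattern in isolation, as a hedged standalone helper:

import (
	"github.com/google/go-cmp/cmp"
	"github.com/google/go-cmp/cmp/cmpopts"
	"github.com/hashicorp/consul/api"
)

// agentChecksEqual compares two checks while ignoring fields that differ
// between Consul OSS and Enterprise (and between nodes).
func agentChecksEqual(got, want *api.AgentCheck) bool {
	ignored := cmpopts.IgnoreFields(api.AgentCheck{}, "Node", "Definition", "Namespace", "Partition")
	return cmp.Equal(got, want, ignored)
}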
+ var ignoredFields = []string{"Node", "Definition", "Namespace", "Partition"} + require.True(t, cmp.Equal(check[tt.expectedAgentHealthChecks[i].CheckID], tt.expectedAgentHealthChecks[i], cmpopts.IgnoreFields(api.AgentCheck{}, ignoredFields...))) + } + } + if tt.enableACLs { // Put expected services into a map to make it easier to find service IDs. expectedServices := mapset.NewSet() @@ -1570,9 +1348,10 @@ func TestReconcileUpdateEndpointWithNamespaces(t *testing.T) { } // Tests deleting an Endpoints object, with and without matching Consul and K8s service names when Consul namespaces are enabled. -// This test covers EndpointsController.deregisterService when the map is nil (not selectively deregistered). +// This test covers EndpointsController.deregisterServiceOnAllAgents when the map is nil (not selectively deregistered). func TestReconcileDeleteEndpointWithNamespaces(t *testing.T) { t.Parallel() + nodeName := "test-node" cases := map[string]struct { Mirror bool MirrorPrefix string @@ -1621,16 +1400,16 @@ func TestReconcileDeleteEndpointWithNamespaces(t *testing.T) { cases := []struct { name string consulSvcName string - initialConsulSvcs []*api.AgentService + initialConsulSvcs []*api.AgentServiceRegistration enableACLs bool }{ { name: "Consul service name matches K8s service name", consulSvcName: "service-deleted", - initialConsulSvcs: []*api.AgentService{ + initialConsulSvcs: []*api.AgentServiceRegistration{ { ID: "pod1-service-deleted", - Service: "service-deleted", + Name: "service-deleted", Port: 80, Address: "1.2.3.4", Meta: map[string]string{"k8s-service-name": "service-deleted", "k8s-namespace": ts.SourceKubeNS, MetaKeyManagedBy: managedByValue}, @@ -1639,7 +1418,7 @@ func TestReconcileDeleteEndpointWithNamespaces(t *testing.T) { { Kind: api.ServiceKindConnectProxy, ID: "pod1-service-deleted-sidecar-proxy", - Service: "service-deleted-sidecar-proxy", + Name: "service-deleted-sidecar-proxy", Port: 20000, Address: "1.2.3.4", Proxy: &api.AgentServiceConnectProxyConfig{ @@ -1654,10 +1433,10 @@ func TestReconcileDeleteEndpointWithNamespaces(t *testing.T) { { name: "Consul service name does not match K8s service name", consulSvcName: "different-consul-svc-name", - initialConsulSvcs: []*api.AgentService{ + initialConsulSvcs: []*api.AgentServiceRegistration{ { ID: "pod1-different-consul-svc-name", - Service: "different-consul-svc-name", + Name: "different-consul-svc-name", Port: 80, Address: "1.2.3.4", Meta: map[string]string{"k8s-service-name": "service-deleted", "k8s-namespace": ts.SourceKubeNS, MetaKeyManagedBy: managedByValue}, @@ -1666,7 +1445,7 @@ func TestReconcileDeleteEndpointWithNamespaces(t *testing.T) { { Kind: api.ServiceKindConnectProxy, ID: "pod1-different-consul-svc-name-sidecar-proxy", - Service: "different-consul-svc-name-sidecar-proxy", + Name: "different-consul-svc-name-sidecar-proxy", Port: 20000, Address: "1.2.3.4", Proxy: &api.AgentServiceConnectProxyConfig{ @@ -1682,10 +1461,10 @@ func TestReconcileDeleteEndpointWithNamespaces(t *testing.T) { { name: "When ACLs are enabled, the ACL token should be deleted", consulSvcName: "service-deleted", - initialConsulSvcs: []*api.AgentService{ + initialConsulSvcs: []*api.AgentServiceRegistration{ { ID: "pod1-service-deleted", - Service: "service-deleted", + Name: "service-deleted", Port: 80, Address: "1.2.3.4", Meta: map[string]string{ @@ -1699,7 +1478,7 @@ func TestReconcileDeleteEndpointWithNamespaces(t *testing.T) { { Kind: api.ServiceKindConnectProxy, ID: "pod1-service-deleted-sidecar-proxy", - Service: 
"service-deleted-sidecar-proxy", + Name: "service-deleted-sidecar-proxy", Port: 20000, Address: "1.2.3.4", Proxy: &api.AgentServiceConnectProxyConfig{ @@ -1711,7 +1490,6 @@ func TestReconcileDeleteEndpointWithNamespaces(t *testing.T) { MetaKeyKubeNS: ts.SourceKubeNS, MetaKeyManagedBy: managedByValue, MetaKeyPodName: "pod1", - MetaKeySyntheticNode: "true", }, Namespace: ts.ExpConsulNS, }, @@ -1721,51 +1499,68 @@ func TestReconcileDeleteEndpointWithNamespaces(t *testing.T) { } for _, tt := range cases { t.Run(fmt.Sprintf("%s:%s", name, tt.name), func(t *testing.T) { + // The agent pod needs to have the address 127.0.0.1 so when the + // code gets the agent pods via the label component=client, and + // makes requests against the agent API, it will actually hit the + // test server we have on localhost. + fakeClientPod := createPod("fake-consul-client", "127.0.0.1", false, true) + fakeClientPod.Labels = map[string]string{"component": "client", "app": "consul", "release": "consul"} + // Create fake k8s client. - fakeClient := fake.NewClientBuilder().Build() + fakeClient := fake.NewClientBuilder().WithRuntimeObjects(fakeClientPod).Build() - // Create test consulServer server + // Create test Consul server. adminToken := "123e4567-e89b-12d3-a456-426614174000" - testClient := test.TestServerWithMockConnMgrWatcher(t, func(c *testutil.TestServerConfig) { + consul, err := testutil.NewTestServerConfigT(t, func(c *testutil.TestServerConfig) { if tt.enableACLs { - c.ACL.Enabled = tt.enableACLs + c.ACL.Enabled = true c.ACL.Tokens.InitialManagement = adminToken } + c.NodeName = nodeName }) - consulClient := testClient.APIClient + require.NoError(t, err) + defer consul.Stop() + + consul.WaitForLeader(t) + cfg := &api.Config{ + Address: consul.HTTPAddr, + Namespace: ts.ExpConsulNS, + } + if tt.enableACLs { + cfg.Token = adminToken + } + consulClient, err := api.NewClient(cfg) + require.NoError(t, err) + addr := strings.Split(consul.HTTPAddr, ":") + consulPort := addr[1] - _, err := namespaces.EnsureExists(consulClient, ts.ExpConsulNS, "") + _, err = namespaces.EnsureExists(consulClient, ts.ExpConsulNS, "") require.NoError(t, err) // Register service and proxy in consul. var token *api.ACLToken for _, svc := range tt.initialConsulSvcs { - serviceRegistration := &api.CatalogRegistration{ - Node: ConsulNodeName, - Address: ConsulNodeAddress, - Service: svc, - } - _, err = consulClient.Catalog().Register(serviceRegistration, nil) + err = consulClient.Agent().ServiceRegister(svc) require.NoError(t, err) // Create a token for it if ACLs are enabled. if tt.enableACLs { - var writeOpts api.WriteOptions - // When mirroring is enabled, the auth method will be created in the "default" Consul namespace. - if ts.Mirror { - writeOpts.Namespace = "default" - } else { - writeOpts.Namespace = ts.ExpConsulNS - } - test.SetupK8sAuthMethodWithNamespaces(t, consulClient, svc.Service, svc.Meta[MetaKeyKubeNS], ts.ExpConsulNS, ts.Mirror, ts.MirrorPrefix) - token, _, err = consulClient.ACL().Login(&api.ACLLoginParams{ - AuthMethod: test.AuthMethod, - BearerToken: test.ServiceAccountJWTToken, - Meta: map[string]string{ - TokenMetaPodNameKey: fmt.Sprintf("%s/%s", svc.Meta[MetaKeyKubeNS], svc.Meta[MetaKeyPodName]), - }, - }, &writeOpts) + if svc.Kind != api.ServiceKindConnectProxy { + var writeOpts api.WriteOptions + // When mirroring is enabled, the auth method will be created in the "default" Consul namespace. 
+ if ts.Mirror { + writeOpts.Namespace = "default" + } + test.SetupK8sAuthMethodWithNamespaces(t, consulClient, svc.Name, svc.Meta[MetaKeyKubeNS], ts.ExpConsulNS, ts.Mirror, ts.MirrorPrefix) + token, _, err = consulClient.ACL().Login(&api.ACLLoginParams{ + AuthMethod: test.AuthMethod, + BearerToken: test.ServiceAccountJWTToken, + Meta: map[string]string{ + TokenMetaPodNameKey: fmt.Sprintf("%s/%s", svc.Meta[MetaKeyKubeNS], svc.Meta[MetaKeyPodName]), + }, + }, &writeOpts) - require.NoError(t, err) + require.NoError(t, err) + } } } @@ -1773,12 +1568,14 @@ func TestReconcileDeleteEndpointWithNamespaces(t *testing.T) { ep := &EndpointsController{ Client: fakeClient, Log: logrtest.TestLogger{T: t}, - ConsulClientConfig: testClient.Cfg, - ConsulServerConnMgr: testClient.Watcher, + ConsulClient: consulClient, + ConsulPort: consulPort, + ConsulScheme: "http", AllowK8sNamespacesSet: mapset.NewSetWith("*"), DenyK8sNamespacesSet: mapset.NewSetWith(), ReleaseName: "consul", ReleaseNamespace: "default", + ConsulClientCfg: cfg, EnableConsulNamespaces: true, EnableNSMirroring: ts.Mirror, NSMirroringPrefix: ts.MirrorPrefix, @@ -1799,11 +1596,6 @@ func TestReconcileDeleteEndpointWithNamespaces(t *testing.T) { require.NoError(t, err) require.False(t, resp.Requeue) - consulConfig := testClient.Cfg - consulConfig.APIClientConfig.Namespace = ts.ExpConsulNS - consulClient, err = api.NewClient(consulConfig.APIClientConfig) - require.NoError(t, err) - // After reconciliation, Consul should not have any instances of service-deleted. serviceInstances, _, err := consulClient.Catalog().Service(tt.consulSvcName, "", &api.QueryOptions{Namespace: ts.ExpConsulNS}) require.NoError(t, err) @@ -1821,290 +1613,6 @@ func TestReconcileDeleteEndpointWithNamespaces(t *testing.T) { } } -// Tests deleting an Endpoints object, with and without matching Consul and K8s service names when Consul namespaces are enabled. -// This test covers EndpointsController.deregisterService when the map is nil (not selectively deregistered). 
-func TestReconcileDeleteGatewayWithNamespaces(t *testing.T) { - t.Parallel() - - consulSvcName := "service-deleted" - cases := map[string]struct { - ConsulNS string - }{ - "default Consul namespace": { - ConsulNS: "default", - }, - "other Consul namespace": { - ConsulNS: "other", - }, - } - for name, ts := range cases { - cases := []struct { - name string - initialConsulSvcs []*api.AgentService - enableACLs bool - }{ - { - name: "mesh-gateway", - initialConsulSvcs: []*api.AgentService{ - { - ID: "mesh-gateway", - Kind: api.ServiceKindMeshGateway, - Service: "mesh-gateway", - Port: 80, - Address: "1.2.3.4", - Meta: map[string]string{ - MetaKeyKubeServiceName: "service-deleted", - MetaKeyKubeNS: "default", - MetaKeyManagedBy: managedByValue, - MetaKeyPodName: "mesh-gateway", - MetaKeySyntheticNode: "true", - }, - TaggedAddresses: map[string]api.ServiceAddress{ - "lan": { - Address: "1.2.3.4", - Port: 80, - }, - "wan": { - Address: "5.6.7.8", - Port: 8080, - }, - }, - Namespace: "default", - }, - }, - enableACLs: false, - }, - { - name: "mesh-gateway with ACLs enabled", - initialConsulSvcs: []*api.AgentService{ - { - ID: "mesh-gateway", - Kind: api.ServiceKindMeshGateway, - Service: "mesh-gateway", - Port: 80, - Address: "1.2.3.4", - Meta: map[string]string{ - MetaKeyKubeServiceName: "service-deleted", - MetaKeyKubeNS: "default", - MetaKeyManagedBy: managedByValue, - MetaKeyPodName: "mesh-gateway", - MetaKeySyntheticNode: "true", - }, - TaggedAddresses: map[string]api.ServiceAddress{ - "lan": { - Address: "1.2.3.4", - Port: 80, - }, - "wan": { - Address: "5.6.7.8", - Port: 8080, - }, - }, - Namespace: "default", - }, - }, - enableACLs: true, - }, - { - name: "terminating-gateway", - initialConsulSvcs: []*api.AgentService{ - { - ID: "terminating-gateway", - Kind: api.ServiceKindTerminatingGateway, - Service: "terminating-gateway", - Port: 8443, - Address: "1.2.3.4", - Meta: map[string]string{ - MetaKeyKubeServiceName: "service-deleted", - MetaKeyKubeNS: "default", - MetaKeyManagedBy: managedByValue, - MetaKeyPodName: "terminating-gateway", - MetaKeySyntheticNode: "true", - }, - Namespace: ts.ConsulNS, - }, - }, - enableACLs: false, - }, - { - name: "terminating-gateway with ACLs enabled", - initialConsulSvcs: []*api.AgentService{ - { - ID: "terminating-gateway", - Kind: api.ServiceKindTerminatingGateway, - Service: "terminating-gateway", - Port: 8443, - Address: "1.2.3.4", - Meta: map[string]string{ - MetaKeyKubeServiceName: "service-deleted", - MetaKeyKubeNS: "default", - MetaKeyManagedBy: managedByValue, - MetaKeyPodName: "terminating-gateway", - MetaKeySyntheticNode: "true", - }, - Namespace: ts.ConsulNS, - }, - }, - enableACLs: true, - }, - { - name: "ingress-gateway", - initialConsulSvcs: []*api.AgentService{ - { - ID: "ingress-gateway", - Kind: api.ServiceKindIngressGateway, - Service: "ingress-gateway", - Port: 80, - Address: "1.2.3.4", - Meta: map[string]string{ - MetaKeyKubeServiceName: "gateway", - MetaKeyKubeNS: "default", - MetaKeyManagedBy: managedByValue, - MetaKeyPodName: "ingress-gateway", - MetaKeySyntheticNode: "true", - }, - TaggedAddresses: map[string]api.ServiceAddress{ - "lan": { - Address: "1.2.3.4", - Port: 80, - }, - "wan": { - Address: "5.6.7.8", - Port: 8080, - }, - }, - Namespace: ts.ConsulNS, - }, - }, - enableACLs: false, - }, - { - name: "ingress-gateway with ACLs enabled", - initialConsulSvcs: []*api.AgentService{ - { - ID: "ingress-gateway", - Kind: api.ServiceKindIngressGateway, - Service: "ingress-gateway", - Port: 80, - Address: "1.2.3.4", - Meta: 
map[string]string{ - MetaKeyKubeServiceName: "service-deleted", - MetaKeyKubeNS: "default", - MetaKeyManagedBy: managedByValue, - MetaKeyPodName: "ingress-gateway", - MetaKeySyntheticNode: "true", - }, - TaggedAddresses: map[string]api.ServiceAddress{ - "lan": { - Address: "1.2.3.4", - Port: 80, - }, - "wan": { - Address: "5.6.7.8", - Port: 8080, - }, - }, - Namespace: ts.ConsulNS, - }, - }, - enableACLs: true, - }, - } - for _, tt := range cases { - t.Run(fmt.Sprintf("%s:%s", name, tt.name), func(t *testing.T) { - // Create fake k8s client. - fakeClient := fake.NewClientBuilder().Build() - - // Create test Consul server. - adminToken := "123e4567-e89b-12d3-a456-426614174000" - testClient := test.TestServerWithMockConnMgrWatcher(t, func(c *testutil.TestServerConfig) { - if tt.enableACLs { - c.ACL.Enabled = tt.enableACLs - c.ACL.Tokens.InitialManagement = adminToken - } - }) - consulClient := testClient.APIClient - - _, err := namespaces.EnsureExists(consulClient, ts.ConsulNS, "") - require.NoError(t, err) - - // Register service and proxy in consul. - var token *api.ACLToken - for _, svc := range tt.initialConsulSvcs { - serviceRegistration := &api.CatalogRegistration{ - Node: ConsulNodeName, - Address: ConsulNodeAddress, - Service: svc, - } - _, err = consulClient.Catalog().Register(serviceRegistration, nil) - require.NoError(t, err) - - // Create a token for it if ACLs are enabled. - if tt.enableACLs { - var writeOpts api.WriteOptions - if svc.Kind == api.ServiceKindMeshGateway { - writeOpts.Namespace = "default" // Mesh Gateways must always be registered in the "default" namespace. - } else { - writeOpts.Namespace = ts.ConsulNS - } - - test.SetupK8sAuthMethodWithNamespaces(t, consulClient, svc.Service, svc.Meta[MetaKeyKubeNS], writeOpts.Namespace, false, "") - token, _, err = consulClient.ACL().Login(&api.ACLLoginParams{ - AuthMethod: test.AuthMethod, - BearerToken: test.ServiceAccountJWTToken, - Meta: map[string]string{ - TokenMetaPodNameKey: fmt.Sprintf("%s/%s", svc.Meta[MetaKeyKubeNS], svc.Meta[MetaKeyPodName]), - "component": svc.ID, - }, - }, &writeOpts) - - require.NoError(t, err) - } - } - - // Create the endpoints controller. - ep := &EndpointsController{ - Client: fakeClient, - Log: logrtest.TestLogger{T: t}, - ConsulClientConfig: testClient.Cfg, - ConsulServerConnMgr: testClient.Watcher, - AllowK8sNamespacesSet: mapset.NewSetWith("*"), - DenyK8sNamespacesSet: mapset.NewSetWith(), - ReleaseName: "consul", - ReleaseNamespace: "default", - EnableConsulNamespaces: true, - } - if tt.enableACLs { - ep.AuthMethod = test.AuthMethod - } - - // Set up the Endpoint that will be reconciled, and reconcile. - namespacedName := types.NamespacedName{ - Namespace: "default", - Name: "service-deleted", - } - resp, err := ep.Reconcile(context.Background(), ctrl.Request{ - NamespacedName: namespacedName, - }) - require.NoError(t, err) - require.False(t, resp.Requeue) - - // After reconciliation, Consul should not have any instances of service-deleted. 
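When ACLs are enabled, deregistration is also expected to clean up the token issued via the Kubernetes auth method login, and these tests verify that by reading the token back: Consul answers a read of a deleted accessor ID with 403 "ACL not found" rather than 404. A small convenience wrapper for that recurring assertion (assertTokenDeleted is an illustrative name, not a helper in this file):

import (
	"testing"

	"github.com/hashicorp/consul/api"
	"github.com/stretchr/testify/require"
)

// assertTokenDeleted verifies the login token was removed on deregistration.
func assertTokenDeleted(t *testing.T, c *api.Client, accessorID string) {
	_, _, err := c.ACL().TokenRead(accessorID, nil)
	require.EqualError(t, err, "Unexpected response code: 403 (ACL not found)")
}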
- defaultNS, _, err := consulClient.Catalog().Service(consulSvcName, "", &api.QueryOptions{Namespace: "default"}) - require.NoError(t, err) - testNS, _, err := consulClient.Catalog().Service(consulSvcName, "", &api.QueryOptions{Namespace: ts.ConsulNS}) - require.NoError(t, err) - require.Empty(t, append(defaultNS, testNS...)) - - if tt.enableACLs { - _, _, err = consulClient.ACL().TokenRead(token.AccessorID, nil) - require.EqualError(t, err, "Unexpected response code: 403 (ACL not found)") - } - }) - } - } -} - func createPodWithNamespace(name, namespace, ip string, inject bool, managedByEndpointsController bool) *corev1.Pod { pod := &corev1.Pod{ ObjectMeta: metav1.ObjectMeta{ @@ -2135,28 +1643,3 @@ func createPodWithNamespace(name, namespace, ip string, inject bool, managedByEn return pod } - -func createGatewayWithNamespace(name, namespace, ip string, annotations map[string]string) *corev1.Pod { - pod := &corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: namespace, - Labels: map[string]string{ - keyManagedBy: managedByValue, - }, - Annotations: annotations, - }, - Status: corev1.PodStatus{ - PodIP: ip, - HostIP: "127.0.0.1", - Phase: corev1.PodRunning, - Conditions: []corev1.PodCondition{ - { - Type: corev1.PodReady, - Status: corev1.ConditionTrue, - }, - }, - }, - } - return pod -} diff --git a/control-plane/connect-inject/endpoints_controller_test.go b/control-plane/connect-inject/endpoints_controller_test.go index 97bb69ceaf..abbad4b2a6 100644 --- a/control-plane/connect-inject/endpoints_controller_test.go +++ b/control-plane/connect-inject/endpoints_controller_test.go @@ -24,6 +24,10 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client/fake" ) +const ( + ttl = "ttl" +) + func TestShouldIgnore(t *testing.T) { t.Parallel() cases := []struct { @@ -87,7 +91,7 @@ func TestHasBeenInjected(t *testing.T) { { name: "Pod with injected annotation", pod: func() corev1.Pod { - pod1 := createServicePod("pod1", "1.2.3.4", true, true) + pod1 := createPod("pod1", "1.2.3.4", true, true) return *pod1 }, expected: true, @@ -95,7 +99,7 @@ func TestHasBeenInjected(t *testing.T) { { name: "Pod without injected annotation", pod: func() corev1.Pod { - pod1 := createServicePod("pod1", "1.2.3.4", false, true) + pod1 := createPod("pod1", "1.2.3.4", false, true) return *pod1 }, expected: false, @@ -110,8 +114,84 @@ func TestHasBeenInjected(t *testing.T) { } } +// TestProcessUpstreamsTLSandACLs enables TLS and ACLS and tests processUpstreams through +// the only path which sets up and uses a consul client: when proxy defaults need to be read. +// This test was plucked from the table test TestProcessUpstreams as the rest do not use the client. +func TestProcessUpstreamsTLSandACLs(t *testing.T) { + t.Parallel() + nodeName := "test-node" + + masterToken := "b78d37c7-0ca7-5f4d-99ee-6d9975ce4586" + caFile, certFile, keyFile := test.GenerateServerCerts(t) + // Create test consul server with ACLs and TLS. 
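As the doc comment above notes, processUpstreams only reaches Consul when an upstream pins a datacenter, because the controller must read the ProxyDefaults config entry and confirm mesh gateways run in "local" or "remote" mode before allowing cross-DC traffic. A rough sketch of that gate, under the assumption that the repo's real implementation differs in structure (the error strings match the expErr values used later in this file):

import (
	"fmt"

	"github.com/hashicorp/consul/api"
)

// validateDCUpstream sketches the ProxyDefaults check these tests exercise.
func validateDCUpstream(c *api.Client, upstream string) error {
	entry, _, err := c.ConfigEntries().Get(api.ProxyDefaults, api.ProxyConfigGlobal, nil)
	if err != nil {
		return fmt.Errorf("upstream %q is invalid: there is no ProxyDefaults config to set mesh gateway mode", upstream)
	}
	mode := entry.(*api.ProxyConfigEntry).MeshGateway.Mode
	if mode != api.MeshGatewayModeLocal && mode != api.MeshGatewayModeRemote {
		return fmt.Errorf("upstream %q is invalid: ProxyDefaults mesh gateway mode is neither %q nor %q",
			upstream, api.MeshGatewayModeLocal, api.MeshGatewayModeRemote)
	}
	return nil
}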
+ consul, err := testutil.NewTestServerConfigT(t, func(c *testutil.TestServerConfig) { + c.ACL.Enabled = true + c.ACL.DefaultPolicy = "deny" + c.ACL.Tokens.InitialManagement = masterToken + c.CAFile = caFile + c.CertFile = certFile + c.KeyFile = keyFile + c.NodeName = nodeName + }) + require.NoError(t, err) + defer consul.Stop() + + consul.WaitForServiceIntentions(t) + cfg := &api.Config{ + Address: consul.HTTPSAddr, + Scheme: "https", + TLSConfig: api.TLSConfig{ + CAFile: caFile, + }, + Token: masterToken, + } + consulClient, err := api.NewClient(cfg) + require.NoError(t, err) + addr := strings.Split(consul.HTTPSAddr, ":") + consulPort := addr[1] + + ce, _ := api.MakeConfigEntry(api.ProxyDefaults, "global") + pd := ce.(*api.ProxyConfigEntry) + pd.MeshGateway.Mode = api.MeshGatewayModeRemote + _, _, err = consulClient.ConfigEntries().Set(pd, &api.WriteOptions{}) + require.NoError(t, err) + + ep := &EndpointsController{ + Log: logrtest.TestLogger{T: t}, + ConsulClient: consulClient, + ConsulPort: consulPort, + ConsulScheme: "https", + AllowK8sNamespacesSet: mapset.NewSetWith("*"), + DenyK8sNamespacesSet: mapset.NewSetWith(), + } + + pod := createPod("pod1", "1.2.3.4", true, true) + pod.Annotations[annotationUpstreams] = "upstream1:1234:dc1" + + upstreams, err := ep.processUpstreams(*pod, corev1.Endpoints{ + ObjectMeta: metav1.ObjectMeta{ + Name: "svcname", + Namespace: "default", + Labels: map[string]string{}, + Annotations: map[string]string{}, + }, + }) + require.NoError(t, err) + + expected := []api.Upstream{ + { + DestinationType: api.UpstreamDestTypeService, + DestinationName: "upstream1", + Datacenter: "dc1", + LocalBindPort: 1234, + }, + } + require.Equal(t, expected, upstreams) +} + func TestProcessUpstreams(t *testing.T) { t.Parallel() + nodeName := "test-node" cases := []struct { name string pod func() *corev1.Pod @@ -125,7 +205,7 @@ func TestProcessUpstreams(t *testing.T) { { name: "annotated upstream with svc only", pod: func() *corev1.Pod { - pod1 := createServicePod("pod1", "1.2.3.4", true, true) + pod1 := createPod("pod1", "1.2.3.4", true, true) pod1.Annotations[annotationUpstreams] = "upstream1.svc:1234" return pod1 }, @@ -142,7 +222,7 @@ func TestProcessUpstreams(t *testing.T) { { name: "annotated upstream with svc and dc", pod: func() *corev1.Pod { - pod1 := createServicePod("pod1", "1.2.3.4", true, true) + pod1 := createPod("pod1", "1.2.3.4", true, true) pod1.Annotations[annotationUpstreams] = "upstream1.svc.dc1.dc:1234" return pod1 }, @@ -160,7 +240,7 @@ func TestProcessUpstreams(t *testing.T) { { name: "annotated upstream with svc and peer", pod: func() *corev1.Pod { - pod1 := createServicePod("pod1", "1.2.3.4", true, true) + pod1 := createPod("pod1", "1.2.3.4", true, true) pod1.Annotations[annotationUpstreams] = "upstream1.svc.peer1.peer:1234" return pod1 }, @@ -178,7 +258,7 @@ func TestProcessUpstreams(t *testing.T) { { name: "annotated upstream with svc and peer, needs ns before peer if namespaces enabled", pod: func() *corev1.Pod { - pod1 := createServicePod("pod1", "1.2.3.4", true, true) + pod1 := createPod("pod1", "1.2.3.4", true, true) pod1.Annotations[annotationUpstreams] = "upstream1.svc.peer1.peer:1234" return pod1 }, @@ -189,7 +269,7 @@ func TestProcessUpstreams(t *testing.T) { { name: "annotated upstream with svc, ns, and peer", pod: func() *corev1.Pod { - pod1 := createServicePod("pod1", "1.2.3.4", true, true) + pod1 := createPod("pod1", "1.2.3.4", true, true) pod1.Annotations[annotationUpstreams] = "upstream1.svc.ns1.ns.peer1.peer:1234" return pod1 }, @@ 
-208,7 +288,7 @@ func TestProcessUpstreams(t *testing.T) { { name: "annotated upstream with svc, ns, and partition", pod: func() *corev1.Pod { - pod1 := createServicePod("pod1", "1.2.3.4", true, true) + pod1 := createPod("pod1", "1.2.3.4", true, true) pod1.Annotations[annotationUpstreams] = "upstream1.svc.ns1.ns.part1.ap:1234" return pod1 }, @@ -227,7 +307,7 @@ func TestProcessUpstreams(t *testing.T) { { name: "annotated upstream with svc, ns, and dc", pod: func() *corev1.Pod { - pod1 := createServicePod("pod1", "1.2.3.4", true, true) + pod1 := createPod("pod1", "1.2.3.4", true, true) pod1.Annotations[annotationUpstreams] = "upstream1.svc.ns1.ns.dc1.dc:1234" return pod1 }, @@ -246,7 +326,7 @@ func TestProcessUpstreams(t *testing.T) { { name: "multiple annotated upstreams", pod: func() *corev1.Pod { - pod1 := createServicePod("pod1", "1.2.3.4", true, true) + pod1 := createPod("pod1", "1.2.3.4", true, true) pod1.Annotations[annotationUpstreams] = "upstream1.svc.ns1.ns.dc1.dc:1234, upstream2.svc:2234, upstream3.svc.ns1.ns:3234, upstream4.svc.ns1.ns.peer1.peer:4234" return pod1 }, @@ -283,7 +363,7 @@ func TestProcessUpstreams(t *testing.T) { { name: "annotated upstream error: invalid partition/dc/peer", pod: func() *corev1.Pod { - pod1 := createServicePod("pod1", "1.2.3.4", true, true) + pod1 := createPod("pod1", "1.2.3.4", true, true) pod1.Annotations[annotationUpstreams] = "upstream1.svc.ns1.ns.part1.err:1234" return pod1 }, @@ -294,7 +374,7 @@ func TestProcessUpstreams(t *testing.T) { { name: "annotated upstream error: invalid namespace", pod: func() *corev1.Pod { - pod1 := createServicePod("pod1", "1.2.3.4", true, true) + pod1 := createPod("pod1", "1.2.3.4", true, true) pod1.Annotations[annotationUpstreams] = "upstream1.svc.ns1.err:1234" return pod1 }, @@ -305,7 +385,7 @@ func TestProcessUpstreams(t *testing.T) { { name: "annotated upstream error: invalid number of pieces in the address", pod: func() *corev1.Pod { - pod1 := createServicePod("pod1", "1.2.3.4", true, true) + pod1 := createPod("pod1", "1.2.3.4", true, true) pod1.Annotations[annotationUpstreams] = "upstream1.svc.err:1234" return pod1 }, @@ -316,7 +396,7 @@ func TestProcessUpstreams(t *testing.T) { { name: "annotated upstream error: invalid peer", pod: func() *corev1.Pod { - pod1 := createServicePod("pod1", "1.2.3.4", true, true) + pod1 := createPod("pod1", "1.2.3.4", true, true) pod1.Annotations[annotationUpstreams] = "upstream1.svc.peer1.err:1234" return pod1 }, @@ -327,7 +407,7 @@ func TestProcessUpstreams(t *testing.T) { { name: "annotated upstream error: invalid number of pieces in the address without namespaces and partitions", pod: func() *corev1.Pod { - pod1 := createServicePod("pod1", "1.2.3.4", true, true) + pod1 := createPod("pod1", "1.2.3.4", true, true) pod1.Annotations[annotationUpstreams] = "upstream1.svc.err:1234" return pod1 }, @@ -335,10 +415,38 @@ func TestProcessUpstreams(t *testing.T) { consulNamespacesEnabled: false, consulPartitionsEnabled: false, }, + { + name: "upstream with datacenter without ProxyDefaults", + pod: func() *corev1.Pod { + pod1 := createPod("pod1", "1.2.3.4", true, true) + pod1.Annotations[annotationUpstreams] = "upstream1:1234:dc1" + return pod1 + }, + expErr: "upstream \"upstream1:1234:dc1\" is invalid: there is no ProxyDefaults config to set mesh gateway mode", + consulNamespacesEnabled: false, + consulPartitionsEnabled: false, + }, + { + name: "upstream with datacenter with ProxyDefaults whose mesh gateway mode is not local or remote", + pod: func() *corev1.Pod { + pod1 := 
createPod("pod1", "1.2.3.4", true, true) + pod1.Annotations[annotationUpstreams] = "upstream1:1234:dc1" + return pod1 + }, + expErr: "upstream \"upstream1:1234:dc1\" is invalid: ProxyDefaults mesh gateway mode is neither \"local\" nor \"remote\"", + configEntry: func() api.ConfigEntry { + ce, _ := api.MakeConfigEntry(api.ProxyDefaults, "pd") + pd := ce.(*api.ProxyConfigEntry) + pd.MeshGateway.Mode = "bad-mode" + return pd + }, + consulNamespacesEnabled: false, + consulPartitionsEnabled: false, + }, { name: "annotated upstream error: both peer and partition provided", pod: func() *corev1.Pod { - pod1 := createServicePod("pod1", "1.2.3.4", true, true) + pod1 := createPod("pod1", "1.2.3.4", true, true) pod1.Annotations[annotationUpstreams] = "upstream1.svc.ns1.ns.part1.partition.peer1.peer:1234" return pod1 }, @@ -349,7 +457,7 @@ func TestProcessUpstreams(t *testing.T) { { name: "annotated upstream error: both peer and dc provided", pod: func() *corev1.Pod { - pod1 := createServicePod("pod1", "1.2.3.4", true, true) + pod1 := createPod("pod1", "1.2.3.4", true, true) pod1.Annotations[annotationUpstreams] = "upstream1.svc.ns1.ns.peer1.peer.dc1.dc:1234" return pod1 }, @@ -360,7 +468,7 @@ func TestProcessUpstreams(t *testing.T) { { name: "annotated upstream error: both dc and partition provided", pod: func() *corev1.Pod { - pod1 := createServicePod("pod1", "1.2.3.4", true, true) + pod1 := createPod("pod1", "1.2.3.4", true, true) pod1.Annotations[annotationUpstreams] = "upstream1.svc.ns1.ns.part1.partition.dc1.dc:1234" return pod1 }, @@ -368,16 +476,64 @@ func TestProcessUpstreams(t *testing.T) { consulNamespacesEnabled: true, consulPartitionsEnabled: true, }, + { + name: "upstream with datacenter with ProxyDefaults and mesh gateway is in local mode", + pod: func() *corev1.Pod { + pod1 := createPod("pod1", "1.2.3.4", true, true) + pod1.Annotations[annotationUpstreams] = "upstream1:1234:dc1" + return pod1 + }, + expected: []api.Upstream{ + { + DestinationType: api.UpstreamDestTypeService, + DestinationName: "upstream1", + Datacenter: "dc1", + LocalBindPort: 1234, + }, + }, + configEntry: func() api.ConfigEntry { + ce, _ := api.MakeConfigEntry(api.ProxyDefaults, "pd") + pd := ce.(*api.ProxyConfigEntry) + pd.MeshGateway.Mode = api.MeshGatewayModeLocal + return pd + }, + consulNamespacesEnabled: false, + consulPartitionsEnabled: false, + }, + { + name: "upstream with datacenter with ProxyDefaults and mesh gateway in remote mode", + pod: func() *corev1.Pod { + pod1 := createPod("pod1", "1.2.3.4", true, true) + pod1.Annotations[annotationUpstreams] = "upstream1:1234:dc1" + return pod1 + }, + expected: []api.Upstream{ + { + DestinationType: api.UpstreamDestTypeService, + DestinationName: "upstream1", + Datacenter: "dc1", + LocalBindPort: 1234, + }, + }, + configEntry: func() api.ConfigEntry { + ce, _ := api.MakeConfigEntry(api.ProxyDefaults, "pd") + pd := ce.(*api.ProxyConfigEntry) + pd.MeshGateway.Mode = api.MeshGatewayModeRemote + return pd + }, + consulNamespacesEnabled: false, + consulPartitionsEnabled: false, + }, { name: "when consul is unavailable, we don't return an error", pod: func() *corev1.Pod { - pod1 := createServicePod("pod1", "1.2.3.4", true, true) + pod1 := createPod("pod1", "1.2.3.4", true, true) pod1.Annotations[annotationUpstreams] = "upstream1:1234:dc1" return pod1 }, expErr: "", configEntry: func() api.ConfigEntry { - ce, _ := api.MakeConfigEntry(api.ProxyDefaults, "global") + ce, _ := api.MakeConfigEntry(api.ProxyDefaults, "pd") pd := ce.(*api.ProxyConfigEntry) pd.MeshGateway.Mode = 
"remote" return pd @@ -397,7 +553,7 @@ func TestProcessUpstreams(t *testing.T) { { name: "single upstream", pod: func() *corev1.Pod { - pod1 := createServicePod("pod1", "1.2.3.4", true, true) + pod1 := createPod("pod1", "1.2.3.4", true, true) pod1.Annotations[annotationUpstreams] = "upstream:1234" return pod1 }, @@ -414,7 +570,7 @@ func TestProcessUpstreams(t *testing.T) { { name: "single upstream with namespace", pod: func() *corev1.Pod { - pod1 := createServicePod("pod1", "1.2.3.4", true, true) + pod1 := createPod("pod1", "1.2.3.4", true, true) pod1.Annotations[annotationUpstreams] = "upstream.foo:1234" return pod1 }, @@ -432,7 +588,7 @@ func TestProcessUpstreams(t *testing.T) { { name: "single upstream with namespace and partition", pod: func() *corev1.Pod { - pod1 := createServicePod("pod1", "1.2.3.4", true, true) + pod1 := createPod("pod1", "1.2.3.4", true, true) pod1.Annotations[annotationUpstreams] = "upstream.foo.bar:1234" return pod1 }, @@ -451,7 +607,7 @@ func TestProcessUpstreams(t *testing.T) { { name: "multiple upstreams", pod: func() *corev1.Pod { - pod1 := createServicePod("pod1", "1.2.3.4", true, true) + pod1 := createPod("pod1", "1.2.3.4", true, true) pod1.Annotations[annotationUpstreams] = "upstream1:1234, upstream2:2234" return pod1 }, @@ -473,12 +629,12 @@ func TestProcessUpstreams(t *testing.T) { { name: "multiple upstreams with consul namespaces, partitions and datacenters", pod: func() *corev1.Pod { - pod1 := createServicePod("pod1", "1.2.3.4", true, true) + pod1 := createPod("pod1", "1.2.3.4", true, true) pod1.Annotations[annotationUpstreams] = "upstream1:1234, upstream2.bar:2234, upstream3.foo.baz:3234:dc2" return pod1 }, configEntry: func() api.ConfigEntry { - ce, _ := api.MakeConfigEntry(api.ProxyDefaults, "global") + ce, _ := api.MakeConfigEntry(api.ProxyDefaults, "pd") pd := ce.(*api.ProxyConfigEntry) pd.MeshGateway.Mode = "remote" return pd @@ -509,12 +665,12 @@ func TestProcessUpstreams(t *testing.T) { { name: "multiple upstreams with consul namespaces and datacenters", pod: func() *corev1.Pod { - pod1 := createServicePod("pod1", "1.2.3.4", true, true) + pod1 := createPod("pod1", "1.2.3.4", true, true) pod1.Annotations[annotationUpstreams] = "upstream1:1234, upstream2.bar:2234, upstream3.foo:3234:dc2" return pod1 }, configEntry: func() api.ConfigEntry { - ce, _ := api.MakeConfigEntry(api.ProxyDefaults, "global") + ce, _ := api.MakeConfigEntry(api.ProxyDefaults, "pd") pd := ce.(*api.ProxyConfigEntry) pd.MeshGateway.Mode = "remote" return pd @@ -543,7 +699,7 @@ func TestProcessUpstreams(t *testing.T) { { name: "prepared query upstream", pod: func() *corev1.Pod { - pod1 := createServicePod("pod1", "1.2.3.4", true, true) + pod1 := createPod("pod1", "1.2.3.4", true, true) pod1.Annotations[annotationUpstreams] = "prepared_query:queryname:1234" return pod1 }, @@ -560,7 +716,7 @@ func TestProcessUpstreams(t *testing.T) { { name: "prepared query and non-query upstreams and annotated non-query upstreams", pod: func() *corev1.Pod { - pod1 := createServicePod("pod1", "1.2.3.4", true, true) + pod1 := createPod("pod1", "1.2.3.4", true, true) pod1.Annotations[annotationUpstreams] = "prepared_query:queryname:1234, upstream1:2234, prepared_query:6687bd19-5654-76be-d764:8202, upstream2.svc:3234" return pod1 }, @@ -592,8 +748,34 @@ func TestProcessUpstreams(t *testing.T) { } for _, tt := range cases { t.Run(tt.name, func(t *testing.T) { + // Create test consul server. 
+			consul, err := testutil.NewTestServerConfigT(t, func(c *testutil.TestServerConfig) {
+				c.NodeName = nodeName
+			})
+			require.NoError(t, err)
+			defer consul.Stop()
+
+			consul.WaitForServiceIntentions(t)
+			httpAddr := consul.HTTPAddr
+			if tt.consulUnavailable {
+				httpAddr = "hostname.does.not.exist:8500"
+			}
+			consulClient, err := api.NewClient(&api.Config{
+				Address: httpAddr,
+			})
+			require.NoError(t, err)
+			addr := strings.Split(httpAddr, ":")
+			consulPort := addr[1]
+
+			if tt.configEntry != nil {
+				consulClient.ConfigEntries().Set(tt.configEntry(), &api.WriteOptions{})
+			}
+
 			ep := &EndpointsController{
 				Log: logrtest.TestLogger{T: t},
+				ConsulClient: consulClient,
+				ConsulPort: consulPort,
+				ConsulScheme: "http",
 				AllowK8sNamespacesSet: mapset.NewSetWith("*"),
 				DenyK8sNamespacesSet: mapset.NewSetWith(),
 				EnableConsulNamespaces: tt.consulNamespacesEnabled,
@@ -629,7 +811,7 @@ func TestGetServiceName(t *testing.T) {
 		{
 			name: "single port, with annotation",
 			pod: func() *corev1.Pod {
-				pod1 := createServicePod("pod1", "1.2.3.4", true, true)
+				pod1 := createPod("pod1", "1.2.3.4", true, true)
 				pod1.Annotations[annotationService] = "web"
 				return pod1
 			},
@@ -644,7 +826,7 @@
 		{
 			name: "single port, without annotation",
 			pod: func() *corev1.Pod {
-				pod1 := createServicePod("pod1", "1.2.3.4", true, true)
+				pod1 := createPod("pod1", "1.2.3.4", true, true)
 				return pod1
 			},
 			endpoint: &corev1.Endpoints{
@@ -658,7 +840,7 @@
 		{
 			name: "multi port, with annotation",
 			pod: func() *corev1.Pod {
-				pod1 := createServicePod("pod1", "1.2.3.4", true, true)
+				pod1 := createPod("pod1", "1.2.3.4", true, true)
 				pod1.Annotations[annotationService] = "web,web-admin"
 				return pod1
 			},
@@ -674,7 +856,7 @@
 	for _, tt := range cases {
 		t.Run(tt.name, func(t *testing.T) {
-			svcName := serviceName(*tt.pod(), *tt.endpoint)
+			svcName := getServiceName(*tt.pod(), *tt.endpoint)

 			require.Equal(t, tt.expSvcName, svcName)
 		})
@@ -683,21 +865,22 @@ func TestGetServiceName(t *testing.T) {
 func TestReconcileCreateEndpoint_MultiportService(t *testing.T) {
 	t.Parallel()
+	nodeName := "test-node"
 	cases := []struct {
-		name string
-		consulSvcName string
-		k8sObjects func() []runtime.Object
-		initialConsulSvcs []*api.AgentService
-		expectedNumSvcInstances int
-		expectedConsulSvcInstances []*api.CatalogService
-		expectedProxySvcInstances []*api.CatalogService
-		expectedHealthChecks []*api.HealthCheck
+		name string
+		consulSvcName string
+		k8sObjects func() []runtime.Object
+		initialConsulSvcs []*api.AgentServiceRegistration
+		expectedNumSvcInstances int
+		expectedConsulSvcInstancesMap map[string][]*api.CatalogService
+		expectedProxySvcInstancesMap map[string][]*api.CatalogService
+		expectedAgentHealthChecks []*api.AgentCheck
 	}{
 		{
 			name: "Multiport service",
 			consulSvcName: "web,web-admin",
 			k8sObjects: func() []runtime.Object {
-				pod1 := createServicePod("pod1", "1.2.3.4", true, true)
+				pod1 := createPod("pod1", "1.2.3.4", true, true)
 				pod1.Annotations[annotationPort] = "8080,9090"
 				pod1.Annotations[annotationService] = "web,web-admin"
 				pod1.Annotations[annotationUpstreams] = "upstream1:1234"
@@ -710,7 +893,8 @@
 					{
 						Addresses: []corev1.EndpointAddress{
 							{
-								IP: "1.2.3.4",
+								IP: "1.2.3.4",
+								NodeName: &nodeName,
 								TargetRef: &corev1.ObjectReference{
 									Kind: "Pod",
 									Name: "pod1",
@@ -730,7 +914,8 @@
 					{
 						Addresses: []corev1.EndpointAddress{
 							{
-								IP: "1.2.3.4",
+								IP: "1.2.3.4",
+								NodeName: &nodeName,
 								TargetRef: &corev1.ObjectReference{
 									Kind: "Pod",
 									Name: "pod1",
@@ -743,149 +928,148 @@ func TestReconcileCreateEndpoint_MultiportService(t *testing.T) {
 				}
 				return []runtime.Object{pod1, endpoint1, endpoint2}
 			},
-			initialConsulSvcs: nil,
+			initialConsulSvcs: []*api.AgentServiceRegistration{},
 			expectedNumSvcInstances: 1,
-			expectedConsulSvcInstances: []*api.CatalogService{
-				{
-					ServiceID: "pod1-web",
-					ServiceName: "web",
-					ServiceAddress: "1.2.3.4",
-					ServicePort: 8080,
-					ServiceMeta: map[string]string{
-						MetaKeyPodName: "pod1",
-						MetaKeyKubeServiceName: "web",
-						MetaKeyKubeNS: "default",
-						MetaKeyManagedBy: managedByValue,
-						MetaKeySyntheticNode: "true",
+			expectedConsulSvcInstancesMap: map[string][]*api.CatalogService{
+				"web": {
+					{
+						ServiceID: "pod1-web",
+						ServiceName: "web",
+						ServiceAddress: "1.2.3.4",
+						ServicePort: 8080,
+						ServiceMeta: map[string]string{
+							MetaKeyPodName: "pod1",
+							MetaKeyKubeServiceName: "web",
+							MetaKeyKubeNS: "default",
+							MetaKeyManagedBy: managedByValue,
+						},
+						ServiceTags: []string{},
 					},
-					ServiceTags: []string{},
 				},
-				{
-					ServiceID: "pod1-web-admin",
-					ServiceName: "web-admin",
-					ServiceAddress: "1.2.3.4",
-					ServicePort: 9090,
-					ServiceMeta: map[string]string{
-						MetaKeyPodName: "pod1",
-						MetaKeyKubeServiceName: "web-admin",
-						MetaKeyKubeNS: "default",
-						MetaKeyManagedBy: managedByValue,
-						MetaKeySyntheticNode: "true",
+				"web-admin": {
+					{
+						ServiceID: "pod1-web-admin",
+						ServiceName: "web-admin",
+						ServiceAddress: "1.2.3.4",
+						ServicePort: 9090,
+						ServiceMeta: map[string]string{
+							MetaKeyPodName: "pod1",
+							MetaKeyKubeServiceName: "web-admin",
+							MetaKeyKubeNS: "default",
+							MetaKeyManagedBy: managedByValue,
+						},
+						ServiceTags: []string{},
 					},
-					ServiceTags: []string{},
 				},
 			},
-			expectedProxySvcInstances: []*api.CatalogService{
-
-				{
-					ServiceID: "pod1-web-sidecar-proxy",
-					ServiceName: "web-sidecar-proxy",
-					ServiceAddress: "1.2.3.4",
-					ServicePort: 20000,
-					ServiceProxy: &api.AgentServiceConnectProxyConfig{
-						DestinationServiceName: "web",
-						DestinationServiceID: "pod1-web",
-						LocalServiceAddress: "127.0.0.1",
-						LocalServicePort: 8080,
-						Upstreams: []api.Upstream{
-							{
-								DestinationType: api.UpstreamDestTypeService,
-								DestinationName: "upstream1",
-								LocalBindPort: 1234,
+			expectedProxySvcInstancesMap: map[string][]*api.CatalogService{
+				"web": {
+					{
+						ServiceID: "pod1-web-sidecar-proxy",
+						ServiceName: "web-sidecar-proxy",
+						ServiceAddress: "1.2.3.4",
+						ServicePort: 20000,
+						ServiceProxy: &api.AgentServiceConnectProxyConfig{
+							DestinationServiceName: "web",
+							DestinationServiceID: "pod1-web",
+							LocalServiceAddress: "127.0.0.1",
+							LocalServicePort: 8080,
+							Upstreams: []api.Upstream{
+								{
+									DestinationType: api.UpstreamDestTypeService,
+									DestinationName: "upstream1",
+									LocalBindPort: 1234,
+								},
 							},
 						},
+						ServiceMeta: map[string]string{
+							MetaKeyPodName: "pod1",
+							MetaKeyKubeServiceName: "web",
+							MetaKeyKubeNS: "default",
+							MetaKeyManagedBy: managedByValue,
+						},
+						ServiceTags: []string{},
 					},
-					ServiceMeta: map[string]string{
-						MetaKeyPodName: "pod1",
-						MetaKeyKubeServiceName: "web",
-						MetaKeyKubeNS: "default",
-						MetaKeyManagedBy: managedByValue,
-						MetaKeySyntheticNode: "true",
-					},
-					ServiceTags: []string{},
 				},
-				{
-					ServiceID: "pod1-web-admin-sidecar-proxy",
-					ServiceName: "web-admin-sidecar-proxy",
-					ServiceAddress: "1.2.3.4",
-					ServicePort: 20001,
-					ServiceProxy: &api.AgentServiceConnectProxyConfig{
-						DestinationServiceName: "web-admin",
-						DestinationServiceID: "pod1-web-admin",
-						LocalServiceAddress: "127.0.0.1",
-						LocalServicePort: 9090,
-					},
-					ServiceMeta: map[string]string{
-						MetaKeyPodName: "pod1",
-						MetaKeyKubeServiceName: "web-admin",
-						MetaKeyKubeNS: "default",
-						MetaKeyManagedBy: managedByValue,
-						MetaKeySyntheticNode: "true",
+				"web-admin": {
+					{
+						ServiceID: "pod1-web-admin-sidecar-proxy",
+						ServiceName: "web-admin-sidecar-proxy",
+						ServiceAddress: "1.2.3.4",
+						ServicePort: 20001,
+						ServiceProxy: &api.AgentServiceConnectProxyConfig{
+							DestinationServiceName: "web-admin",
+							DestinationServiceID: "pod1-web-admin",
+							LocalServiceAddress: "127.0.0.1",
+							LocalServicePort: 9090,
+						},
+						ServiceMeta: map[string]string{
							MetaKeyPodName: "pod1",
+							MetaKeyKubeServiceName: "web-admin",
+							MetaKeyKubeNS: "default",
+							MetaKeyManagedBy: managedByValue,
+						},
+						ServiceTags: []string{},
 					},
-					ServiceTags: []string{},
 				},
 			},
-			expectedHealthChecks: []*api.HealthCheck{
+			expectedAgentHealthChecks: []*api.AgentCheck{
 				{
-					CheckID: "default/pod1-web",
+					CheckID: "default/pod1-web/kubernetes-health-check",
 					ServiceName: "web",
 					ServiceID: "pod1-web",
-					Name: ConsulKubernetesCheckName,
-					Status: api.HealthPassing,
-					Output: kubernetesSuccessReasonMsg,
-					Type: ConsulKubernetesCheckType,
-				},
-				{
-					CheckID: "default/pod1-web-sidecar-proxy",
-					ServiceName: "web-sidecar-proxy",
-					ServiceID: "pod1-web-sidecar-proxy",
-					Name: ConsulKubernetesCheckName,
+					Name: "Kubernetes Health Check",
 					Status: api.HealthPassing,
 					Output: kubernetesSuccessReasonMsg,
-					Type: ConsulKubernetesCheckType,
+					Type: ttl,
 				},
 				{
-					CheckID: "default/pod1-web-admin",
+					CheckID: "default/pod1-web-admin/kubernetes-health-check",
 					ServiceName: "web-admin",
 					ServiceID: "pod1-web-admin",
-					Name: ConsulKubernetesCheckName,
-					Status: api.HealthPassing,
-					Output: kubernetesSuccessReasonMsg,
-					Type: ConsulKubernetesCheckType,
-				},
-				{
-					CheckID: "default/pod1-web-admin-sidecar-proxy",
-					ServiceName: "web-admin-sidecar-proxy",
-					ServiceID: "pod1-web-admin-sidecar-proxy",
-					Name: ConsulKubernetesCheckName,
+					Name: "Kubernetes Health Check",
 					Status: api.HealthPassing,
 					Output: kubernetesSuccessReasonMsg,
-					Type: ConsulKubernetesCheckType,
+					Type: ttl,
 				},
 			},
 		},
 	}
 	for _, tt := range cases {
 		t.Run(tt.name, func(t *testing.T) {
+			// The agent pod needs to have the address 127.0.0.1 so when the
+			// code gets the agent pods via the label component=client, and
+			// makes requests against the agent API, it will actually hit the
+			// test server we have on localhost.
+			fakeClientPod := createPod("fake-consul-client", "127.0.0.1", false, true)
+			fakeClientPod.Labels = map[string]string{"component": "client", "app": "consul", "release": "consul"}
+
 			// Add the default namespace.
 			ns := corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "default"}}
 			// Create fake k8s client
-			k8sObjects := append(tt.k8sObjects(), &ns)
+			k8sObjects := append(tt.k8sObjects(), fakeClientPod, &ns)
 			fakeClient := fake.NewClientBuilder().WithRuntimeObjects(k8sObjects...).Build()

 			// Create test consul server.
-			testClient := test.TestServerWithMockConnMgrWatcher(t, nil)
-			consulClient := testClient.APIClient
+			consul, err := testutil.NewTestServerConfigT(t, func(c *testutil.TestServerConfig) {
+				c.NodeName = nodeName
+			})
+			require.NoError(t, err)
+			defer consul.Stop()
+			consul.WaitForServiceIntentions(t)
+
+			cfg := &api.Config{
+				Address: consul.HTTPAddr,
+			}
+			consulClient, err := api.NewClient(cfg)
+			require.NoError(t, err)
+			addr := strings.Split(consul.HTTPAddr, ":")
+			consulPort := addr[1]

 			// Register service and proxy in consul.
 			for _, svc := range tt.initialConsulSvcs {
-				catalogRegistration := &api.CatalogRegistration{
-					Node: ConsulNodeName,
-					Address: ConsulNodeAddress,
-					Service: svc,
-				}
-				_, err := consulClient.Catalog().Register(catalogRegistration, nil)
+				err = consulClient.Agent().ServiceRegister(svc)
 				require.NoError(t, err)
 			}
@@ -893,12 +1077,14 @@ func TestReconcileCreateEndpoint_MultiportService(t *testing.T) {
 			ep := &EndpointsController{
 				Client: fakeClient,
 				Log: logrtest.TestLogger{T: t},
-				ConsulClientConfig: testClient.Cfg,
-				ConsulServerConnMgr: testClient.Watcher,
+				ConsulClient: consulClient,
+				ConsulPort: consulPort,
+				ConsulScheme: "http",
 				AllowK8sNamespacesSet: mapset.NewSetWith("*"),
 				DenyK8sNamespacesSet: mapset.NewSetWith(),
 				ReleaseName: "consul",
 				ReleaseNamespace: "default",
+				ConsulClientCfg: cfg,
 			}
 			namespacedName := types.NamespacedName{
 				Namespace: "default",
@@ -922,28 +1108,28 @@ func TestReconcileCreateEndpoint_MultiportService(t *testing.T) {
 			// After reconciliation, Consul should have the service with the correct number of instances
 			svcs := strings.Split(tt.consulSvcName, ",")
-			for i, service := range svcs {
+			for _, service := range svcs {
 				serviceInstances, _, err := consulClient.Catalog().Service(service, "", nil)
 				require.NoError(t, err)
 				require.Len(t, serviceInstances, tt.expectedNumSvcInstances)
-				for _, instance := range serviceInstances {
-					require.Equal(t, tt.expectedConsulSvcInstances[i].ServiceID, instance.ServiceID)
-					require.Equal(t, tt.expectedConsulSvcInstances[i].ServiceName, instance.ServiceName)
-					require.Equal(t, tt.expectedConsulSvcInstances[i].ServiceAddress, instance.ServiceAddress)
-					require.Equal(t, tt.expectedConsulSvcInstances[i].ServicePort, instance.ServicePort)
-					require.Equal(t, tt.expectedConsulSvcInstances[i].ServiceMeta, instance.ServiceMeta)
-					require.Equal(t, tt.expectedConsulSvcInstances[i].ServiceTags, instance.ServiceTags)
+				for i, instance := range serviceInstances {
+					require.Equal(t, tt.expectedConsulSvcInstancesMap[service][i].ServiceID, instance.ServiceID)
+					require.Equal(t, tt.expectedConsulSvcInstancesMap[service][i].ServiceName, instance.ServiceName)
+					require.Equal(t, tt.expectedConsulSvcInstancesMap[service][i].ServiceAddress, instance.ServiceAddress)
+					require.Equal(t, tt.expectedConsulSvcInstancesMap[service][i].ServicePort, instance.ServicePort)
+					require.Equal(t, tt.expectedConsulSvcInstancesMap[service][i].ServiceMeta, instance.ServiceMeta)
+					require.Equal(t, tt.expectedConsulSvcInstancesMap[service][i].ServiceTags, instance.ServiceTags)
 				}
 				proxyServiceInstances, _, err := consulClient.Catalog().Service(fmt.Sprintf("%s-sidecar-proxy", service), "", nil)
 				require.NoError(t, err)
 				require.Len(t, proxyServiceInstances, tt.expectedNumSvcInstances)
-				for _, instance := range proxyServiceInstances {
-					require.Equal(t, tt.expectedProxySvcInstances[i].ServiceID, instance.ServiceID)
-					require.Equal(t, tt.expectedProxySvcInstances[i].ServiceName, instance.ServiceName)
-					require.Equal(t, tt.expectedProxySvcInstances[i].ServiceAddress, instance.ServiceAddress)
-					require.Equal(t, tt.expectedProxySvcInstances[i].ServicePort, instance.ServicePort)
-					require.Equal(t, tt.expectedProxySvcInstances[i].ServiceMeta, instance.ServiceMeta)
-					require.Equal(t, tt.expectedProxySvcInstances[i].ServiceTags, instance.ServiceTags)
+				for i, instance := range proxyServiceInstances {
+					require.Equal(t, tt.expectedProxySvcInstancesMap[service][i].ServiceID, instance.ServiceID)
+					require.Equal(t, tt.expectedProxySvcInstancesMap[service][i].ServiceName, instance.ServiceName)
+					require.Equal(t, tt.expectedProxySvcInstancesMap[service][i].ServiceAddress, instance.ServiceAddress)
+					require.Equal(t, tt.expectedProxySvcInstancesMap[service][i].ServicePort, instance.ServicePort)
+					require.Equal(t, tt.expectedProxySvcInstancesMap[service][i].ServiceMeta, instance.ServiceMeta)
+					require.Equal(t, tt.expectedProxySvcInstancesMap[service][i].ServiceTags, instance.ServiceTags)

 					// When comparing the ServiceProxy field we ignore the DestinationNamespace
 					// field within that struct because on Consul OSS it's set to "" but on Consul Enterprise
@@ -953,44 +1139,57 @@ func TestReconcileCreateEndpoint_MultiportService(t *testing.T) {
 					// To do the comparison that ignores that field we use go-cmp instead
 					// of the regular require.Equal call since it supports ignoring certain
 					// fields.
-					diff := cmp.Diff(tt.expectedProxySvcInstances[i].ServiceProxy, instance.ServiceProxy,
+					diff := cmp.Diff(tt.expectedProxySvcInstancesMap[service][i].ServiceProxy, instance.ServiceProxy,
 						cmpopts.IgnoreFields(api.Upstream{}, "DestinationNamespace", "DestinationPartition"))
 					require.Empty(t, diff, "expected objects to be equal")
 				}
+				_, checkInfos, err := consulClient.Agent().AgentHealthServiceByName(fmt.Sprintf("%s-sidecar-proxy", service))
+				expectedChecks := []string{"Proxy Public Listener", "Destination Alias"}
+				require.NoError(t, err)
+				require.Len(t, checkInfos, tt.expectedNumSvcInstances)
+				for _, checkInfo := range checkInfos {
+					checks := checkInfo.Checks
+					require.Contains(t, expectedChecks, checks[0].Name)
+					require.Contains(t, expectedChecks, checks[1].Name)
+				}
 			}

 			// Check that the Consul health check was created for the k8s pod.
-			for _, expectedCheck := range tt.expectedHealthChecks {
-				checks, _, err := consulClient.Health().Checks(expectedCheck.ServiceName, nil)
-				require.NoError(t, err)
-				require.Equal(t, len(checks), 1)
-				// Ignoring Namespace because the response from ENT includes it and OSS does not.
-				var ignoredFields = []string{"Node", "Definition", "Namespace", "Partition", "CreateIndex", "ModifyIndex", "ServiceTags"}
-				require.True(t, cmp.Equal(checks[0], expectedCheck, cmpopts.IgnoreFields(api.HealthCheck{}, ignoredFields...)))
+			if tt.expectedAgentHealthChecks != nil {
+				for i := range tt.expectedAgentHealthChecks {
+					filter := fmt.Sprintf("CheckID == `%s`", tt.expectedAgentHealthChecks[i].CheckID)
+					check, err := consulClient.Agent().ChecksWithFilter(filter)
+					require.NoError(t, err)
+					require.EqualValues(t, len(check), 1)
+					// Ignoring Namespace because the response from ENT includes it and OSS does not.
+					var ignoredFields = []string{"Node", "Definition", "Namespace", "Partition"}
+					require.True(t, cmp.Equal(check[tt.expectedAgentHealthChecks[i].CheckID], tt.expectedAgentHealthChecks[i], cmpopts.IgnoreFields(api.AgentCheck{}, ignoredFields...)))
+				}
 			}
 		})
 	}
 }

 // TestReconcileCreateEndpoint tests the logic to create service instances in Consul from the addresses in the Endpoints
-// object. This test covers EndpointsController.createServiceRegistrations and EndpointsController.createGatewayRegistrations.
-// This test depends on a Consul binary being present on the host machine.
+// object. The cases test an empty endpoints object, a basic endpoints object with one address, a basic endpoints object
+// with two addresses, and an endpoints object with every possible customization.
+// This test covers EndpointsController.createServiceRegistrations.
 func TestReconcileCreateEndpoint(t *testing.T) {
 	t.Parallel()
+	nodeName := "test-node"
 	cases := []struct {
 		name string
-		svcName string
 		consulSvcName string
 		k8sObjects func() []runtime.Object
+		initialConsulSvcs []*api.AgentServiceRegistration
+		expectedNumSvcInstances int
 		expectedConsulSvcInstances []*api.CatalogService
 		expectedProxySvcInstances []*api.CatalogService
-		expectedHealthChecks []*api.HealthCheck
-		metricsEnabled bool
+		expectedAgentHealthChecks []*api.AgentCheck
 		expErr string
 	}{
 		{
 			name: "Empty endpoints",
-			svcName: "service-created",
 			consulSvcName: "service-created",
 			k8sObjects: func() []runtime.Object {
 				endpoint := &corev1.Endpoints{
@@ -1006,16 +1205,17 @@ func TestReconcileCreateEndpoint(t *testing.T) {
 				}
 				return []runtime.Object{endpoint}
 			},
-			expectedConsulSvcInstances: nil,
-			expectedProxySvcInstances: nil,
-			expectedHealthChecks: nil,
+			initialConsulSvcs: []*api.AgentServiceRegistration{},
+			expectedNumSvcInstances: 0,
+			expectedConsulSvcInstances: []*api.CatalogService{},
+			expectedProxySvcInstances: []*api.CatalogService{},
+			expectedAgentHealthChecks: nil,
 		},
 		{
 			name: "Basic endpoints",
-			svcName: "service-created",
 			consulSvcName: "service-created",
 			k8sObjects: func() []runtime.Object {
-				pod1 := createServicePod("pod1", "1.2.3.4", true, true)
+				pod1 := createPod("pod1", "1.2.3.4", true, true)
 				endpoint := &corev1.Endpoints{
 					ObjectMeta: metav1.ObjectMeta{
 						Name: "service-created",
@@ -1025,7 +1225,8 @@ func TestReconcileCreateEndpoint(t *testing.T) {
 					{
 						Addresses: []corev1.EndpointAddress{
 							{
-								IP: "1.2.3.4",
+								IP: "1.2.3.4",
+								NodeName: &nodeName,
 								TargetRef: &corev1.ObjectReference{
 									Kind: "Pod",
 									Name: "pod1",
@@ -1038,15 +1239,16 @@ func TestReconcileCreateEndpoint(t *testing.T) {
 				}
 				return []runtime.Object{pod1, endpoint}
 			},
+			initialConsulSvcs: []*api.AgentServiceRegistration{},
+			expectedNumSvcInstances: 1,
 			expectedConsulSvcInstances: []*api.CatalogService{
 				{
 					ServiceID: "pod1-service-created",
 					ServiceName: "service-created",
 					ServiceAddress: "1.2.3.4",
 					ServicePort: 0,
-					ServiceMeta: map[string]string{MetaKeyPodName: "pod1", MetaKeyKubeServiceName: "service-created", MetaKeyKubeNS: "default", MetaKeyManagedBy: managedByValue, MetaKeySyntheticNode: "true"},
+					ServiceMeta: map[string]string{MetaKeyPodName: "pod1", MetaKeyKubeServiceName: "service-created", MetaKeyKubeNS: "default", MetaKeyManagedBy: managedByValue},
 					ServiceTags: []string{},
-					ServiceProxy: &api.AgentServiceConnectProxyConfig{},
 				},
 			},
 			expectedProxySvcInstances: []*api.CatalogService{
@@ -1061,123 +1263,51 @@ func TestReconcileCreateEndpoint(t *testing.T) {
 						LocalServiceAddress: "",
 						LocalServicePort: 0,
 					},
-					ServiceMeta: map[string]string{MetaKeyPodName: "pod1", MetaKeyKubeServiceName: "service-created", MetaKeyKubeNS: "default", MetaKeyManagedBy: managedByValue, MetaKeySyntheticNode: "true"},
+					ServiceMeta: map[string]string{MetaKeyPodName: "pod1", MetaKeyKubeServiceName: "service-created", MetaKeyKubeNS: "default", MetaKeyManagedBy: managedByValue},
 					ServiceTags: []string{},
 				},
 			},
-			expectedHealthChecks: []*api.HealthCheck{
+			expectedAgentHealthChecks: []*api.AgentCheck{
 				{
-					CheckID: "default/pod1-service-created",
+					CheckID: "default/pod1-service-created/kubernetes-health-check",
 					ServiceName: "service-created",
 					ServiceID: "pod1-service-created",
-					Name: ConsulKubernetesCheckName,
-					Status: api.HealthPassing,
-					Output: kubernetesSuccessReasonMsg,
-					Type: ConsulKubernetesCheckType,
-				},
-				{
-					CheckID: "default/pod1-service-created-sidecar-proxy",
-					ServiceName: "service-created-sidecar-proxy",
-					ServiceID: "pod1-service-created-sidecar-proxy",
-					Name: ConsulKubernetesCheckName,
+					Name: "Kubernetes Health Check",
 					Status: api.HealthPassing,
 					Output: kubernetesSuccessReasonMsg,
-					Type: ConsulKubernetesCheckType,
+					Type: ttl,
 				},
 			},
 		},
 		{
-			name: "Mesh Gateway",
-			svcName: "mesh-gateway",
-			consulSvcName: "mesh-gateway",
+			name: "Endpoints with multiple addresses",
+			consulSvcName: "service-created",
 			k8sObjects: func() []runtime.Object {
-				gateway := createGatewayPod("mesh-gateway", "1.2.3.4", map[string]string{
-					annotationGatewayConsulServiceName: "mesh-gateway",
-					annotationGatewayWANSource: "Static",
-					annotationGatewayWANAddress: "2.3.4.5",
-					annotationGatewayWANPort: "443",
-					annotationMeshGatewayContainerPort: "8443",
-					annotationGatewayKind: MeshGateway})
-				endpoint := &corev1.Endpoints{
+				pod1 := createPod("pod1", "1.2.3.4", true, true)
+				pod2 := createPod("pod2", "2.2.3.4", true, true)
+				endpointWithTwoAddresses := &corev1.Endpoints{
 					ObjectMeta: metav1.ObjectMeta{
-						Name: "mesh-gateway",
+						Name: "service-created",
 						Namespace: "default",
 					},
 					Subsets: []corev1.EndpointSubset{
 						{
 							Addresses: []corev1.EndpointAddress{
 								{
-									IP: "1.2.3.4",
+									IP: "1.2.3.4",
+									NodeName: &nodeName,
 									TargetRef: &corev1.ObjectReference{
 										Kind: "Pod",
-										Name: "mesh-gateway",
+										Name: "pod1",
 										Namespace: "default",
 									},
 								},
-							},
-						},
-					},
-				}
-				return []runtime.Object{gateway, endpoint}
-			},
-			expectedConsulSvcInstances: []*api.CatalogService{
-				{
-					ServiceID: "mesh-gateway",
-					ServiceName: "mesh-gateway",
-					ServiceAddress: "1.2.3.4",
-					ServicePort: 8443,
-					ServiceMeta: map[string]string{MetaKeyPodName: "mesh-gateway", MetaKeyKubeServiceName: "mesh-gateway", MetaKeyKubeNS: "default", MetaKeyManagedBy: managedByValue, MetaKeySyntheticNode: "true"},
-					ServiceTags: []string{},
-					ServiceTaggedAddresses: map[string]api.ServiceAddress{
-						"lan": {
-							Address: "1.2.3.4",
-							Port: 8443,
-						},
-						"wan": {
-							Address: "2.3.4.5",
-							Port: 443,
-						},
-					},
-					ServiceProxy: &api.AgentServiceConnectProxyConfig{},
-				},
-			},
-			expectedHealthChecks: []*api.HealthCheck{
-				{
-					CheckID: "default/mesh-gateway",
-					ServiceName: "mesh-gateway",
-					ServiceID: "mesh-gateway",
-					Name: ConsulKubernetesCheckName,
-					Status: api.HealthPassing,
-					Output: kubernetesSuccessReasonMsg,
-					Type: ConsulKubernetesCheckType,
-				},
-			},
-		},
-		{
-			name: "Mesh Gateway with Metrics enabled",
-			svcName: "mesh-gateway",
-			consulSvcName: "mesh-gateway",
-			k8sObjects: func() []runtime.Object {
-				gateway := createGatewayPod("mesh-gateway", "1.2.3.4", map[string]string{
-					annotationGatewayConsulServiceName: "mesh-gateway",
-					annotationGatewayWANSource: "Static",
-					annotationGatewayWANAddress: "2.3.4.5",
-					annotationGatewayWANPort: "443",
-					annotationMeshGatewayContainerPort: "8443",
-					annotationGatewayKind: MeshGateway})
-				endpoint := &corev1.Endpoints{
-					ObjectMeta: metav1.ObjectMeta{
-						Name: "mesh-gateway",
-						Namespace: "default",
-					},
-					Subsets: []corev1.EndpointSubset{
-						{
-							Addresses: []corev1.EndpointAddress{
 								{
-									IP: "1.2.3.4",
+									IP: "2.2.3.4",
+									NodeName: &nodeName,
 									TargetRef: &corev1.ObjectReference{
 										Kind: "Pod",
-										Name: "mesh-gateway",
+										Name: "pod2",
 										Namespace: "default",
 									},
 								},
@@ -1185,426 +1315,26 @@ func TestReconcileCreateEndpoint(t *testing.T) {
 						},
 					},
 				}
-				return []runtime.Object{gateway, endpoint}
+				return []runtime.Object{pod1, pod2, endpointWithTwoAddresses}
 			},
+			initialConsulSvcs: []*api.AgentServiceRegistration{},
+			expectedNumSvcInstances: 2,
 			expectedConsulSvcInstances: []*api.CatalogService{
 				{
-					ServiceID: "mesh-gateway",
-					ServiceName: "mesh-gateway",
+					ServiceID: "pod1-service-created",
+					ServiceName: "service-created",
 					ServiceAddress: "1.2.3.4",
-					ServicePort: 8443,
-					ServiceMeta: map[string]string{MetaKeyPodName: "mesh-gateway", MetaKeyKubeServiceName: "mesh-gateway", MetaKeyKubeNS: "default", MetaKeyManagedBy: managedByValue, MetaKeySyntheticNode: "true"},
+					ServicePort: 0,
+					ServiceMeta: map[string]string{MetaKeyPodName: "pod1", MetaKeyKubeServiceName: "service-created", MetaKeyKubeNS: "default", MetaKeyManagedBy: managedByValue},
 					ServiceTags: []string{},
-					ServiceTaggedAddresses: map[string]api.ServiceAddress{
-						"lan": {
-							Address: "1.2.3.4",
-							Port: 8443,
-						},
-						"wan": {
-							Address: "2.3.4.5",
-							Port: 443,
-						},
-					},
-					ServiceProxy: &api.AgentServiceConnectProxyConfig{
-						Config: map[string]interface{}{
-							"envoy_prometheus_bind_addr": "1.2.3.4:20200",
-						},
-					},
-				},
-			},
-			expectedHealthChecks: []*api.HealthCheck{
-				{
-					CheckID: "default/mesh-gateway",
-					ServiceName: "mesh-gateway",
-					ServiceID: "mesh-gateway",
-					Name: ConsulKubernetesCheckName,
-					Status: api.HealthPassing,
-					Output: kubernetesSuccessReasonMsg,
-					Type: ConsulKubernetesCheckType,
-				},
-			},
-			metricsEnabled: true,
-		},
-		{
-			name: "Terminating Gateway",
-			svcName: "terminating-gateway",
-			consulSvcName: "terminating-gateway",
-			k8sObjects: func() []runtime.Object {
-				gateway := createGatewayPod("terminating-gateway", "1.2.3.4", map[string]string{
-					annotationGatewayKind: TerminatingGateway,
-					annotationGatewayConsulServiceName: "terminating-gateway",
-				})
-				endpoint := &corev1.Endpoints{
-					ObjectMeta: metav1.ObjectMeta{
-						Name: "terminating-gateway",
-						Namespace: "default",
-					},
-					Subsets: []corev1.EndpointSubset{
-						{
-							Addresses: []corev1.EndpointAddress{
-								{
-									IP: "1.2.3.4",
-									TargetRef: &corev1.ObjectReference{
-										Kind: "Pod",
-										Name: "terminating-gateway",
-										Namespace: "default",
-									},
-								},
-							},
-						},
-					},
-				}
-				return []runtime.Object{gateway, endpoint}
-			},
-			expectedConsulSvcInstances: []*api.CatalogService{
-				{
-					ServiceID: "terminating-gateway",
-					ServiceName: "terminating-gateway",
-					ServiceAddress: "1.2.3.4",
-					ServicePort: 8443,
-					ServiceMeta: map[string]string{
-						MetaKeyPodName: "terminating-gateway",
-						MetaKeyKubeServiceName: "terminating-gateway",
-						MetaKeyKubeNS: "default",
-						MetaKeyManagedBy: managedByValue,
-						MetaKeySyntheticNode: "true",
-					},
-					ServiceTags: []string{},
-					ServiceProxy: &api.AgentServiceConnectProxyConfig{},
-				},
-			},
-			expectedHealthChecks: []*api.HealthCheck{
-				{
-					CheckID: "default/terminating-gateway",
-					ServiceName: "terminating-gateway",
-					ServiceID: "terminating-gateway",
-					Name: ConsulKubernetesCheckName,
-					Status: api.HealthPassing,
-					Output: kubernetesSuccessReasonMsg,
-					Type: ConsulKubernetesCheckType,
-				},
-			},
-		},
-		{
-			name: "Terminating Gateway with Metrics enabled",
-			metricsEnabled: true,
-			svcName: "terminating-gateway",
-			consulSvcName: "terminating-gateway",
-			k8sObjects: func() []runtime.Object {
-				gateway := createGatewayPod("terminating-gateway", "1.2.3.4", map[string]string{
-					annotationGatewayKind: TerminatingGateway,
-					annotationGatewayConsulServiceName: "terminating-gateway",
-				})
-				endpoint := &corev1.Endpoints{
-					ObjectMeta: metav1.ObjectMeta{
-						Name: "terminating-gateway",
-						Namespace: "default",
-					},
-					Subsets: []corev1.EndpointSubset{
-						{
-							Addresses: []corev1.EndpointAddress{
-								{
-									IP: "1.2.3.4",
-									TargetRef: &corev1.ObjectReference{
-										Kind: "Pod",
-										Name: "terminating-gateway",
-										Namespace: "default",
-									},
-								},
-							},
-						},
-					},
-				}
-				return []runtime.Object{gateway, endpoint}
-			},
-			expectedConsulSvcInstances: []*api.CatalogService{
-				{
-					ServiceID: "terminating-gateway",
-					ServiceName: "terminating-gateway",
-					ServiceAddress: "1.2.3.4",
-					ServicePort: 8443,
-					ServiceMeta: map[string]string{
-						MetaKeyPodName: "terminating-gateway",
-						MetaKeyKubeServiceName: "terminating-gateway",
-						MetaKeyKubeNS: "default",
-						MetaKeyManagedBy: managedByValue,
-						MetaKeySyntheticNode: "true",
-					},
-					ServiceTags: []string{},
-					ServiceProxy: &api.AgentServiceConnectProxyConfig{
-						Config: map[string]interface{}{
-							"envoy_prometheus_bind_addr": "1.2.3.4:20200",
-						},
-					},
-				},
-			},
-			expectedHealthChecks: []*api.HealthCheck{
-				{
-					CheckID: "default/terminating-gateway",
-					ServiceName: "terminating-gateway",
-					ServiceID: "terminating-gateway",
-					Name: ConsulKubernetesCheckName,
-					Status: api.HealthPassing,
-					Output: kubernetesSuccessReasonMsg,
-					Type: ConsulKubernetesCheckType,
-				},
-			},
-		},
-		{
-			name: "Ingress Gateway",
-			svcName: "ingress-gateway",
-			consulSvcName: "ingress-gateway",
-			k8sObjects: func() []runtime.Object {
-				gateway := createGatewayPod("ingress-gateway", "1.2.3.4", map[string]string{
-					annotationGatewayConsulServiceName: "ingress-gateway",
-					annotationGatewayKind: IngressGateway,
-					annotationGatewayWANSource: "Service",
-					annotationGatewayWANPort: "8443",
-				})
-				endpoint := &corev1.Endpoints{
-					ObjectMeta: metav1.ObjectMeta{
-						Name: "ingress-gateway",
-						Namespace: "default",
-					},
-					Subsets: []corev1.EndpointSubset{
-						{
-							Addresses: []corev1.EndpointAddress{
-								{
-									IP: "1.2.3.4",
-									TargetRef: &corev1.ObjectReference{
-										Kind: "Pod",
-										Name: "ingress-gateway",
-										Namespace: "default",
-									},
-								},
-							},
-						},
-					},
-				}
-				svc := &corev1.Service{
-					ObjectMeta: metav1.ObjectMeta{
-						Name: "ingress-gateway",
-						Namespace: "default",
-					},
-					Spec: corev1.ServiceSpec{
-						Type: corev1.ServiceTypeLoadBalancer,
-					},
-					Status: corev1.ServiceStatus{
-						LoadBalancer: corev1.LoadBalancerStatus{
-							Ingress: []corev1.LoadBalancerIngress{
-								{
-									IP: "5.6.7.8",
-								},
-							},
-						},
-					},
-				}
-				return []runtime.Object{gateway, endpoint, svc}
-			},
-			expectedConsulSvcInstances: []*api.CatalogService{
-				{
-					ServiceID: "ingress-gateway",
-					ServiceName: "ingress-gateway",
-					ServiceAddress: "1.2.3.4",
-					ServicePort: 21000,
-					ServiceMeta: map[string]string{
-						MetaKeyPodName: "ingress-gateway",
-						MetaKeyKubeServiceName: "ingress-gateway",
-						MetaKeyKubeNS: "default",
-						MetaKeyManagedBy: managedByValue,
-						MetaKeySyntheticNode: "true",
-					},
-					ServiceTags: []string{},
-					ServiceTaggedAddresses: map[string]api.ServiceAddress{
-						"lan": {
-							Address: "1.2.3.4",
-							Port: 21000,
-						},
-						"wan": {
-							Address: "5.6.7.8",
-							Port: 8443,
-						},
-					},
-					ServiceProxy: &api.AgentServiceConnectProxyConfig{
-						Config: map[string]interface{}{
-							"envoy_gateway_no_default_bind": true,
-							"envoy_gateway_bind_addresses": map[string]interface{}{
-								"all-interfaces": map[string]interface{}{
-									"address": "0.0.0.0",
-								},
-							},
-						},
-					},
-				},
-			},
-			expectedHealthChecks: []*api.HealthCheck{
-				{
-					CheckID: "default/ingress-gateway",
-					ServiceName: "ingress-gateway",
-					ServiceID: "ingress-gateway",
-					Name: ConsulKubernetesCheckName,
-					Status: api.HealthPassing,
-					Output: kubernetesSuccessReasonMsg,
-					Type: ConsulKubernetesCheckType,
-				},
-			},
-		},
-		{
-			name: "Ingress Gateway with Metrics enabled",
-			metricsEnabled: true,
-			svcName: "ingress-gateway",
-			consulSvcName: "ingress-gateway",
-			k8sObjects: func() []runtime.Object {
-				gateway := createGatewayPod("ingress-gateway", "1.2.3.4", map[string]string{
-					annotationGatewayConsulServiceName: "ingress-gateway",
-					annotationGatewayKind: IngressGateway,
-					annotationGatewayWANSource: "Service",
-					annotationGatewayWANPort: "8443",
-				})
-				endpoint := &corev1.Endpoints{
-					ObjectMeta: metav1.ObjectMeta{
-						Name: "ingress-gateway",
-						Namespace: "default",
-					},
-					Subsets: []corev1.EndpointSubset{
-						{
-							Addresses: []corev1.EndpointAddress{
-								{
-									IP: "1.2.3.4",
-									TargetRef: &corev1.ObjectReference{
-										Kind: "Pod",
-										Name: "ingress-gateway",
-										Namespace: "default",
-									},
-								},
-							},
-						},
-					},
-				}
-				svc := &corev1.Service{
-					ObjectMeta: metav1.ObjectMeta{
-						Name: "ingress-gateway",
-						Namespace: "default",
-					},
-					Spec: corev1.ServiceSpec{
-						Type: corev1.ServiceTypeLoadBalancer,
-					},
-					Status: corev1.ServiceStatus{
-						LoadBalancer: corev1.LoadBalancerStatus{
-							Ingress: []corev1.LoadBalancerIngress{
-								{
-									IP: "5.6.7.8",
-								},
-							},
-						},
-					},
-				}
-				return []runtime.Object{gateway, endpoint, svc}
-			},
-			expectedConsulSvcInstances: []*api.CatalogService{
-				{
-					ServiceID: "ingress-gateway",
-					ServiceName: "ingress-gateway",
-					ServiceAddress: "1.2.3.4",
-					ServicePort: 21000,
-					ServiceMeta: map[string]string{
-						MetaKeyPodName: "ingress-gateway",
-						MetaKeyKubeServiceName: "ingress-gateway",
-						MetaKeyKubeNS: "default",
-						MetaKeyManagedBy: managedByValue,
-						MetaKeySyntheticNode: "true",
-					},
-					ServiceTags: []string{},
-					ServiceTaggedAddresses: map[string]api.ServiceAddress{
-						"lan": {
-							Address: "1.2.3.4",
-							Port: 21000,
-						},
-						"wan": {
-							Address: "5.6.7.8",
-							Port: 8443,
-						},
-					},
-					ServiceProxy: &api.AgentServiceConnectProxyConfig{
-						Config: map[string]interface{}{
-							"envoy_gateway_no_default_bind": true,
-							"envoy_gateway_bind_addresses": map[string]interface{}{
-								"all-interfaces": map[string]interface{}{
-									"address": "0.0.0.0",
-								},
-							},
-							"envoy_prometheus_bind_addr": "1.2.3.4:20200",
-						},
-					},
-				},
-			},
-			expectedHealthChecks: []*api.HealthCheck{
-				{
-					CheckID: "default/ingress-gateway",
-					ServiceName: "ingress-gateway",
-					ServiceID: "ingress-gateway",
-					Name: ConsulKubernetesCheckName,
-					Status: api.HealthPassing,
-					Output: kubernetesSuccessReasonMsg,
-					Type: ConsulKubernetesCheckType,
-				},
-			},
-		},
-		{
-			name: "Endpoints with multiple addresses",
-			svcName: "service-created",
-			consulSvcName: "service-created",
-			k8sObjects: func() []runtime.Object {
-				pod1 := createServicePod("pod1", "1.2.3.4", true, true)
-				pod2 := createServicePod("pod2", "2.2.3.4", true, true)
-				endpointWithTwoAddresses := &corev1.Endpoints{
-					ObjectMeta: metav1.ObjectMeta{
-						Name: "service-created",
-						Namespace: "default",
-					},
-					Subsets: []corev1.EndpointSubset{
-						{
-							Addresses: []corev1.EndpointAddress{
-								{
-									IP: "1.2.3.4",
-									TargetRef: &corev1.ObjectReference{
-										Kind: "Pod",
-										Name: "pod1",
-										Namespace: "default",
-									},
-								},
-								{
-									IP: "2.2.3.4",
-									TargetRef: &corev1.ObjectReference{
-										Kind: "Pod",
-										Name: "pod2",
-										Namespace: "default",
-									},
-								},
-							},
-						},
-					},
-				}
-				return []runtime.Object{pod1, pod2, endpointWithTwoAddresses}
-			},
-			expectedConsulSvcInstances: []*api.CatalogService{
-				{
-					ServiceID: "pod1-service-created",
-					ServiceName: "service-created",
-					ServiceAddress: "1.2.3.4",
-					ServicePort: 0,
-					ServiceMeta: map[string]string{MetaKeyPodName: "pod1", MetaKeyKubeServiceName: "service-created", MetaKeyKubeNS: "default", MetaKeyManagedBy: managedByValue, MetaKeySyntheticNode: "true"},
-					ServiceTags: []string{},
-					ServiceProxy: &api.AgentServiceConnectProxyConfig{},
 				},
 				{
 					ServiceID: "pod2-service-created",
 					ServiceName: "service-created",
 					ServiceAddress: "2.2.3.4",
 					ServicePort: 0,
-					ServiceMeta: map[string]string{MetaKeyPodName: "pod2", MetaKeyKubeServiceName: "service-created", MetaKeyKubeNS: "default", MetaKeyManagedBy: managedByValue, MetaKeySyntheticNode: "true"},
+					ServiceMeta: map[string]string{MetaKeyPodName: "pod2", MetaKeyKubeServiceName: "service-created", MetaKeyKubeNS: "default", MetaKeyManagedBy: managedByValue},
 					ServiceTags: []string{},
-					ServiceProxy: &api.AgentServiceConnectProxyConfig{},
 				},
 			},
 			expectedProxySvcInstances: []*api.CatalogService{
@@ -1619,7 +1349,7 @@ func TestReconcileCreateEndpoint(t *testing.T) {
 						LocalServiceAddress: "",
 						LocalServicePort: 0,
 					},
-					ServiceMeta: map[string]string{MetaKeyPodName: "pod1", MetaKeyKubeServiceName: "service-created", MetaKeyKubeNS: "default", MetaKeyManagedBy: managedByValue, MetaKeySyntheticNode: "true"},
+					ServiceMeta: map[string]string{MetaKeyPodName: "pod1", MetaKeyKubeServiceName: "service-created", MetaKeyKubeNS: "default", MetaKeyManagedBy: managedByValue},
 					ServiceTags: []string{},
 				},
 				{
@@ -1633,46 +1363,28 @@ func TestReconcileCreateEndpoint(t *testing.T) {
 						LocalServiceAddress: "",
 						LocalServicePort: 0,
 					},
-					ServiceMeta: map[string]string{MetaKeyPodName: "pod2", MetaKeyKubeServiceName: "service-created", MetaKeyKubeNS: "default", MetaKeyManagedBy: managedByValue, MetaKeySyntheticNode: "true"},
+					ServiceMeta: map[string]string{MetaKeyPodName: "pod2", MetaKeyKubeServiceName: "service-created", MetaKeyKubeNS: "default", MetaKeyManagedBy: managedByValue},
 					ServiceTags: []string{},
 				},
 			},
-			expectedHealthChecks: []*api.HealthCheck{
+			expectedAgentHealthChecks: []*api.AgentCheck{
 				{
-					CheckID: "default/pod1-service-created",
+					CheckID: "default/pod1-service-created/kubernetes-health-check",
 					ServiceName: "service-created",
 					ServiceID: "pod1-service-created",
-					Name: ConsulKubernetesCheckName,
-					Status: api.HealthPassing,
-					Output: kubernetesSuccessReasonMsg,
-					Type: ConsulKubernetesCheckType,
-				},
-				{
-					CheckID: "default/pod1-service-created-sidecar-proxy",
-					ServiceName: "service-created-sidecar-proxy",
-					ServiceID: "pod1-service-created-sidecar-proxy",
-					Name: ConsulKubernetesCheckName,
+					Name: "Kubernetes Health Check",
 					Status: api.HealthPassing,
 					Output: kubernetesSuccessReasonMsg,
-					Type: ConsulKubernetesCheckType,
+					Type: ttl,
 				},
 				{
-					CheckID: "default/pod2-service-created",
+					CheckID: "default/pod2-service-created/kubernetes-health-check",
 					ServiceName: "service-created",
 					ServiceID: "pod2-service-created",
-					Name: ConsulKubernetesCheckName,
-					Status: api.HealthPassing,
-					Output: kubernetesSuccessReasonMsg,
-					Type: ConsulKubernetesCheckType,
-				},
-				{
-					CheckID: "default/pod2-service-created-sidecar-proxy",
-					ServiceName: "service-created-sidecar-proxy",
-					ServiceID: "pod2-service-created-sidecar-proxy",
-					Name: ConsulKubernetesCheckName,
+					Name: "Kubernetes Health Check",
 					Status: api.HealthPassing,
 					Output: kubernetesSuccessReasonMsg,
-					Type: ConsulKubernetesCheckType,
+					Type: ttl,
 				},
 			},
 		},
@@ -1681,11 +1393,10 @@ func TestReconcileCreateEndpoint(t *testing.T) {
 		// on the invalid address but continue and process the other addresses. We check for error specific to
 		// pod3 being non-existent at the end, and validate the other 2 addresses have service instances.
name: "Endpoints with multiple addresses but one is invalid", - svcName: "service-created", consulSvcName: "service-created", k8sObjects: func() []runtime.Object { - pod1 := createServicePod("pod1", "1.2.3.4", true, true) - pod2 := createServicePod("pod2", "2.2.3.4", true, true) + pod1 := createPod("pod1", "1.2.3.4", true, true) + pod2 := createPod("pod2", "2.2.3.4", true, true) endpointWithTwoAddresses := &corev1.Endpoints{ ObjectMeta: metav1.ObjectMeta{ Name: "service-created", @@ -1696,7 +1407,8 @@ func TestReconcileCreateEndpoint(t *testing.T) { Addresses: []corev1.EndpointAddress{ // This is an invalid address because pod3 will not exist in k8s. { - IP: "9.9.9.9", + IP: "9.9.9.9", + NodeName: &nodeName, TargetRef: &corev1.ObjectReference{ Kind: "Pod", Name: "pod3", @@ -1705,7 +1417,8 @@ func TestReconcileCreateEndpoint(t *testing.T) { }, // The next two are valid addresses. { - IP: "1.2.3.4", + IP: "1.2.3.4", + NodeName: &nodeName, TargetRef: &corev1.ObjectReference{ Kind: "Pod", Name: "pod1", @@ -1713,7 +1426,8 @@ func TestReconcileCreateEndpoint(t *testing.T) { }, }, { - IP: "2.2.3.4", + IP: "2.2.3.4", + NodeName: &nodeName, TargetRef: &corev1.ObjectReference{ Kind: "Pod", Name: "pod2", @@ -1726,24 +1440,24 @@ func TestReconcileCreateEndpoint(t *testing.T) { } return []runtime.Object{pod1, pod2, endpointWithTwoAddresses} }, + initialConsulSvcs: []*api.AgentServiceRegistration{}, + expectedNumSvcInstances: 2, expectedConsulSvcInstances: []*api.CatalogService{ { ServiceID: "pod1-service-created", ServiceName: "service-created", ServiceAddress: "1.2.3.4", ServicePort: 0, - ServiceMeta: map[string]string{MetaKeyPodName: "pod1", MetaKeyKubeServiceName: "service-created", MetaKeyKubeNS: "default", MetaKeyManagedBy: managedByValue, MetaKeySyntheticNode: "true"}, + ServiceMeta: map[string]string{MetaKeyPodName: "pod1", MetaKeyKubeServiceName: "service-created", MetaKeyKubeNS: "default", MetaKeyManagedBy: managedByValue}, ServiceTags: []string{}, - ServiceProxy: &api.AgentServiceConnectProxyConfig{}, }, { ServiceID: "pod2-service-created", ServiceName: "service-created", ServiceAddress: "2.2.3.4", ServicePort: 0, - ServiceMeta: map[string]string{MetaKeyPodName: "pod2", MetaKeyKubeServiceName: "service-created", MetaKeyKubeNS: "default", MetaKeyManagedBy: managedByValue, MetaKeySyntheticNode: "true"}, + ServiceMeta: map[string]string{MetaKeyPodName: "pod2", MetaKeyKubeServiceName: "service-created", MetaKeyKubeNS: "default", MetaKeyManagedBy: managedByValue}, ServiceTags: []string{}, - ServiceProxy: &api.AgentServiceConnectProxyConfig{}, }, }, expectedProxySvcInstances: []*api.CatalogService{ @@ -1758,7 +1472,7 @@ func TestReconcileCreateEndpoint(t *testing.T) { LocalServiceAddress: "", LocalServicePort: 0, }, - ServiceMeta: map[string]string{MetaKeyPodName: "pod1", MetaKeyKubeServiceName: "service-created", MetaKeyKubeNS: "default", MetaKeyManagedBy: managedByValue, MetaKeySyntheticNode: "true"}, + ServiceMeta: map[string]string{MetaKeyPodName: "pod1", MetaKeyKubeServiceName: "service-created", MetaKeyKubeNS: "default", MetaKeyManagedBy: managedByValue}, ServiceTags: []string{}, }, { @@ -1772,47 +1486,37 @@ func TestReconcileCreateEndpoint(t *testing.T) { LocalServiceAddress: "", LocalServicePort: 0, }, - ServiceMeta: map[string]string{MetaKeyPodName: "pod2", MetaKeyKubeServiceName: "service-created", MetaKeyKubeNS: "default", MetaKeyManagedBy: managedByValue, MetaKeySyntheticNode: "true"}, + ServiceMeta: map[string]string{MetaKeyPodName: "pod2", MetaKeyKubeServiceName: "service-created", 
MetaKeyKubeNS: "default", MetaKeyManagedBy: managedByValue}, ServiceTags: []string{}, }, }, - expectedHealthChecks: []*api.HealthCheck{ + expectedAgentHealthChecks: []*api.AgentCheck{ { - CheckID: "default/pod1-service-created", + CheckID: "default/pod1-service-created/kubernetes-health-check", ServiceName: "service-created", ServiceID: "pod1-service-created", - Name: ConsulKubernetesCheckName, - Status: api.HealthPassing, - Output: kubernetesSuccessReasonMsg, - Type: ConsulKubernetesCheckType, - }, - { - CheckID: "default/pod1-service-created-sidecar-proxy", - ServiceName: "service-created-sidecar-proxy", - ServiceID: "pod1-service-created-sidecar-proxy", - Name: ConsulKubernetesCheckName, + Name: "Kubernetes Health Check", Status: api.HealthPassing, Output: kubernetesSuccessReasonMsg, - Type: ConsulKubernetesCheckType, + Type: ttl, }, { - CheckID: "default/pod2-service-created-sidecar-proxy", - ServiceName: "service-created-sidecar-proxy", - ServiceID: "pod2-service-created-sidecar-proxy", - Name: ConsulKubernetesCheckName, + CheckID: "default/pod2-service-created/kubernetes-health-check", + ServiceName: "service-created", + ServiceID: "pod2-service-created", + Name: "Kubernetes Health Check", Status: api.HealthPassing, Output: kubernetesSuccessReasonMsg, - Type: ConsulKubernetesCheckType, + Type: ttl, }, }, expErr: "1 error occurred:\n\t* pods \"pod3\" not found\n\n", }, { name: "Every configurable field set: port, different Consul service name, meta, tags, upstreams, metrics", - svcName: "service-created", consulSvcName: "different-consul-svc-name", k8sObjects: func() []runtime.Object { - pod1 := createServicePod("pod1", "1.2.3.4", true, true) + pod1 := createPod("pod1", "1.2.3.4", true, true) pod1.Annotations[annotationPort] = "1234" pod1.Annotations[annotationService] = "different-consul-svc-name" pod1.Annotations[fmt.Sprintf("%sname", annotationMeta)] = "abc" @@ -1832,7 +1536,8 @@ func TestReconcileCreateEndpoint(t *testing.T) { { Addresses: []corev1.EndpointAddress{ { - IP: "1.2.3.4", + IP: "1.2.3.4", + NodeName: &nodeName, TargetRef: &corev1.ObjectReference{ Kind: "Pod", Name: "pod1", @@ -1845,6 +1550,8 @@ func TestReconcileCreateEndpoint(t *testing.T) { } return []runtime.Object{pod1, endpoint} }, + initialConsulSvcs: []*api.AgentServiceRegistration{}, + expectedNumSvcInstances: 1, expectedConsulSvcInstances: []*api.CatalogService{ { ServiceID: "pod1-different-consul-svc-name", @@ -1859,10 +1566,8 @@ func TestReconcileCreateEndpoint(t *testing.T) { MetaKeyKubeServiceName: "service-created", MetaKeyKubeNS: "default", MetaKeyManagedBy: managedByValue, - MetaKeySyntheticNode: "true", }, - ServiceTags: []string{"abc,123", "pod1", "def,456", "pod1"}, - ServiceProxy: &api.AgentServiceConnectProxyConfig{}, + ServiceTags: []string{"abc,123", "pod1", "def,456", "pod1"}, }, }, expectedProxySvcInstances: []*api.CatalogService{ @@ -1895,29 +1600,19 @@ func TestReconcileCreateEndpoint(t *testing.T) { MetaKeyKubeServiceName: "service-created", MetaKeyKubeNS: "default", MetaKeyManagedBy: managedByValue, - MetaKeySyntheticNode: "true", }, ServiceTags: []string{"abc,123", "pod1", "def,456", "pod1"}, }, }, - expectedHealthChecks: []*api.HealthCheck{ + expectedAgentHealthChecks: []*api.AgentCheck{ { - CheckID: "default/pod1-different-consul-svc-name", + CheckID: "default/pod1-different-consul-svc-name/kubernetes-health-check", ServiceName: "different-consul-svc-name", ServiceID: "pod1-different-consul-svc-name", - Name: ConsulKubernetesCheckName, - Status: api.HealthPassing, - Output: 
kubernetesSuccessReasonMsg, - Type: ConsulKubernetesCheckType, - }, - { - CheckID: "default/pod1-different-consul-svc-name-sidecar-proxy", - ServiceName: "different-consul-svc-name-sidecar-proxy", - ServiceID: "pod1-different-consul-svc-name-sidecar-proxy", - Name: ConsulKubernetesCheckName, + Name: "Kubernetes Health Check", Status: api.HealthPassing, Output: kubernetesSuccessReasonMsg, - Type: ConsulKubernetesCheckType, + Type: ttl, }, }, }, @@ -1925,11 +1620,10 @@ func TestReconcileCreateEndpoint(t *testing.T) { // register the mesh pods. { name: "Some endpoints injected, some not.", - svcName: "service-created", consulSvcName: "service-created", k8sObjects: func() []runtime.Object { - pod1 := createServicePod("pod1", "1.2.3.4", true, true) - pod2 := createServicePod("pod2", "2.3.4.5", false, false) + pod1 := createPod("pod1", "1.2.3.4", true, true) + pod2 := createPod("pod2", "2.3.4.5", false, false) // NOTE: the order of the addresses is important. The non-mesh pod must be first to correctly // reproduce the bug where we were exiting the loop early if any pod was non-mesh. @@ -1942,7 +1636,8 @@ func TestReconcileCreateEndpoint(t *testing.T) { { Addresses: []corev1.EndpointAddress{ { - IP: "2.3.4.5", + IP: "2.3.4.5", + NodeName: &nodeName, TargetRef: &corev1.ObjectReference{ Kind: "Pod", Name: "pod2", @@ -1950,7 +1645,8 @@ func TestReconcileCreateEndpoint(t *testing.T) { }, }, { - IP: "1.2.3.4", + IP: "1.2.3.4", + NodeName: &nodeName, TargetRef: &corev1.ObjectReference{ Kind: "Pod", Name: "pod1", @@ -1963,15 +1659,16 @@ func TestReconcileCreateEndpoint(t *testing.T) { } return []runtime.Object{pod1, pod2, endpointWithTwoAddresses} }, + initialConsulSvcs: []*api.AgentServiceRegistration{}, + expectedNumSvcInstances: 1, expectedConsulSvcInstances: []*api.CatalogService{ { ServiceID: "pod1-service-created", ServiceName: "service-created", ServiceAddress: "1.2.3.4", ServicePort: 0, - ServiceMeta: map[string]string{MetaKeyPodName: "pod1", MetaKeyKubeServiceName: "service-created", MetaKeyKubeNS: "default", MetaKeyManagedBy: managedByValue, MetaKeySyntheticNode: "true"}, + ServiceMeta: map[string]string{MetaKeyPodName: "pod1", MetaKeyKubeServiceName: "service-created", MetaKeyKubeNS: "default", MetaKeyManagedBy: managedByValue}, ServiceTags: []string{}, - ServiceProxy: &api.AgentServiceConnectProxyConfig{}, }, }, expectedProxySvcInstances: []*api.CatalogService{ @@ -1986,65 +1683,77 @@ func TestReconcileCreateEndpoint(t *testing.T) { LocalServiceAddress: "", LocalServicePort: 0, }, - ServiceMeta: map[string]string{MetaKeyPodName: "pod1", MetaKeyKubeServiceName: "service-created", MetaKeyKubeNS: "default", MetaKeyManagedBy: managedByValue, MetaKeySyntheticNode: "true"}, + ServiceMeta: map[string]string{MetaKeyPodName: "pod1", MetaKeyKubeServiceName: "service-created", MetaKeyKubeNS: "default", MetaKeyManagedBy: managedByValue}, ServiceTags: []string{}, }, }, - expectedHealthChecks: []*api.HealthCheck{ + expectedAgentHealthChecks: []*api.AgentCheck{ { - CheckID: "default/pod1-service-created", + CheckID: "default/pod1-service-created/kubernetes-health-check", ServiceName: "service-created", ServiceID: "pod1-service-created", - Name: ConsulKubernetesCheckName, + Name: "Kubernetes Health Check", Status: api.HealthPassing, Output: kubernetesSuccessReasonMsg, - Type: ConsulKubernetesCheckType, - }, - { - CheckID: "default/pod1-service-created-sidecar-proxy", - ServiceName: "service-created-sidecar-proxy", - ServiceID: "pod1-service-created-sidecar-proxy", - Name: ConsulKubernetesCheckName, - 
Status: api.HealthPassing, - Output: kubernetesSuccessReasonMsg, - Type: ConsulKubernetesCheckType, + Type: ttl, }, }, }, } for _, tt := range cases { t.Run(tt.name, func(t *testing.T) { + // The agent pod needs to have the address 127.0.0.1 so when the + // code gets the agent pods via the label component=client, and + // makes requests against the agent API, it will actually hit the + // test server we have on localhost. + fakeClientPod := createPod("fake-consul-client", "127.0.0.1", false, true) + fakeClientPod.Labels = map[string]string{"component": "client", "app": "consul", "release": "consul"} + // Add the default namespace. ns := corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "default"}} // Create fake k8s client - k8sObjects := append(tt.k8sObjects(), &ns) + k8sObjects := append(tt.k8sObjects(), fakeClientPod, &ns) fakeClient := fake.NewClientBuilder().WithRuntimeObjects(k8sObjects...).Build() - // Create test consulServer server. - testClient := test.TestServerWithMockConnMgrWatcher(t, nil) - consulClient := testClient.APIClient + // Create test consul server. + consul, err := testutil.NewTestServerConfigT(t, func(c *testutil.TestServerConfig) { + c.NodeName = nodeName + }) + require.NoError(t, err) + defer consul.Stop() + consul.WaitForServiceIntentions(t) + + cfg := &api.Config{ + Address: consul.HTTPAddr, + } + consulClient, err := api.NewClient(cfg) + require.NoError(t, err) + addr := strings.Split(consul.HTTPAddr, ":") + consulPort := addr[1] - // Create the endpoints controller. + // Register service and proxy in consul. + for _, svc := range tt.initialConsulSvcs { + err = consulClient.Agent().ServiceRegister(svc) + require.NoError(t, err) + } + + // Create the endpoints controller ep := &EndpointsController{ Client: fakeClient, Log: logrtest.TestLogger{T: t}, - ConsulClientConfig: testClient.Cfg, - ConsulServerConnMgr: testClient.Watcher, + ConsulClient: consulClient, + ConsulPort: consulPort, + ConsulScheme: "http", AllowK8sNamespacesSet: mapset.NewSetWith("*"), DenyK8sNamespacesSet: mapset.NewSetWith(), - ReleaseName: "consulServer", + ReleaseName: "consul", ReleaseNamespace: "default", - } - if tt.metricsEnabled { - ep.MetricsConfig = MetricsConfig{ - DefaultEnableMetrics: true, - EnableGatewayMetrics: true, - } + ConsulClientCfg: cfg, } namespacedName := types.NamespacedName{ Namespace: "default", - Name: tt.svcName, + Name: "service-created", } resp, err := ep.Reconcile(context.Background(), ctrl.Request{ @@ -2060,7 +1769,7 @@ func TestReconcileCreateEndpoint(t *testing.T) { // After reconciliation, Consul should have the service with the correct number of instances serviceInstances, _, err := consulClient.Catalog().Service(tt.consulSvcName, "", nil) require.NoError(t, err) - require.Len(t, serviceInstances, len(tt.expectedConsulSvcInstances)) + require.Len(t, serviceInstances, tt.expectedNumSvcInstances) for i, instance := range serviceInstances { require.Equal(t, tt.expectedConsulSvcInstances[i].ServiceID, instance.ServiceID) require.Equal(t, tt.expectedConsulSvcInstances[i].ServiceName, instance.ServiceName) @@ -2068,12 +1777,10 @@ func TestReconcileCreateEndpoint(t *testing.T) { require.Equal(t, tt.expectedConsulSvcInstances[i].ServicePort, instance.ServicePort) require.Equal(t, tt.expectedConsulSvcInstances[i].ServiceMeta, instance.ServiceMeta) require.Equal(t, tt.expectedConsulSvcInstances[i].ServiceTags, instance.ServiceTags) - require.Equal(t, tt.expectedConsulSvcInstances[i].ServiceTaggedAddresses, instance.ServiceTaggedAddresses) - require.Equal(t, 
tt.expectedConsulSvcInstances[i].ServiceProxy, instance.ServiceProxy) } proxyServiceInstances, _, err := consulClient.Catalog().Service(fmt.Sprintf("%s-sidecar-proxy", tt.consulSvcName), "", nil) require.NoError(t, err) - require.Len(t, proxyServiceInstances, len(tt.expectedProxySvcInstances)) + require.Len(t, proxyServiceInstances, tt.expectedNumSvcInstances) for i, instance := range proxyServiceInstances { require.Equal(t, tt.expectedProxySvcInstances[i].ServiceID, instance.ServiceID) require.Equal(t, tt.expectedProxySvcInstances[i].ServiceName, instance.ServiceName) @@ -2095,15 +1802,27 @@ func TestReconcileCreateEndpoint(t *testing.T) { require.Empty(t, diff, "expected objects to be equal") } - // Check that the Consul health expectedCheck was created for the k8s pod. - for _, expectedCheck := range tt.expectedHealthChecks { - filter := fmt.Sprintf("ServiceID == %q", expectedCheck.ServiceID) - checks, _, err := consulClient.Health().Checks(expectedCheck.ServiceName, &api.QueryOptions{Filter: filter}) - require.NoError(t, err) - require.Equal(t, len(checks), 1) - // Ignoring Namespace because the response from ENT includes it and OSS does not. - var ignoredFields = []string{"Node", "Definition", "Namespace", "Partition", "CreateIndex", "ModifyIndex", "ServiceTags"} - require.True(t, cmp.Equal(checks[0], expectedCheck, cmpopts.IgnoreFields(api.HealthCheck{}, ignoredFields...))) + _, checkInfos, err := consulClient.Agent().AgentHealthServiceByName(fmt.Sprintf("%s-sidecar-proxy", tt.consulSvcName)) + expectedChecks := []string{"Proxy Public Listener", "Destination Alias"} + require.NoError(t, err) + require.Len(t, checkInfos, tt.expectedNumSvcInstances) + for _, checkInfo := range checkInfos { + checks := checkInfo.Checks + require.Contains(t, expectedChecks, checks[0].Name) + require.Contains(t, expectedChecks, checks[1].Name) + } + + // Check that the Consul health check was created for the k8s pod. + if tt.expectedAgentHealthChecks != nil { + for i := range tt.expectedConsulSvcInstances { + filter := fmt.Sprintf("CheckID == `%s`", tt.expectedAgentHealthChecks[i].CheckID) + check, err := consulClient.Agent().ChecksWithFilter(filter) + require.NoError(t, err) + require.EqualValues(t, len(check), 1) + // Ignoring Namespace because the response from ENT includes it and OSS does not. + var ignoredFields = []string{"Node", "Definition", "Namespace", "Partition"} + require.True(t, cmp.Equal(check[tt.expectedAgentHealthChecks[i].CheckID], tt.expectedAgentHealthChecks[i], cmpopts.IgnoreFields(api.AgentCheck{}, ignoredFields...))) + } } }) } @@ -2121,25 +1840,28 @@ func TestReconcileCreateEndpoint(t *testing.T) { // // For the register and deregister codepath, this also tests that they work when the Consul service name is different // from the K8s service name. -// This test covers EndpointsController.deregisterService when services should be selectively deregistered +// This test covers EndpointsController.deregisterServiceOnAllAgents when services should be selectively deregistered // since the map will not be nil. 
 func TestReconcileUpdateEndpoint(t *testing.T) {
 	t.Parallel()
+	nodeName := "test-node"
 	cases := []struct {
 		name string
 		consulSvcName string
 		k8sObjects func() []runtime.Object
-		initialConsulSvcs []*api.CatalogRegistration
+		initialConsulSvcs []*api.AgentServiceRegistration
 		expectedConsulSvcInstances []*api.CatalogService
 		expectedProxySvcInstances []*api.CatalogService
-		expectedHealthChecks []*api.HealthCheck
+		expectedAgentHealthChecks []*api.AgentCheck
 		enableACLs bool
 	}{
+		// Legacy services are not managed by endpoints controller, but endpoints controller
+		// will still add/update the legacy service's health checks.
 		{
-			name: "Endpoints has an updated address because health check changes from unhealthy to healthy",
+			name: "Legacy service: Health check is added when the pod is healthy",
 			consulSvcName: "service-updated",
 			k8sObjects: func() []runtime.Object {
-				pod1 := createServicePod("pod1", "1.2.3.4", true, true)
+				pod1 := createPod("pod1", "1.2.3.4", true, false)
 				endpoint := &corev1.Endpoints{
 					ObjectMeta: metav1.ObjectMeta{
 						Name: "service-updated",
@@ -2149,7 +1871,8 @@ func TestReconcileUpdateEndpoint(t *testing.T) {
 					{
 						Addresses: []corev1.EndpointAddress{
 							{
-								IP: "1.2.3.4",
+								IP: "1.2.3.4",
+								NodeName: &nodeName,
 								TargetRef: &corev1.ObjectReference{
 									Kind: "Pod",
 									Name: "pod1",
@@ -2162,48 +1885,22 @@ func TestReconcileUpdateEndpoint(t *testing.T) {
 				}
 				return []runtime.Object{pod1, endpoint}
 			},
-			initialConsulSvcs: []*api.CatalogRegistration{
+			initialConsulSvcs: []*api.AgentServiceRegistration{
 				{
-					Node: ConsulNodeName,
-					Address: ConsulNodeAddress,
-					Service: &api.AgentService{
-						ID: "pod1-service-updated",
-						Service: "service-updated",
-						Port: 80,
-						Address: "1.2.3.4",
-						Meta: map[string]string{MetaKeyKubeNS: "default"},
-					},
-					Check: &api.AgentCheck{
-						CheckID: "default/pod1-service-updated",
-						Name: ConsulKubernetesCheckName,
-						Type: ConsulKubernetesCheckType,
-						Status: api.HealthCritical,
-						ServiceID: "pod1-service-updated",
-						ServiceName: "service-updated",
-					},
+					ID: "pod1-service-updated",
+					Name: "service-updated",
+					Port: 80,
+					Address: "1.2.3.4",
 				},
 				{
-					Node: ConsulNodeName,
-					Address: "127.0.0.1",
-					Service: &api.AgentService{
-						Kind: api.ServiceKindConnectProxy,
-						ID: "pod1-service-updated-sidecar-proxy",
-						Service: "service-updated-sidecar-proxy",
-						Port: 20000,
-						Address: "1.2.3.4",
-						Meta: map[string]string{MetaKeyKubeNS: "default"},
-						Proxy: &api.AgentServiceConnectProxyConfig{
-							DestinationServiceName: "service-updated",
-							DestinationServiceID: "pod1-service-updated",
-						},
-					},
-					Check: &api.AgentCheck{
-						CheckID: "default/pod1-service-updated-sidecar-proxy",
-						Name: ConsulKubernetesCheckName,
-						Type: ConsulKubernetesCheckType,
-						Status: api.HealthCritical,
-						ServiceID: "pod1-service-updated-sidecar-proxy",
-						ServiceName: "service-updated-sidecar-proxy",
+					Kind: api.ServiceKindConnectProxy,
+					ID: "pod1-service-updated-sidecar-proxy",
+					Name: "service-updated-sidecar-proxy",
+					Port: 20000,
+					Address: "1.2.3.4",
+					Proxy: &api.AgentServiceConnectProxyConfig{
+						DestinationServiceName: "service-updated",
+						DestinationServiceID: "pod1-service-updated",
 					},
 				},
 			},
@@ -2219,32 +1916,23 @@ func TestReconcileUpdateEndpoint(t *testing.T) {
 					ServiceAddress: "1.2.3.4",
 				},
 			},
-			expectedHealthChecks: []*api.HealthCheck{
+			expectedAgentHealthChecks: []*api.AgentCheck{
 				{
-					CheckID: "default/pod1-service-updated",
+					CheckID: "default/pod1-service-updated/kubernetes-health-check",
 					ServiceName: "service-updated",
 					ServiceID: "pod1-service-updated",
-					Name: ConsulKubernetesCheckName,
-					Status: api.HealthPassing,
-					Output: kubernetesSuccessReasonMsg,
-					Type: ConsulKubernetesCheckType,
-				},
-				{
-					CheckID: "default/pod1-service-updated-sidecar-proxy",
-					ServiceName: "service-updated-sidecar-proxy",
-					ServiceID: "pod1-service-updated-sidecar-proxy",
-					Name: ConsulKubernetesCheckName,
+					Name: "Kubernetes Health Check",
 					Status: api.HealthPassing,
 					Output: kubernetesSuccessReasonMsg,
-					Type: ConsulKubernetesCheckType,
+					Type: ttl,
 				},
 			},
 		},
 		{
-			name: "Endpoints has an updated address because health check changes from healthy to unhealthy",
+			name: "Legacy service: Health check is added when the pod is unhealthy",
 			consulSvcName: "service-updated",
 			k8sObjects: func() []runtime.Object {
-				pod1 := createServicePod("pod1", "1.2.3.4", true, true)
+				pod1 := createPod("pod1", "1.2.3.4", true, false)
 				endpoint := &corev1.Endpoints{
 					ObjectMeta: metav1.ObjectMeta{
 						Name: "service-updated",
@@ -2254,7 +1942,8 @@ func TestReconcileUpdateEndpoint(t *testing.T) {
 					{
 						NotReadyAddresses: []corev1.EndpointAddress{
 							{
-								IP: "1.2.3.4",
+								IP: "1.2.3.4",
+								NodeName: &nodeName,
 								TargetRef: &corev1.ObjectReference{
 									Kind: "Pod",
 									Name: "pod1",
@@ -2267,48 +1956,22 @@ func TestReconcileUpdateEndpoint(t *testing.T) {
 				}
 				return []runtime.Object{pod1, endpoint}
 			},
-			initialConsulSvcs: []*api.CatalogRegistration{
+			initialConsulSvcs: []*api.AgentServiceRegistration{
 				{
-					Node: ConsulNodeName,
-					Address: ConsulNodeAddress,
-					Service: &api.AgentService{
-						ID: "pod1-service-updated",
-						Service: "service-updated",
-						Port: 80,
-						Address: "1.2.3.4",
-						Meta: map[string]string{MetaKeyKubeNS: "default"},
-					},
-					Check: &api.AgentCheck{
-						CheckID: "default/pod1-service-updated",
-						Name: ConsulKubernetesCheckName,
-						Type: ConsulKubernetesCheckType,
-						Status: api.HealthPassing,
-						ServiceName: "service-updated",
-						ServiceID: "pod1-service-updated",
-					},
+					ID: "pod1-service-updated",
+					Name: "service-updated",
+					Port: 80,
+					Address: "1.2.3.4",
 				},
 				{
-					Node: ConsulNodeName,
-					Address: "127.0.0.1",
-					Service: &api.AgentService{
-						Kind: api.ServiceKindConnectProxy,
-						ID: "pod1-service-updated-sidecar-proxy",
-						Service: "service-updated-sidecar-proxy",
-						Port: 20000,
-						Address: "1.2.3.4",
-						Meta: map[string]string{MetaKeyKubeNS: "default"},
-						Proxy: &api.AgentServiceConnectProxyConfig{
-							DestinationServiceName: "service-updated",
-							DestinationServiceID: "pod1-service-updated",
-						},
-					},
-					Check: &api.AgentCheck{
-						CheckID: "default/pod1-service-updated-sidecar-proxy",
-						Name: ConsulKubernetesCheckName,
-						Type: ConsulKubernetesCheckType,
-						Status: api.HealthPassing,
-						ServiceName: "service-updated-sidecar-proxy",
-						ServiceID: "pod1-service-updated-sidecar-proxy",
+					Kind: api.ServiceKindConnectProxy,
+					ID: "pod1-service-updated-sidecar-proxy",
+					Name: "service-updated-sidecar-proxy",
+					Port: 20000,
+					Address: "1.2.3.4",
+					Proxy: &api.AgentServiceConnectProxyConfig{
+						DestinationServiceName: "service-updated",
+						DestinationServiceID: "pod1-service-updated",
 					},
 				},
 			},
@@ -2324,32 +1987,23 @@ func TestReconcileUpdateEndpoint(t *testing.T) {
 					ServiceAddress: "1.2.3.4",
 				},
 			},
-			expectedHealthChecks: []*api.HealthCheck{
+			expectedAgentHealthChecks: []*api.AgentCheck{
 				{
-					CheckID: "default/pod1-service-updated",
+					CheckID: "default/pod1-service-updated/kubernetes-health-check",
 					ServiceName: "service-updated",
 					ServiceID: "pod1-service-updated",
-					Name: ConsulKubernetesCheckName,
-					Status: api.HealthCritical,
-					Output: "Pod \"default/pod1\" is not ready",
-					Type: ConsulKubernetesCheckType,
-				},
-				{
-					CheckID: "default/pod1-service-updated-sidecar-proxy",
-					ServiceName: "service-updated-sidecar-proxy",
-					ServiceID: "pod1-service-updated-sidecar-proxy",
-					Name: ConsulKubernetesCheckName,
+					Name: "Kubernetes Health Check",
 					Status: api.HealthCritical,
 					Output: "Pod \"default/pod1\" is not ready",
-					Type: ConsulKubernetesCheckType,
+					Type: ttl,
 				},
 			},
 		},
 		{
-			name: "Endpoints has an updated address (pod IP change).",
+			name: "Legacy service: Service health check is updated when the pod goes from healthy --> unhealthy",
 			consulSvcName: "service-updated",
 			k8sObjects: func() []runtime.Object {
-				pod1 := createServicePod("pod1", "4.4.4.4", true, true)
+				pod1 := createPod("pod1", "1.2.3.4", true, false)
 				endpoint := &corev1.Endpoints{
 					ObjectMeta: metav1.ObjectMeta{
 						Name: "service-updated",
@@ -2357,9 +2011,10 @@ func TestReconcileUpdateEndpoint(t *testing.T) {
 					},
 					Subsets: []corev1.EndpointSubset{
 						{
-							Addresses: []corev1.EndpointAddress{
+							NotReadyAddresses: []corev1.EndpointAddress{
 								{
-									IP: "4.4.4.4",
+									IP: "1.2.3.4",
+									NodeName: &nodeName,
 									TargetRef: &corev1.ObjectReference{
 										Kind: "Pod",
 										Name: "pod1",
@@ -2372,66 +2027,62 @@ func TestReconcileUpdateEndpoint(t *testing.T) {
 				}
 				return []runtime.Object{pod1, endpoint}
 			},
-			initialConsulSvcs: []*api.CatalogRegistration{
+			initialConsulSvcs: []*api.AgentServiceRegistration{
 				{
-					Node: ConsulNodeName,
-					Address: ConsulNodeAddress,
-					Service: &api.AgentService{
-						ID: "pod1-service-updated",
-						Service: "service-updated",
-						Port: 80,
-						Address: "1.2.3.4",
-						Meta: map[string]string{
-							MetaKeyKubeNS: "default",
-							MetaKeyPodName: "pod1",
-							MetaKeyKubeServiceName: "service-updated",
-							MetaKeyManagedBy: managedByValue,
-							MetaKeySyntheticNode: "true",
-						},
+					ID: "pod1-service-updated",
+					Name: "service-updated",
+					Port: 80,
+					Address: "1.2.3.4",
+					Check: &api.AgentServiceCheck{
+						CheckID: "default/pod1-service-updated/kubernetes-health-check",
+						Name: "Kubernetes Health Check",
+						TTL: "100000h",
+						Status: api.HealthPassing,
+						SuccessBeforePassing: 1,
+						FailuresBeforeCritical: 1,
 					},
 				},
 				{
-					Node: ConsulNodeName,
-					Address: "127.0.0.1",
-					Service: &api.AgentService{
-						Kind: api.ServiceKindConnectProxy,
-						ID: "pod1-service-updated-sidecar-proxy",
-						Service: "service-updated-sidecar-proxy",
-						Port: 20000,
-						Address: "1.2.3.4",
-						Meta: map[string]string{
-							MetaKeyKubeNS: "default",
-							MetaKeyPodName: "pod1",
-							MetaKeyKubeServiceName: "service-updated",
-							MetaKeyManagedBy: managedByValue,
-							MetaKeySyntheticNode: "true",
-						},
-						Proxy: &api.AgentServiceConnectProxyConfig{
-							DestinationServiceName: "service-updated",
-							DestinationServiceID: "pod1-service-updated",
-						},
+					Kind: api.ServiceKindConnectProxy,
+					ID: "pod1-service-updated-sidecar-proxy",
+					Name: "service-updated-sidecar-proxy",
+					Port: 20000,
+					Address: "1.2.3.4",
+					Proxy: &api.AgentServiceConnectProxyConfig{
+						DestinationServiceName: "service-updated",
+						DestinationServiceID: "pod1-service-updated",
 					},
 				},
 			},
 			expectedConsulSvcInstances: []*api.CatalogService{
 				{
 					ServiceID: "pod1-service-updated",
-					ServiceAddress: "4.4.4.4",
+					ServiceAddress: "1.2.3.4",
 				},
 			},
 			expectedProxySvcInstances: []*api.CatalogService{
 				{
 					ServiceID: "pod1-service-updated-sidecar-proxy",
-					ServiceAddress: "4.4.4.4",
+					ServiceAddress: "1.2.3.4",
+				},
+			},
+			expectedAgentHealthChecks: []*api.AgentCheck{
+				{
+					CheckID: "default/pod1-service-updated/kubernetes-health-check",
+					ServiceName: "service-updated",
+					ServiceID: "pod1-service-updated",
+					Name: "Kubernetes Health Check",
+					Status: api.HealthCritical,
+					Output: "Pod \"default/pod1\" is not ready",
Type: ttl, }, }, }, { - name: "Different Consul service name: Endpoints has an updated address (pod IP change).", - consulSvcName: "different-consul-svc-name", + name: "Legacy service: Service health check is updated when the pod goes from unhealthy --> healthy", + consulSvcName: "service-updated", k8sObjects: func() []runtime.Object { - pod1 := createServicePod("pod1", "4.4.4.4", true, true) - pod1.Annotations[annotationService] = "different-consul-svc-name" + pod1 := createPod("pod1", "1.2.3.4", true, false) endpoint := &corev1.Endpoints{ ObjectMeta: metav1.ObjectMeta{ Name: "service-updated", @@ -2441,7 +2092,8 @@ func TestReconcileUpdateEndpoint(t *testing.T) { { Addresses: []corev1.EndpointAddress{ { - IP: "4.4.4.4", + IP: "1.2.3.4", + NodeName: &nodeName, TargetRef: &corev1.ObjectReference{ Kind: "Pod", Name: "pod1", @@ -2454,67 +2106,63 @@ func TestReconcileUpdateEndpoint(t *testing.T) { } return []runtime.Object{pod1, endpoint} }, - initialConsulSvcs: []*api.CatalogRegistration{ + initialConsulSvcs: []*api.AgentServiceRegistration{ { - Node: ConsulNodeName, - Address: ConsulNodeAddress, - Service: &api.AgentService{ - ID: "pod1-different-consul-svc-name", - Service: "different-consul-svc-name", - Port: 80, - Address: "1.2.3.4", - Meta: map[string]string{ - MetaKeyManagedBy: managedByValue, - MetaKeySyntheticNode: "true", - MetaKeyKubeNS: "default", - MetaKeyPodName: "pod1", - MetaKeyKubeServiceName: "service-updated", - }, + ID: "pod1-service-updated", + Name: "service-updated", + Port: 80, + Address: "1.2.3.4", + Check: &api.AgentServiceCheck{ + CheckID: "default/pod1-service-updated/kubernetes-health-check", + Name: "Kubernetes Health Check", + TTL: "100000h", + Status: api.HealthCritical, + SuccessBeforePassing: 1, + FailuresBeforeCritical: 1, }, }, { - Node: ConsulNodeName, - Address: "127.0.0.1", - Service: &api.AgentService{ - Kind: api.ServiceKindConnectProxy, - ID: "pod1-different-consul-svc-name-sidecar-proxy", - Service: "different-consul-svc-name-sidecar-proxy", - Port: 20000, - Address: "1.2.3.4", - Proxy: &api.AgentServiceConnectProxyConfig{ - DestinationServiceName: "different-consul-svc-name", - DestinationServiceID: "pod1-different-consul-svc-name", - }, - Meta: map[string]string{ - MetaKeyManagedBy: managedByValue, - MetaKeySyntheticNode: "true", - MetaKeyKubeNS: "default", - MetaKeyPodName: "pod1", - MetaKeyKubeServiceName: "service-updated", - }, + Kind: api.ServiceKindConnectProxy, + ID: "pod1-service-updated-sidecar-proxy", + Name: "service-updated-sidecar-proxy", + Port: 20000, + Address: "1.2.3.4", + Proxy: &api.AgentServiceConnectProxyConfig{ + DestinationServiceName: "service-updated", + DestinationServiceID: "pod1-service-updated", }, }, }, expectedConsulSvcInstances: []*api.CatalogService{ { - ServiceID: "pod1-different-consul-svc-name", - ServiceAddress: "4.4.4.4", + ServiceID: "pod1-service-updated", + ServiceAddress: "1.2.3.4", }, }, expectedProxySvcInstances: []*api.CatalogService{ { - ServiceID: "pod1-different-consul-svc-name-sidecar-proxy", - ServiceAddress: "4.4.4.4", + ServiceID: "pod1-service-updated-sidecar-proxy", + ServiceAddress: "1.2.3.4", + }, + }, + expectedAgentHealthChecks: []*api.AgentCheck{ + { + CheckID: "default/pod1-service-updated/kubernetes-health-check", + ServiceName: "service-updated", + ServiceID: "pod1-service-updated", + Name: "Kubernetes Health Check", + Status: api.HealthPassing, + Output: kubernetesSuccessReasonMsg, + Type: ttl, }, }, }, { - name: "Endpoints has additional address not in Consul", + name: "Endpoints has 
an updated address because health check changes from unhealthy to healthy", consulSvcName: "service-updated", k8sObjects: func() []runtime.Object { - pod1 := createServicePod("pod1", "1.2.3.4", true, true) - pod2 := createServicePod("pod2", "2.2.3.4", true, true) - endpointWithTwoAddresses := &corev1.Endpoints{ + pod1 := createPod("pod1", "1.2.3.4", true, true) + endpoint := &corev1.Endpoints{ ObjectMeta: metav1.ObjectMeta{ Name: "service-updated", Namespace: "default", @@ -2523,52 +2171,46 @@ func TestReconcileUpdateEndpoint(t *testing.T) { { Addresses: []corev1.EndpointAddress{ { - IP: "1.2.3.4", + IP: "1.2.3.4", + NodeName: &nodeName, TargetRef: &corev1.ObjectReference{ Kind: "Pod", Name: "pod1", Namespace: "default", }, }, - { - IP: "2.2.3.4", - TargetRef: &corev1.ObjectReference{ - Kind: "Pod", - Name: "pod2", - Namespace: "default", - }, - }, }, }, }, } - return []runtime.Object{pod1, pod2, endpointWithTwoAddresses} + return []runtime.Object{pod1, endpoint} }, - initialConsulSvcs: []*api.CatalogRegistration{ + initialConsulSvcs: []*api.AgentServiceRegistration{ { - Node: ConsulNodeName, - Address: ConsulNodeAddress, - Service: &api.AgentService{ - ID: "pod1-service-updated", - Service: "service-updated", - Port: 80, - Address: "1.2.3.4", - Meta: map[string]string{"k8s-service-name": "service-updated", "k8s-namespace": "default", MetaKeyManagedBy: managedByValue}, + ID: "pod1-service-updated", + Name: "service-updated", + Port: 80, + Address: "1.2.3.4", + Meta: map[string]string{MetaKeyKubeNS: "default"}, + Check: &api.AgentServiceCheck{ + CheckID: "default/pod1-service-updated/kubernetes-health-check", + Name: "Kubernetes Health Check", + TTL: "100000h", + Status: api.HealthCritical, + SuccessBeforePassing: 1, + FailuresBeforeCritical: 1, }, }, { - Node: ConsulNodeName, - Address: "127.0.0.1", - Service: &api.AgentService{ - Kind: api.ServiceKindConnectProxy, - ID: "pod1-service-updated-sidecar-proxy", - Service: "service-updated-sidecar-proxy", - Port: 20000, - Address: "1.2.3.4", - Proxy: &api.AgentServiceConnectProxyConfig{ - DestinationServiceName: "service-updated", - DestinationServiceID: "pod1-service-updated", - }, + Kind: api.ServiceKindConnectProxy, + ID: "pod1-service-updated-sidecar-proxy", + Name: "service-updated-sidecar-proxy", + Port: 20000, + Address: "1.2.3.4", + Meta: map[string]string{MetaKeyKubeNS: "default"}, + Proxy: &api.AgentServiceConnectProxyConfig{ + DestinationServiceName: "service-updated", + DestinationServiceID: "pod1-service-updated", }, }, }, @@ -2577,65 +2219,30 @@ func TestReconcileUpdateEndpoint(t *testing.T) { ServiceID: "pod1-service-updated", ServiceAddress: "1.2.3.4", }, - { - ServiceID: "pod2-service-updated", - ServiceAddress: "2.2.3.4", - }, }, expectedProxySvcInstances: []*api.CatalogService{ { ServiceID: "pod1-service-updated-sidecar-proxy", ServiceAddress: "1.2.3.4", }, - { - ServiceID: "pod2-service-updated-sidecar-proxy", - ServiceAddress: "2.2.3.4", - }, }, - expectedHealthChecks: []*api.HealthCheck{ + expectedAgentHealthChecks: []*api.AgentCheck{ { - CheckID: "default/pod1-service-updated", + CheckID: "default/pod1-service-updated/kubernetes-health-check", ServiceName: "service-updated", ServiceID: "pod1-service-updated", - Name: ConsulKubernetesCheckName, - Status: api.HealthPassing, - Output: kubernetesSuccessReasonMsg, - Type: ConsulKubernetesCheckType, - }, - { - CheckID: "default/pod1-service-updated-sidecar-proxy", - ServiceName: "service-updated-sidecar-proxy", - ServiceID: "pod1-service-updated-sidecar-proxy", - Name: 
ConsulKubernetesCheckName, - Status: api.HealthPassing, - Output: kubernetesSuccessReasonMsg, - Type: ConsulKubernetesCheckType, - }, - { - CheckID: "default/pod2-service-updated", - ServiceName: "service-updated", - ServiceID: "pod2-service-updated", - Name: ConsulKubernetesCheckName, - Status: api.HealthPassing, - Output: kubernetesSuccessReasonMsg, - Type: ConsulKubernetesCheckType, - }, - { - CheckID: "default/pod2-service-updated-sidecar-proxy", - ServiceName: "service-updated-sidecar-proxy", - ServiceID: "pod2-service-updated-sidecar-proxy", - Name: ConsulKubernetesCheckName, + Name: "Kubernetes Health Check", Status: api.HealthPassing, Output: kubernetesSuccessReasonMsg, - Type: ConsulKubernetesCheckType, + Type: ttl, }, }, }, { - name: "Consul has instances that are not in the Endpoints addresses", + name: "Endpoints has an updated address because health check changes from healthy to unhealthy", consulSvcName: "service-updated", k8sObjects: func() []runtime.Object { - pod1 := createServicePod("pod1", "1.2.3.4", true, true) + pod1 := createPod("pod1", "1.2.3.4", true, true) endpoint := &corev1.Endpoints{ ObjectMeta: metav1.ObjectMeta{ Name: "service-updated", @@ -2643,9 +2250,10 @@ func TestReconcileUpdateEndpoint(t *testing.T) { }, Subsets: []corev1.EndpointSubset{ { - Addresses: []corev1.EndpointAddress{ + NotReadyAddresses: []corev1.EndpointAddress{ { - IP: "1.2.3.4", + IP: "1.2.3.4", + NodeName: &nodeName, TargetRef: &corev1.ObjectReference{ Kind: "Pod", Name: "pod1", @@ -2658,59 +2266,32 @@ func TestReconcileUpdateEndpoint(t *testing.T) { } return []runtime.Object{pod1, endpoint} }, - initialConsulSvcs: []*api.CatalogRegistration{ - { - Node: ConsulNodeName, - Address: ConsulNodeAddress, - Service: &api.AgentService{ - ID: "pod1-service-updated", - Service: "service-updated", - Port: 80, - Address: "1.2.3.4", - Meta: map[string]string{"k8s-service-name": "service-updated", "k8s-namespace": "default", MetaKeyManagedBy: managedByValue}, - }, - }, + initialConsulSvcs: []*api.AgentServiceRegistration{ { - Node: ConsulNodeName, - Address: "127.0.0.1", - Service: &api.AgentService{ - Kind: api.ServiceKindConnectProxy, - ID: "pod1-service-updated-sidecar-proxy", - Service: "service-updated-sidecar-proxy", - Port: 20000, - Address: "1.2.3.4", - Proxy: &api.AgentServiceConnectProxyConfig{ - DestinationServiceName: "service-updated", - DestinationServiceID: "pod1-service-updated", - }, - Meta: map[string]string{"k8s-service-name": "service-updated", "k8s-namespace": "default", MetaKeyManagedBy: managedByValue}, - }, - }, - { - Node: ConsulNodeName, - Address: ConsulNodeAddress, - Service: &api.AgentService{ - ID: "pod2-service-updated", - Service: "service-updated", - Port: 80, - Address: "2.2.3.4", - Meta: map[string]string{"k8s-service-name": "service-updated", "k8s-namespace": "default", MetaKeyManagedBy: managedByValue}, + ID: "pod1-service-updated", + Name: "service-updated", + Port: 80, + Address: "1.2.3.4", + Meta: map[string]string{MetaKeyKubeNS: "default"}, + Check: &api.AgentServiceCheck{ + CheckID: "default/pod1-service-updated/kubernetes-health-check", + Name: "Kubernetes Health Check", + TTL: "100000h", + Status: api.HealthPassing, + SuccessBeforePassing: 1, + FailuresBeforeCritical: 1, }, }, { - Node: ConsulNodeName, - Address: "127.0.0.1", - Service: &api.AgentService{ - Kind: api.ServiceKindConnectProxy, - ID: "pod2-service-updated-sidecar-proxy", - Service: "service-updated-sidecar-proxy", - Port: 20000, - Address: "2.2.3.4", - Proxy: &api.AgentServiceConnectProxyConfig{ 
- DestinationServiceName: "service-updated", - DestinationServiceID: "pod2-service-updated", - }, - Meta: map[string]string{"k8s-service-name": "service-updated", "k8s-namespace": "default", MetaKeyManagedBy: managedByValue}, + Kind: api.ServiceKindConnectProxy, + ID: "pod1-service-updated-sidecar-proxy", + Name: "service-updated-sidecar-proxy", + Port: 20000, + Address: "1.2.3.4", + Meta: map[string]string{MetaKeyKubeNS: "default"}, + Proxy: &api.AgentServiceConnectProxyConfig{ + DestinationServiceName: "service-updated", + DestinationServiceID: "pod1-service-updated", }, }, }, @@ -2726,13 +2307,23 @@ func TestReconcileUpdateEndpoint(t *testing.T) { ServiceAddress: "1.2.3.4", }, }, + expectedAgentHealthChecks: []*api.AgentCheck{ + { + CheckID: "default/pod1-service-updated/kubernetes-health-check", + ServiceName: "service-updated", + ServiceID: "pod1-service-updated", + Name: "Kubernetes Health Check", + Status: api.HealthCritical, + Output: "Pod \"default/pod1\" is not ready", + Type: ttl, + }, + }, }, { - name: "Different Consul service name: Consul has instances that are not in the Endpoints addresses", - consulSvcName: "different-consul-svc-name", + name: "Endpoints has an updated address (pod IP change).", + consulSvcName: "service-updated", k8sObjects: func() []runtime.Object { - pod1 := createServicePod("pod1", "1.2.3.4", true, true) - pod1.Annotations[annotationService] = "different-consul-svc-name" + pod1 := createPod("pod1", "4.4.4.4", true, true) endpoint := &corev1.Endpoints{ ObjectMeta: metav1.ObjectMeta{ Name: "service-updated", @@ -2742,7 +2333,8 @@ func TestReconcileUpdateEndpoint(t *testing.T) { { Addresses: []corev1.EndpointAddress{ { - IP: "1.2.3.4", + IP: "4.4.4.4", + NodeName: &nodeName, TargetRef: &corev1.ObjectReference{ Kind: "Pod", Name: "pod1", @@ -2755,227 +2347,130 @@ func TestReconcileUpdateEndpoint(t *testing.T) { } return []runtime.Object{pod1, endpoint} }, - initialConsulSvcs: []*api.CatalogRegistration{ - { - Node: ConsulNodeName, - Address: ConsulNodeAddress, - Service: &api.AgentService{ - ID: "pod1-different-consul-svc-name", - Service: "different-consul-svc-name", - Port: 80, - Address: "1.2.3.4", - Meta: map[string]string{"k8s-service-name": "service-updated", "k8s-namespace": "default", MetaKeyManagedBy: managedByValue}, - }, - }, + initialConsulSvcs: []*api.AgentServiceRegistration{ { - Node: ConsulNodeName, - Address: "127.0.0.1", - Service: &api.AgentService{ - Kind: api.ServiceKindConnectProxy, - ID: "pod1-different-consul-svc-name-sidecar-proxy", - Service: "different-consul-svc-name-sidecar-proxy", - Port: 20000, - Address: "1.2.3.4", - Proxy: &api.AgentServiceConnectProxyConfig{ - DestinationServiceName: "different-consul-svc-name", - DestinationServiceID: "pod1-different-consul-svc-name", - }, - Meta: map[string]string{"k8s-service-name": "service-updated", "k8s-namespace": "default", MetaKeyManagedBy: managedByValue}, + ID: "pod1-service-updated", + Name: "service-updated", + Port: 80, + Address: "1.2.3.4", + Meta: map[string]string{ + MetaKeyKubeNS: "default", + MetaKeyPodName: "pod1", + MetaKeyKubeServiceName: "service-updated", + MetaKeyManagedBy: managedByValue, }, }, { - Node: ConsulNodeName, - Address: ConsulNodeAddress, - Service: &api.AgentService{ - ID: "pod2-different-consul-svc-name", - Service: "different-consul-svc-name", - Port: 80, - Address: "2.2.3.4", - Meta: map[string]string{"k8s-service-name": "service-updated", "k8s-namespace": "default", MetaKeyManagedBy: managedByValue}, + Kind: api.ServiceKindConnectProxy, + ID: 
"pod1-service-updated-sidecar-proxy", + Name: "service-updated-sidecar-proxy", + Port: 20000, + Address: "1.2.3.4", + Meta: map[string]string{ + MetaKeyKubeNS: "default", + MetaKeyPodName: "pod1", + MetaKeyKubeServiceName: "service-updated", + MetaKeyManagedBy: managedByValue, }, - }, - { - Node: ConsulNodeName, - Address: "127.0.0.1", - Service: &api.AgentService{ - Kind: api.ServiceKindConnectProxy, - ID: "pod2-different-consul-svc-name-sidecar-proxy", - Service: "different-consul-svc-name-sidecar-proxy", - Port: 20000, - Address: "2.2.3.4", - Proxy: &api.AgentServiceConnectProxyConfig{ - DestinationServiceName: "different-consul-svc-name", - DestinationServiceID: "pod2-different-consul-svc-name", - }, - Meta: map[string]string{"k8s-service-name": "service-updated", "k8s-namespace": "default", MetaKeyManagedBy: managedByValue}, + Proxy: &api.AgentServiceConnectProxyConfig{ + DestinationServiceName: "service-updated", + DestinationServiceID: "pod1-service-updated", }, }, }, expectedConsulSvcInstances: []*api.CatalogService{ { - ServiceID: "pod1-different-consul-svc-name", - ServiceAddress: "1.2.3.4", + ServiceID: "pod1-service-updated", + ServiceAddress: "4.4.4.4", }, }, expectedProxySvcInstances: []*api.CatalogService{ { - ServiceID: "pod1-different-consul-svc-name-sidecar-proxy", - ServiceAddress: "1.2.3.4", + ServiceID: "pod1-service-updated-sidecar-proxy", + ServiceAddress: "4.4.4.4", }, }, }, { - // When a k8s deployment is deleted but it's k8s service continues to exist, the endpoints has no addresses - // and the instances should be deleted from Consul. - name: "Consul has instances that are not in the endpoints, and the endpoints has no addresses.", - consulSvcName: "service-updated", + name: "Different Consul service name: Endpoints has an updated address (pod IP change).", + consulSvcName: "different-consul-svc-name", k8sObjects: func() []runtime.Object { + pod1 := createPod("pod1", "4.4.4.4", true, true) + pod1.Annotations[annotationService] = "different-consul-svc-name" endpoint := &corev1.Endpoints{ ObjectMeta: metav1.ObjectMeta{ Name: "service-updated", Namespace: "default", }, + Subsets: []corev1.EndpointSubset{ + { + Addresses: []corev1.EndpointAddress{ + { + IP: "4.4.4.4", + NodeName: &nodeName, + TargetRef: &corev1.ObjectReference{ + Kind: "Pod", + Name: "pod1", + Namespace: "default", + }, + }, + }, + }, + }, } - return []runtime.Object{endpoint} + return []runtime.Object{pod1, endpoint} }, - initialConsulSvcs: []*api.CatalogRegistration{ + initialConsulSvcs: []*api.AgentServiceRegistration{ { - Node: ConsulNodeName, - Address: ConsulNodeAddress, - Service: &api.AgentService{ - ID: "pod1-service-updated", - Service: "service-updated", - Port: 80, - Address: "1.2.3.4", - Meta: map[string]string{"k8s-service-name": "service-updated", "k8s-namespace": "default", MetaKeyManagedBy: managedByValue}, - }, - }, - { - Node: ConsulNodeName, - Address: "127.0.0.1", - Service: &api.AgentService{ - Kind: api.ServiceKindConnectProxy, - ID: "pod1-service-updated-sidecar-proxy", - Service: "service-updated-sidecar-proxy", - Port: 20000, - Address: "1.2.3.4", - Proxy: &api.AgentServiceConnectProxyConfig{ - DestinationServiceName: "service-updated", - DestinationServiceID: "pod1-service-updated", - }, - Meta: map[string]string{"k8s-service-name": "service-updated", "k8s-namespace": "default", MetaKeyManagedBy: managedByValue}, + ID: "pod1-different-consul-svc-name", + Name: "different-consul-svc-name", + Port: 80, + Address: "1.2.3.4", + Meta: map[string]string{ + MetaKeyManagedBy: 
managedByValue, + MetaKeyKubeNS: "default", + MetaKeyPodName: "pod1", + MetaKeyKubeServiceName: "service-updated", }, }, { - Node: ConsulNodeName, - Address: ConsulNodeAddress, - Service: &api.AgentService{ - ID: "pod2-service-updated", - Service: "service-updated", - Port: 80, - Address: "2.2.3.4", - Meta: map[string]string{"k8s-service-name": "service-updated", "k8s-namespace": "default", MetaKeyManagedBy: managedByValue}, + Kind: api.ServiceKindConnectProxy, + ID: "pod1-different-consul-svc-name-sidecar-proxy", + Name: "different-consul-svc-name-sidecar-proxy", + Port: 20000, + Address: "1.2.3.4", + Proxy: &api.AgentServiceConnectProxyConfig{ + DestinationServiceName: "different-consul-svc-name", + DestinationServiceID: "pod1-different-consul-svc-name", }, - }, - { - Node: ConsulNodeName, - Address: "127.0.0.1", - Service: &api.AgentService{ - Kind: api.ServiceKindConnectProxy, - ID: "pod2-service-updated-sidecar-proxy", - Service: "service-updated-sidecar-proxy", - Port: 20000, - Address: "2.2.3.4", - Proxy: &api.AgentServiceConnectProxyConfig{ - DestinationServiceName: "service-updated", - DestinationServiceID: "pod2-service-updated", - }, - Meta: map[string]string{"k8s-service-name": "service-updated", "k8s-namespace": "default", MetaKeyManagedBy: managedByValue}, + Meta: map[string]string{ + MetaKeyManagedBy: managedByValue, + MetaKeyKubeNS: "default", + MetaKeyPodName: "pod1", + MetaKeyKubeServiceName: "service-updated", }, }, }, - expectedConsulSvcInstances: []*api.CatalogService{}, - expectedProxySvcInstances: []*api.CatalogService{}, - }, - { - // With a different Consul service name, when a k8s deployment is deleted but it's k8s service continues to - // exist, the endpoints has no addresses and the instances should be deleted from Consul. 
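The comment above states the invariant that the surrounding cases (removed here and re-added in agent-registration form later in this hunk) exercise: once the Endpoints object has no addresses, every instance the controller registered for that Kubernetes service must be deregistered. A rough sketch of that cleanup against a single agent, assuming the `k8s-service-name` and `k8s-namespace` meta keys used throughout these fixtures; this is an illustration, not the controller's actual implementation:

```go
import (
	"fmt"

	"github.com/hashicorp/consul/api"
)

// deregisterAllForK8sService is a hypothetical helper: it finds every
// service instance on one agent that carries the meta keys these test
// fixtures set, then deregisters each one. Both the service and its
// sidecar-proxy registration match, since both carry the same meta.
func deregisterAllForK8sService(agent *api.Agent, k8sSvcName, k8sNS string) error {
	filter := fmt.Sprintf(`Meta["k8s-service-name"] == %q and Meta["k8s-namespace"] == %q`,
		k8sSvcName, k8sNS)
	svcs, err := agent.ServicesWithFilter(filter)
	if err != nil {
		return err
	}
	for id := range svcs {
		if err := agent.ServiceDeregister(id); err != nil {
			return err
		}
	}
	return nil
}
```

The selective variant referenced in the doc comment near the top of this section (`deregisterServiceOnAllAgents` with a non-nil map) would additionally skip any instance ID present in that map.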
- name: "Different Consul service name: Consul has instances that are not in the endpoints, and the endpoints has no addresses.", - consulSvcName: "different-consul-svc-name", - k8sObjects: func() []runtime.Object { - endpoint := &corev1.Endpoints{ - ObjectMeta: metav1.ObjectMeta{ - Name: "service-updated", - Namespace: "default", - }, - } - return []runtime.Object{endpoint} - }, - initialConsulSvcs: []*api.CatalogRegistration{ - { - Node: ConsulNodeName, - Address: ConsulNodeAddress, - Service: &api.AgentService{ - ID: "pod1-different-consul-svc-name", - Service: "different-consul-svc-name", - Port: 80, - Address: "1.2.3.4", - Meta: map[string]string{"k8s-service-name": "service-updated", "k8s-namespace": "default", MetaKeyManagedBy: managedByValue}, - }, - }, - { - Node: ConsulNodeName, - Address: "127.0.0.1", - Service: &api.AgentService{ - Kind: api.ServiceKindConnectProxy, - ID: "pod1-different-consul-svc-name-sidecar-proxy", - Service: "different-consul-svc-name-sidecar-proxy", - Port: 20000, - Address: "1.2.3.4", - Proxy: &api.AgentServiceConnectProxyConfig{ - DestinationServiceName: "different-consul-svc-name", - DestinationServiceID: "pod1-different-consul-svc-name", - }, - Meta: map[string]string{"k8s-service-name": "service-updated", "k8s-namespace": "default", MetaKeyManagedBy: managedByValue}, - }, - }, + expectedConsulSvcInstances: []*api.CatalogService{ { - Node: ConsulNodeName, - Address: ConsulNodeAddress, - Service: &api.AgentService{ - ID: "pod2-different-consul-svc-name", - Service: "different-consul-svc-name", - Port: 80, - Address: "2.2.3.4", - Meta: map[string]string{"k8s-service-name": "service-updated", "k8s-namespace": "default", MetaKeyManagedBy: managedByValue}, - }, + ServiceID: "pod1-different-consul-svc-name", + ServiceAddress: "4.4.4.4", }, + }, + expectedProxySvcInstances: []*api.CatalogService{ { - Node: ConsulNodeName, - Address: "127.0.0.1", - Service: &api.AgentService{ - Kind: api.ServiceKindConnectProxy, - ID: "pod2-different-consul-svc-name-sidecar-proxy", - Service: "different-consul-svc-name-sidecar-proxy", - Port: 20000, - Address: "2.2.3.4", - Proxy: &api.AgentServiceConnectProxyConfig{ - DestinationServiceName: "different-consul-svc-name", - DestinationServiceID: "pod2-different-consul-svc-name", - }, - Meta: map[string]string{"k8s-service-name": "service-updated", "k8s-namespace": "default", MetaKeyManagedBy: managedByValue}, - }, + ServiceID: "pod1-different-consul-svc-name-sidecar-proxy", + ServiceAddress: "4.4.4.4", }, }, - expectedConsulSvcInstances: []*api.CatalogService{}, - expectedProxySvcInstances: []*api.CatalogService{}, }, { - name: "ACLs enabled: Endpoints has an updated address because the target pod changes", + name: "Endpoints has additional address not in Consul", consulSvcName: "service-updated", k8sObjects: func() []runtime.Object { - pod2 := createServicePod("pod2", "4.4.4.4", true, true) - endpoint := &corev1.Endpoints{ + pod1 := createPod("pod1", "1.2.3.4", true, true) + pod2 := createPod("pod2", "2.2.3.4", true, true) + endpointWithTwoAddresses := &corev1.Endpoints{ ObjectMeta: metav1.ObjectMeta{ Name: "service-updated", Namespace: "default", @@ -2984,7 +2479,17 @@ func TestReconcileUpdateEndpoint(t *testing.T) { { Addresses: []corev1.EndpointAddress{ { - IP: "4.4.4.4", + IP: "1.2.3.4", + NodeName: &nodeName, + TargetRef: &corev1.ObjectReference{ + Kind: "Pod", + Name: "pod1", + Namespace: "default", + }, + }, + { + IP: "2.2.3.4", + NodeName: &nodeName, TargetRef: &corev1.ObjectReference{ Kind: "Pod", Name: "pod2", @@ 
-2995,82 +2500,74 @@ func TestReconcileUpdateEndpoint(t *testing.T) { }, }, } - return []runtime.Object{pod2, endpoint} + return []runtime.Object{pod1, pod2, endpointWithTwoAddresses} }, - initialConsulSvcs: []*api.CatalogRegistration{ + initialConsulSvcs: []*api.AgentServiceRegistration{ { - Node: ConsulNodeName, - Address: ConsulNodeAddress, - Service: &api.AgentService{ - ID: "pod1-service-updated", - Service: "service-updated", - Port: 80, - Address: "1.2.3.4", - Meta: map[string]string{ - MetaKeyKubeNS: "default", - MetaKeyPodName: "pod1", - MetaKeyKubeServiceName: "service-updated", - MetaKeyManagedBy: managedByValue, - MetaKeySyntheticNode: "true", - }, - }, + ID: "pod1-service-updated", + Name: "service-updated", + Port: 80, + Address: "1.2.3.4", + Meta: map[string]string{"k8s-service-name": "service-updated", "k8s-namespace": "default", MetaKeyManagedBy: managedByValue}, }, { - Node: ConsulNodeName, - Address: "127.0.0.1", - Service: &api.AgentService{ - Kind: api.ServiceKindConnectProxy, - ID: "pod1-service-updated-sidecar-proxy", - Service: "service-updated-sidecar-proxy", - Port: 20000, - Address: "1.2.3.4", - Meta: map[string]string{ - MetaKeyKubeNS: "default", - MetaKeyPodName: "pod1", - MetaKeyKubeServiceName: "service-updated", - MetaKeyManagedBy: managedByValue, - MetaKeySyntheticNode: "true", - }, - Proxy: &api.AgentServiceConnectProxyConfig{ - DestinationServiceName: "service-updated", - DestinationServiceID: "pod1-service-updated", - }, + Kind: api.ServiceKindConnectProxy, + ID: "pod1-service-updated-sidecar-proxy", + Name: "service-updated-sidecar-proxy", + Port: 20000, + Address: "1.2.3.4", + Proxy: &api.AgentServiceConnectProxyConfig{ + DestinationServiceName: "service-updated", + DestinationServiceID: "pod1-service-updated", }, }, }, expectedConsulSvcInstances: []*api.CatalogService{ + { + ServiceID: "pod1-service-updated", + ServiceAddress: "1.2.3.4", + }, { ServiceID: "pod2-service-updated", - ServiceAddress: "4.4.4.4", - ServiceMeta: map[string]string{ - MetaKeyKubeServiceName: "service-updated", - MetaKeyKubeNS: "default", - MetaKeyManagedBy: managedByValue, - MetaKeySyntheticNode: "true", - MetaKeyPodName: "pod2", - }, + ServiceAddress: "2.2.3.4", }, }, expectedProxySvcInstances: []*api.CatalogService{ + { + ServiceID: "pod1-service-updated-sidecar-proxy", + ServiceAddress: "1.2.3.4", + }, { ServiceID: "pod2-service-updated-sidecar-proxy", - ServiceAddress: "4.4.4.4", - ServiceMeta: map[string]string{ - MetaKeyKubeServiceName: "service-updated", - MetaKeyKubeNS: "default", - MetaKeyManagedBy: managedByValue, - MetaKeySyntheticNode: "true", - MetaKeyPodName: "pod2", - }, + ServiceAddress: "2.2.3.4", + }, + }, + expectedAgentHealthChecks: []*api.AgentCheck{ + { + CheckID: "default/pod1-service-updated/kubernetes-health-check", + ServiceName: "service-updated", + ServiceID: "pod1-service-updated", + Name: "Kubernetes Health Check", + Status: api.HealthPassing, + Output: kubernetesSuccessReasonMsg, + Type: ttl, + }, + { + CheckID: "default/pod2-service-updated/kubernetes-health-check", + ServiceName: "service-updated", + ServiceID: "pod2-service-updated", + Name: "Kubernetes Health Check", + Status: api.HealthPassing, + Output: kubernetesSuccessReasonMsg, + Type: ttl, }, }, - enableACLs: true, }, { - name: "ACLs enabled: Consul has instances that are not in the Endpoints addresses", + name: "Consul has instances that are not in the Endpoints addresses", consulSvcName: "service-updated", k8sObjects: func() []runtime.Object { - pod1 := createServicePod("pod1", 
"1.2.3.4", true, true) + pod1 := createPod("pod1", "1.2.3.4", true, true) endpoint := &corev1.Endpoints{ ObjectMeta: metav1.ObjectMeta{ Name: "service-updated", @@ -3080,7 +2577,8 @@ func TestReconcileUpdateEndpoint(t *testing.T) { { Addresses: []corev1.EndpointAddress{ { - IP: "1.2.3.4", + IP: "1.2.3.4", + NodeName: &nodeName, TargetRef: &corev1.ObjectReference{ Kind: "Pod", Name: "pod1", @@ -3093,123 +2591,65 @@ func TestReconcileUpdateEndpoint(t *testing.T) { } return []runtime.Object{pod1, endpoint} }, - initialConsulSvcs: []*api.CatalogRegistration{ - { - Node: ConsulNodeName, - Address: ConsulNodeAddress, - Service: &api.AgentService{ - ID: "pod1-service-updated", - Service: "service-updated", - Port: 80, - Address: "1.2.3.4", - Meta: map[string]string{ - MetaKeyKubeServiceName: "service-updated", - MetaKeyKubeNS: "default", - MetaKeyManagedBy: managedByValue, - MetaKeySyntheticNode: "true", - MetaKeyPodName: "pod1", - }, - }, + initialConsulSvcs: []*api.AgentServiceRegistration{ + { + ID: "pod1-service-updated", + Name: "service-updated", + Port: 80, + Address: "1.2.3.4", + Meta: map[string]string{"k8s-service-name": "service-updated", "k8s-namespace": "default", MetaKeyManagedBy: managedByValue}, }, { - Node: ConsulNodeName, - Address: "127.0.0.1", - Service: &api.AgentService{ - Kind: api.ServiceKindConnectProxy, - ID: "pod1-service-updated-sidecar-proxy", - Service: "service-updated-sidecar-proxy", - Port: 20000, - Address: "1.2.3.4", - Proxy: &api.AgentServiceConnectProxyConfig{ - DestinationServiceName: "service-updated", - DestinationServiceID: "pod1-service-updated", - }, - Meta: map[string]string{ - MetaKeyKubeServiceName: "service-updated", - MetaKeyKubeNS: "default", - MetaKeyManagedBy: managedByValue, - MetaKeySyntheticNode: "true", - MetaKeyPodName: "pod1", - }, + Kind: api.ServiceKindConnectProxy, + ID: "pod1-service-updated-sidecar-proxy", + Name: "service-updated-sidecar-proxy", + Port: 20000, + Address: "1.2.3.4", + Proxy: &api.AgentServiceConnectProxyConfig{ + DestinationServiceName: "service-updated", + DestinationServiceID: "pod1-service-updated", }, + Meta: map[string]string{"k8s-service-name": "service-updated", "k8s-namespace": "default", MetaKeyManagedBy: managedByValue}, }, { - Node: ConsulNodeName, - Address: ConsulNodeAddress, - Service: &api.AgentService{ - ID: "pod2-service-updated", - Service: "service-updated", - Port: 80, - Address: "2.2.3.4", - Meta: map[string]string{ - MetaKeyKubeServiceName: "service-updated", - MetaKeyKubeNS: "default", - MetaKeyManagedBy: managedByValue, - MetaKeySyntheticNode: "true", - MetaKeyPodName: "pod2", - }, - }, + ID: "pod2-service-updated", + Name: "service-updated", + Port: 80, + Address: "2.2.3.4", + Meta: map[string]string{"k8s-service-name": "service-updated", "k8s-namespace": "default", MetaKeyManagedBy: managedByValue}, }, { - Node: ConsulNodeName, - Address: "127.0.0.1", - Service: &api.AgentService{ - Kind: api.ServiceKindConnectProxy, - ID: "pod2-service-updated-sidecar-proxy", - Service: "service-updated-sidecar-proxy", - Port: 20000, - Address: "2.2.3.4", - Proxy: &api.AgentServiceConnectProxyConfig{ - DestinationServiceName: "service-updated", - DestinationServiceID: "pod2-service-updated", - }, - Meta: map[string]string{ - MetaKeyKubeServiceName: "service-updated", - MetaKeyKubeNS: "default", - MetaKeyManagedBy: managedByValue, - MetaKeySyntheticNode: "true", - MetaKeyPodName: "pod2", - }, + Kind: api.ServiceKindConnectProxy, + ID: "pod2-service-updated-sidecar-proxy", + Name: 
"service-updated-sidecar-proxy", + Port: 20000, + Address: "2.2.3.4", + Proxy: &api.AgentServiceConnectProxyConfig{ + DestinationServiceName: "service-updated", + DestinationServiceID: "pod2-service-updated", }, + Meta: map[string]string{"k8s-service-name": "service-updated", "k8s-namespace": "default", MetaKeyManagedBy: managedByValue}, }, }, expectedConsulSvcInstances: []*api.CatalogService{ { ServiceID: "pod1-service-updated", - ServiceName: "service-updated", ServiceAddress: "1.2.3.4", - ServiceMeta: map[string]string{ - MetaKeyKubeServiceName: "service-updated", - MetaKeyKubeNS: "default", - MetaKeyManagedBy: managedByValue, - MetaKeySyntheticNode: "true", - MetaKeyPodName: "pod1", - }, }, }, expectedProxySvcInstances: []*api.CatalogService{ { ServiceID: "pod1-service-updated-sidecar-proxy", - ServiceName: "service-updated-sidecar-proxy", ServiceAddress: "1.2.3.4", - ServiceMeta: map[string]string{ - MetaKeyKubeServiceName: "service-updated", - MetaKeyKubeNS: "default", - MetaKeyManagedBy: managedByValue, - MetaKeySyntheticNode: "true", - MetaKeyPodName: "pod1", - }, }, }, - enableACLs: true, }, - // When a Deployment has the mesh annotation removed, Kube will delete the old pods. When it deletes the last Pod, - // the endpoints object will contain only non-mesh pods, but you'll still have one consul service instance to clean up. { - name: "When a Deployment moves from mesh to non mesh its service instances should be deleted", - consulSvcName: "service-updated", + name: "Different Consul service name: Consul has instances that are not in the Endpoints addresses", + consulSvcName: "different-consul-svc-name", k8sObjects: func() []runtime.Object { - pod2 := createServicePod("pod2", "2.3.4.5", false, false) + pod1 := createPod("pod1", "1.2.3.4", true, true) + pod1.Annotations[annotationService] = "different-consul-svc-name" endpoint := &corev1.Endpoints{ ObjectMeta: metav1.ObjectMeta{ Name: "service-updated", @@ -3219,10 +2659,11 @@ func TestReconcileUpdateEndpoint(t *testing.T) { { Addresses: []corev1.EndpointAddress{ { - IP: "2.3.4.5", + IP: "1.2.3.4", + NodeName: &nodeName, TargetRef: &corev1.ObjectReference{ Kind: "Pod", - Name: "pod2", + Name: "pod1", Namespace: "default", }, }, @@ -3230,827 +2671,1750 @@ func TestReconcileUpdateEndpoint(t *testing.T) { }, }, } - return []runtime.Object{pod2, endpoint} + return []runtime.Object{pod1, endpoint} }, - initialConsulSvcs: []*api.CatalogRegistration{ - { - Node: ConsulNodeName, - Address: ConsulNodeAddress, - Service: &api.AgentService{ - ID: "pod1-service-updated", - Service: "service-updated", - Port: 80, - Address: "1.2.3.4", - Meta: map[string]string{ - MetaKeyKubeServiceName: "service-updated", - MetaKeyKubeNS: "default", - MetaKeyManagedBy: managedByValue, - MetaKeySyntheticNode: "true", - MetaKeyPodName: "pod1", - }, + initialConsulSvcs: []*api.AgentServiceRegistration{ + { + ID: "pod1-different-consul-svc-name", + Name: "different-consul-svc-name", + Port: 80, + Address: "1.2.3.4", + Meta: map[string]string{"k8s-service-name": "service-updated", "k8s-namespace": "default", MetaKeyManagedBy: managedByValue}, + }, + { + Kind: api.ServiceKindConnectProxy, + ID: "pod1-different-consul-svc-name-sidecar-proxy", + Name: "different-consul-svc-name-sidecar-proxy", + Port: 20000, + Address: "1.2.3.4", + Proxy: &api.AgentServiceConnectProxyConfig{ + DestinationServiceName: "different-consul-svc-name", + DestinationServiceID: "pod1-different-consul-svc-name", }, + Meta: map[string]string{"k8s-service-name": "service-updated", "k8s-namespace": 
"default", MetaKeyManagedBy: managedByValue}, }, { - Node: ConsulNodeName, - Address: "127.0.0.1", - Service: &api.AgentService{ - Kind: api.ServiceKindConnectProxy, - ID: "pod1-service-updated-sidecar-proxy", - Service: "service-updated-sidecar-proxy", - Port: 20000, - Address: "1.2.3.4", - Proxy: &api.AgentServiceConnectProxyConfig{ - DestinationServiceName: "service-updated", - DestinationServiceID: "pod1-service-updated", - }, - Meta: map[string]string{ - MetaKeyKubeServiceName: "service-updated", - MetaKeyKubeNS: "default", - MetaKeyManagedBy: managedByValue, - MetaKeySyntheticNode: "true", - MetaKeyPodName: "pod1", - }, + ID: "pod2-different-consul-svc-name", + Name: "different-consul-svc-name", + Port: 80, + Address: "2.2.3.4", + Meta: map[string]string{"k8s-service-name": "service-updated", "k8s-namespace": "default", MetaKeyManagedBy: managedByValue}, + }, + { + Kind: api.ServiceKindConnectProxy, + ID: "pod2-different-consul-svc-name-sidecar-proxy", + Name: "different-consul-svc-name-sidecar-proxy", + Port: 20000, + Address: "2.2.3.4", + Proxy: &api.AgentServiceConnectProxyConfig{ + DestinationServiceName: "different-consul-svc-name", + DestinationServiceID: "pod2-different-consul-svc-name", }, + Meta: map[string]string{"k8s-service-name": "service-updated", "k8s-namespace": "default", MetaKeyManagedBy: managedByValue}, + }, + }, + expectedConsulSvcInstances: []*api.CatalogService{ + { + ServiceID: "pod1-different-consul-svc-name", + ServiceAddress: "1.2.3.4", + }, + }, + expectedProxySvcInstances: []*api.CatalogService{ + { + ServiceID: "pod1-different-consul-svc-name-sidecar-proxy", + ServiceAddress: "1.2.3.4", }, }, - expectedConsulSvcInstances: nil, - expectedProxySvcInstances: nil, }, - } - for _, tt := range cases { - t.Run(tt.name, func(t *testing.T) { - // Add the default namespace. - ns := corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "default"}} - // Create fake k8s client. - k8sObjects := append(tt.k8sObjects(), &ns) - fakeClient := fake.NewClientBuilder().WithRuntimeObjects(k8sObjects...).Build() - - // Create test consulServer server - adminToken := "123e4567-e89b-12d3-a456-426614174000" - testClient := test.TestServerWithMockConnMgrWatcher(t, func(c *testutil.TestServerConfig) { - if tt.enableACLs { - c.ACL.Enabled = tt.enableACLs - c.ACL.Tokens.InitialManagement = adminToken + { + // When a k8s deployment is deleted but it's k8s service continues to exist, the endpoints has no addresses + // and the instances should be deleted from Consul. + name: "Consul has instances that are not in the endpoints, and the endpoints has no addresses.", + consulSvcName: "service-updated", + k8sObjects: func() []runtime.Object { + endpoint := &corev1.Endpoints{ + ObjectMeta: metav1.ObjectMeta{ + Name: "service-updated", + Namespace: "default", + }, } - }) - consulClient := testClient.APIClient - - // Holds token accessorID for each service ID. - tokensForServices := make(map[string]string) - - // Register service and proxy in consul. - for _, svc := range tt.initialConsulSvcs { - _, err := consulClient.Catalog().Register(svc, nil) - require.NoError(t, err) - - // Create a token for this service if ACLs are enabled. 
- if tt.enableACLs { - if svc.Service.Kind != api.ServiceKindConnectProxy { - test.SetupK8sAuthMethod(t, consulClient, svc.Service.Service, svc.Service.Meta[MetaKeyKubeNS]) - token, _, err := consulClient.ACL().Login(&api.ACLLoginParams{ - AuthMethod: test.AuthMethod, - BearerToken: test.ServiceAccountJWTToken, - Meta: map[string]string{ - TokenMetaPodNameKey: fmt.Sprintf("%s/%s", svc.Service.Meta[MetaKeyKubeNS], svc.Service.Meta[MetaKeyPodName]), - }, - }, nil) - // Record each token we create. - require.NoError(t, err) - tokensForServices[svc.ID] = token.AccessorID - - // Create another token for the same service but a pod that either no longer exists - // or the endpoints controller doesn't know about it yet. - // This is to test a scenario with either orphaned tokens - // or tokens for services that haven't yet been registered with Consul. - // In that case, we have a token for the pod but the service instance - // for that pod either no longer exists or is not yet registered in Consul. - // This token should not be deleted. - token, _, err = consulClient.ACL().Login(&api.ACLLoginParams{ - AuthMethod: test.AuthMethod, - BearerToken: test.ServiceAccountJWTToken, - Meta: map[string]string{ - TokenMetaPodNameKey: fmt.Sprintf("%s/%s", svc.Service.Meta[MetaKeyKubeNS], "does-not-exist"), - }, - }, nil) - require.NoError(t, err) - tokensForServices["does-not-exist"+svc.Service.Service] = token.AccessorID - } - } - } - - // Create the endpoints controller. - ep := &EndpointsController{ - Client: fakeClient, - Log: logrtest.TestLogger{T: t}, - ConsulClientConfig: testClient.Cfg, - ConsulServerConnMgr: testClient.Watcher, - AllowK8sNamespacesSet: mapset.NewSetWith("*"), - DenyK8sNamespacesSet: mapset.NewSetWith(), - ReleaseName: "consul", - ReleaseNamespace: "default", - } - if tt.enableACLs { - ep.AuthMethod = test.AuthMethod - } - namespacedName := types.NamespacedName{Namespace: "default", Name: "service-updated"} - - resp, err := ep.Reconcile(context.Background(), ctrl.Request{NamespacedName: namespacedName}) - require.NoError(t, err) - require.False(t, resp.Requeue) - - // After reconciliation, Consul should have service-updated with the correct number of instances. - serviceInstances, _, err := consulClient.Catalog().Service(tt.consulSvcName, "", nil) - require.NoError(t, err) - require.Len(t, serviceInstances, len(tt.expectedConsulSvcInstances)) - for i, instance := range serviceInstances { - require.Equal(t, tt.expectedConsulSvcInstances[i].ServiceID, instance.ServiceID) - require.Equal(t, tt.expectedConsulSvcInstances[i].ServiceAddress, instance.ServiceAddress) - } - proxyServiceInstances, _, err := consulClient.Catalog().Service(fmt.Sprintf("%s-sidecar-proxy", tt.consulSvcName), "", nil) - require.NoError(t, err) - require.Len(t, proxyServiceInstances, len(tt.expectedProxySvcInstances)) - for i, instance := range proxyServiceInstances { - require.Equal(t, tt.expectedProxySvcInstances[i].ServiceID, instance.ServiceID) - require.Equal(t, tt.expectedProxySvcInstances[i].ServiceAddress, instance.ServiceAddress) - } - // Check that the Consul health check was created for the k8s pod. - for _, expectedCheck := range tt.expectedHealthChecks { - filter := fmt.Sprintf("ServiceID == %q", expectedCheck.ServiceID) - checks, _, err := consulClient.Health().Checks(expectedCheck.ServiceName, &api.QueryOptions{Filter: filter}) - require.NoError(t, err) - require.Equal(t, 1, len(checks)) - // Ignoring Namespace because the response from ENT includes it and OSS does not. 
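One idiom in this harness worth a standalone example: health checks are compared structurally with go-cmp while ignoring fields that legitimately differ between Consul Enterprise and OSS, such as `Namespace` and `Partition`. A self-contained version of the same comparison, with invented check values:

```go
package main

import (
	"fmt"

	"github.com/google/go-cmp/cmp"
	"github.com/google/go-cmp/cmp/cmpopts"
	"github.com/hashicorp/consul/api"
)

func main() {
	// Node differs between the two checks, but it sits in the ignore
	// list, so the comparison still reports equality.
	got := &api.HealthCheck{CheckID: "c1", Status: api.HealthPassing, Node: "node-a"}
	want := &api.HealthCheck{CheckID: "c1", Status: api.HealthPassing}
	ignored := []string{"Node", "Definition", "Namespace", "Partition"}
	fmt.Println(cmp.Equal(got, want, cmpopts.IgnoreFields(api.HealthCheck{}, ignored...)))
	// Output: true
}
```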
- var ignoredFields = []string{"Node", "Definition", "Namespace", "Partition", "CreateIndex", "ModifyIndex", "ServiceTags"} - require.True(t, cmp.Equal(checks[0], expectedCheck, cmpopts.IgnoreFields(api.HealthCheck{}, ignoredFields...))) - } - - if tt.enableACLs { - // Put expected services into a map to make it easier to find service IDs. - expectedServices := mapset.NewSet() - for _, svc := range tt.expectedConsulSvcInstances { - expectedServices.Add(svc.ServiceID) - } - - initialServices := mapset.NewSet() - for _, svc := range tt.initialConsulSvcs { - initialServices.Add(svc.ID) - } - - // We only care about a case when services are deregistered, where - // the set of initial services is bigger than the set of expected services. - deregisteredServices := initialServices.Difference(expectedServices) - - // Look through the tokens we've created and check that only - // tokens for the deregistered services have been deleted. - for sID, tokenID := range tokensForServices { - // Read the token from Consul. - token, _, err := consulClient.ACL().TokenRead(tokenID, nil) - if deregisteredServices.Contains(sID) { - require.EqualError(t, err, "Unexpected response code: 403 (ACL not found)") - } else { - require.NoError(t, err, "token should exist for service instance: "+sID) - require.NotNil(t, token) - } - } - } - }) - } -} - -// Tests deleting an Endpoints object, with and without matching Consul and K8s service names. -// This test covers EndpointsController.deregisterService when the map is nil (not selectively deregistered). -func TestReconcileDeleteEndpoint(t *testing.T) { - t.Parallel() - cases := []struct { - name string - consulSvcName string - expectServicesToBeDeleted bool - initialConsulSvcs []*api.AgentService - enableACLs bool - }{ - { - name: "Legacy service: does not delete", - consulSvcName: "service-deleted", - expectServicesToBeDeleted: false, - initialConsulSvcs: []*api.AgentService{ + return []runtime.Object{endpoint} + }, + initialConsulSvcs: []*api.AgentServiceRegistration{ { - ID: "pod1-service-deleted", - Service: "service-deleted", + ID: "pod1-service-updated", + Name: "service-updated", Port: 80, Address: "1.2.3.4", - Meta: map[string]string{"k8s-service-name": "service-deleted", "k8s-namespace": "default"}, + Meta: map[string]string{"k8s-service-name": "service-updated", "k8s-namespace": "default", MetaKeyManagedBy: managedByValue}, }, { Kind: api.ServiceKindConnectProxy, - ID: "pod1-service-deleted-sidecar-proxy", - Service: "service-deleted-sidecar-proxy", + ID: "pod1-service-updated-sidecar-proxy", + Name: "service-updated-sidecar-proxy", Port: 20000, Address: "1.2.3.4", Proxy: &api.AgentServiceConnectProxyConfig{ - DestinationServiceName: "service-deleted", - DestinationServiceID: "pod1-service-deleted", + DestinationServiceName: "service-updated", + DestinationServiceID: "pod1-service-updated", }, - Meta: map[string]string{"k8s-service-name": "service-deleted", "k8s-namespace": "default"}, + Meta: map[string]string{"k8s-service-name": "service-updated", "k8s-namespace": "default", MetaKeyManagedBy: managedByValue}, }, - }, - }, - { - name: "Consul service name matches K8s service name", - consulSvcName: "service-deleted", - expectServicesToBeDeleted: true, - initialConsulSvcs: []*api.AgentService{ { - ID: "pod1-service-deleted", - Service: "service-deleted", + ID: "pod2-service-updated", + Name: "service-updated", Port: 80, - Address: "1.2.3.4", - Meta: map[string]string{"k8s-service-name": "service-deleted", "k8s-namespace": "default", MetaKeyManagedBy: 
managedByValue}, + Address: "2.2.3.4", + Meta: map[string]string{"k8s-service-name": "service-updated", "k8s-namespace": "default", MetaKeyManagedBy: managedByValue}, }, { Kind: api.ServiceKindConnectProxy, - ID: "pod1-service-deleted-sidecar-proxy", - Service: "service-deleted-sidecar-proxy", + ID: "pod2-service-updated-sidecar-proxy", + Name: "service-updated-sidecar-proxy", Port: 20000, - Address: "1.2.3.4", + Address: "2.2.3.4", Proxy: &api.AgentServiceConnectProxyConfig{ - DestinationServiceName: "service-deleted", - DestinationServiceID: "pod1-service-deleted", + DestinationServiceName: "service-updated", + DestinationServiceID: "pod2-service-updated", }, - Meta: map[string]string{"k8s-service-name": "service-deleted", "k8s-namespace": "default", MetaKeyManagedBy: managedByValue}, + Meta: map[string]string{"k8s-service-name": "service-updated", "k8s-namespace": "default", MetaKeyManagedBy: managedByValue}, }, }, + expectedConsulSvcInstances: []*api.CatalogService{}, + expectedProxySvcInstances: []*api.CatalogService{}, }, { - name: "Consul service name does not match K8s service name", - consulSvcName: "different-consul-svc-name", - expectServicesToBeDeleted: true, - initialConsulSvcs: []*api.AgentService{ + // With a different Consul service name, when a k8s deployment is deleted but it's k8s service continues to + // exist, the endpoints has no addresses and the instances should be deleted from Consul. + name: "Different Consul service name: Consul has instances that are not in the endpoints, and the endpoints has no addresses.", + consulSvcName: "different-consul-svc-name", + k8sObjects: func() []runtime.Object { + endpoint := &corev1.Endpoints{ + ObjectMeta: metav1.ObjectMeta{ + Name: "service-updated", + Namespace: "default", + }, + } + return []runtime.Object{endpoint} + }, + initialConsulSvcs: []*api.AgentServiceRegistration{ { ID: "pod1-different-consul-svc-name", - Service: "different-consul-svc-name", + Name: "different-consul-svc-name", Port: 80, Address: "1.2.3.4", - Meta: map[string]string{"k8s-service-name": "service-deleted", "k8s-namespace": "default", MetaKeyManagedBy: managedByValue}, + Meta: map[string]string{"k8s-service-name": "service-updated", "k8s-namespace": "default", MetaKeyManagedBy: managedByValue}, }, { Kind: api.ServiceKindConnectProxy, ID: "pod1-different-consul-svc-name-sidecar-proxy", - Service: "different-consul-svc-name-sidecar-proxy", + Name: "different-consul-svc-name-sidecar-proxy", Port: 20000, Address: "1.2.3.4", Proxy: &api.AgentServiceConnectProxyConfig{ DestinationServiceName: "different-consul-svc-name", DestinationServiceID: "pod1-different-consul-svc-name", }, - Meta: map[string]string{"k8s-service-name": "service-deleted", "k8s-namespace": "default", MetaKeyManagedBy: managedByValue}, + Meta: map[string]string{"k8s-service-name": "service-updated", "k8s-namespace": "default", MetaKeyManagedBy: managedByValue}, }, - }, - }, - { - name: "When ACLs are enabled, the token should be deleted", - consulSvcName: "service-deleted", - expectServicesToBeDeleted: true, - initialConsulSvcs: []*api.AgentService{ { - ID: "pod1-service-deleted", - Service: "service-deleted", + ID: "pod2-different-consul-svc-name", + Name: "different-consul-svc-name", Port: 80, - Address: "1.2.3.4", - Meta: map[string]string{ - MetaKeyKubeServiceName: "service-deleted", - MetaKeyKubeNS: "default", - MetaKeyManagedBy: managedByValue, - MetaKeySyntheticNode: "true", - MetaKeyPodName: "pod1", - }, + Address: "2.2.3.4", + Meta: map[string]string{"k8s-service-name": 
"service-updated", "k8s-namespace": "default", MetaKeyManagedBy: managedByValue}, }, { Kind: api.ServiceKindConnectProxy, - ID: "pod1-service-deleted-sidecar-proxy", - Service: "service-deleted-sidecar-proxy", + ID: "pod2-different-consul-svc-name-sidecar-proxy", + Name: "different-consul-svc-name-sidecar-proxy", Port: 20000, - Address: "1.2.3.4", + Address: "2.2.3.4", Proxy: &api.AgentServiceConnectProxyConfig{ - DestinationServiceName: "service-deleted", - DestinationServiceID: "pod1-service-deleted", - }, - Meta: map[string]string{ - MetaKeyKubeServiceName: "service-deleted", - MetaKeyKubeNS: "default", - MetaKeyManagedBy: managedByValue, - MetaKeySyntheticNode: "true", - MetaKeyPodName: "pod1", + DestinationServiceName: "different-consul-svc-name", + DestinationServiceID: "pod2-different-consul-svc-name", }, + Meta: map[string]string{"k8s-service-name": "service-updated", "k8s-namespace": "default", MetaKeyManagedBy: managedByValue}, }, }, - enableACLs: true, + expectedConsulSvcInstances: []*api.CatalogService{}, + expectedProxySvcInstances: []*api.CatalogService{}, }, { - name: "Mesh Gateway", - consulSvcName: "service-deleted", - expectServicesToBeDeleted: true, - initialConsulSvcs: []*api.AgentService{ - { - ID: "mesh-gateway", - Kind: api.ServiceKindMeshGateway, - Service: "mesh-gateway", - Port: 80, - Address: "1.2.3.4", - Meta: map[string]string{ - MetaKeyKubeServiceName: "service-deleted", - MetaKeyKubeNS: "default", - MetaKeyManagedBy: managedByValue, - MetaKeySyntheticNode: "true", - MetaKeyPodName: "mesh-gateway", + name: "ACLs enabled: Endpoints has an updated address because the target pod changes", + consulSvcName: "service-updated", + k8sObjects: func() []runtime.Object { + pod2 := createPod("pod2", "4.4.4.4", true, true) + endpoint := &corev1.Endpoints{ + ObjectMeta: metav1.ObjectMeta{ + Name: "service-updated", + Namespace: "default", }, - TaggedAddresses: map[string]api.ServiceAddress{ - "lan": { - Address: "1.2.3.4", - Port: 80, - }, - "wan": { - Address: "5.6.7.8", - Port: 8080, + Subsets: []corev1.EndpointSubset{ + { + Addresses: []corev1.EndpointAddress{ + { + IP: "4.4.4.4", + NodeName: &nodeName, + TargetRef: &corev1.ObjectReference{ + Kind: "Pod", + Name: "pod2", + Namespace: "default", + }, + }, + }, }, }, - }, + } + return []runtime.Object{pod2, endpoint} }, - }, - { - name: "When ACLs are enabled, the mesh-gateway token should be deleted", - consulSvcName: "service-deleted", - expectServicesToBeDeleted: true, - initialConsulSvcs: []*api.AgentService{ + initialConsulSvcs: []*api.AgentServiceRegistration{ { - ID: "mesh-gateway", - Kind: api.ServiceKindMeshGateway, - Service: "mesh-gateway", + ID: "pod1-service-updated", + Name: "service-updated", Port: 80, Address: "1.2.3.4", Meta: map[string]string{ - MetaKeyKubeServiceName: "service-deleted", MetaKeyKubeNS: "default", + MetaKeyPodName: "pod1", + MetaKeyKubeServiceName: "service-updated", MetaKeyManagedBy: managedByValue, - MetaKeySyntheticNode: "true", - MetaKeyPodName: "mesh-gateway", - }, - TaggedAddresses: map[string]api.ServiceAddress{ - "lan": { - Address: "1.2.3.4", - Port: 80, - }, - "wan": { - Address: "5.6.7.8", - Port: 8080, - }, }, }, - }, - enableACLs: true, - }, - { - name: "Ingress Gateway", - consulSvcName: "service-deleted", - expectServicesToBeDeleted: true, - initialConsulSvcs: []*api.AgentService{ { - ID: "ingress-gateway", - Kind: api.ServiceKindIngressGateway, - Service: "ingress-gateway", - Port: 21000, + Kind: api.ServiceKindConnectProxy, + ID: "pod1-service-updated-sidecar-proxy", + 
Name: "service-updated-sidecar-proxy", + Port: 20000, Address: "1.2.3.4", Meta: map[string]string{ - MetaKeyKubeServiceName: "service-deleted", MetaKeyKubeNS: "default", + MetaKeyPodName: "pod1", + MetaKeyKubeServiceName: "service-updated", MetaKeyManagedBy: managedByValue, - MetaKeySyntheticNode: "true", - MetaKeyPodName: "ingress-gateway", }, - TaggedAddresses: map[string]api.ServiceAddress{ - "lan": { - Address: "1.2.3.4", - Port: 21000, - }, - "wan": { - Address: "5.6.7.8", - Port: 8080, - }, + Proxy: &api.AgentServiceConnectProxyConfig{ + DestinationServiceName: "service-updated", + DestinationServiceID: "pod1-service-updated", }, }, }, - }, - { - name: "When ACLs are enabled, the ingress-gateway token should be deleted", - consulSvcName: "service-deleted", - expectServicesToBeDeleted: true, - initialConsulSvcs: []*api.AgentService{ + expectedConsulSvcInstances: []*api.CatalogService{ { - ID: "ingress-gateway", - Kind: api.ServiceKindIngressGateway, - Service: "ingress-gateway", - Port: 21000, - Address: "1.2.3.4", - Meta: map[string]string{ - MetaKeyKubeServiceName: "service-deleted", + ServiceID: "pod2-service-updated", + ServiceAddress: "4.4.4.4", + ServiceMeta: map[string]string{ + MetaKeyKubeServiceName: "service-updated", MetaKeyKubeNS: "default", MetaKeyManagedBy: managedByValue, - MetaKeySyntheticNode: "true", - MetaKeyPodName: "ingress-gateway", - }, - TaggedAddresses: map[string]api.ServiceAddress{ - "lan": { - Address: "1.2.3.4", - Port: 21000, - }, - "wan": { - Address: "5.6.7.8", - Port: 8080, - }, + MetaKeyPodName: "pod2", }, }, }, - enableACLs: true, - }, - { - name: "Terminating Gateway", - consulSvcName: "service-deleted", - expectServicesToBeDeleted: true, - initialConsulSvcs: []*api.AgentService{ + expectedProxySvcInstances: []*api.CatalogService{ + { + ServiceID: "pod2-service-updated-sidecar-proxy", + ServiceAddress: "4.4.4.4", + ServiceMeta: map[string]string{ + MetaKeyKubeServiceName: "service-updated", + MetaKeyKubeNS: "default", + MetaKeyManagedBy: managedByValue, + MetaKeyPodName: "pod2", + }, + }, + }, + enableACLs: true, + }, + { + name: "ACLs enabled: Consul has instances that are not in the Endpoints addresses", + consulSvcName: "service-updated", + k8sObjects: func() []runtime.Object { + pod1 := createPod("pod1", "1.2.3.4", true, true) + endpoint := &corev1.Endpoints{ + ObjectMeta: metav1.ObjectMeta{ + Name: "service-updated", + Namespace: "default", + }, + Subsets: []corev1.EndpointSubset{ + { + Addresses: []corev1.EndpointAddress{ + { + IP: "1.2.3.4", + NodeName: &nodeName, + TargetRef: &corev1.ObjectReference{ + Kind: "Pod", + Name: "pod1", + Namespace: "default", + }, + }, + }, + }, + }, + } + return []runtime.Object{pod1, endpoint} + }, + initialConsulSvcs: []*api.AgentServiceRegistration{ { - ID: "terminating-gateway", - Kind: api.ServiceKindTerminatingGateway, - Service: "terminating-gateway", - Port: 8443, + ID: "pod1-service-updated", + Name: "service-updated", + Port: 80, Address: "1.2.3.4", Meta: map[string]string{ - MetaKeyKubeServiceName: "service-deleted", + MetaKeyKubeServiceName: "service-updated", + MetaKeyKubeNS: "default", + MetaKeyManagedBy: managedByValue, + MetaKeyPodName: "pod1", + }, + }, + { + Kind: api.ServiceKindConnectProxy, + ID: "pod1-service-updated-sidecar-proxy", + Name: "service-updated-sidecar-proxy", + Port: 20000, + Address: "1.2.3.4", + Proxy: &api.AgentServiceConnectProxyConfig{ + DestinationServiceName: "service-updated", + DestinationServiceID: "pod1-service-updated", + }, + Meta: map[string]string{ + 
MetaKeyKubeServiceName: "service-updated", + MetaKeyKubeNS: "default", + MetaKeyManagedBy: managedByValue, + MetaKeyPodName: "pod1", + }, + }, + { + ID: "pod2-service-updated", + Name: "service-updated", + Port: 80, + Address: "2.2.3.4", + Meta: map[string]string{ + MetaKeyKubeServiceName: "service-updated", + MetaKeyKubeNS: "default", + MetaKeyManagedBy: managedByValue, + MetaKeyPodName: "pod2", + }, + }, + { + Kind: api.ServiceKindConnectProxy, + ID: "pod2-service-updated-sidecar-proxy", + Name: "service-updated-sidecar-proxy", + Port: 20000, + Address: "2.2.3.4", + Proxy: &api.AgentServiceConnectProxyConfig{ + DestinationServiceName: "service-updated", + DestinationServiceID: "pod2-service-updated", + }, + Meta: map[string]string{ + MetaKeyKubeServiceName: "service-updated", + MetaKeyKubeNS: "default", + MetaKeyManagedBy: managedByValue, + MetaKeyPodName: "pod2", + }, + }, + }, + expectedConsulSvcInstances: []*api.CatalogService{ + { + ServiceID: "pod1-service-updated", + ServiceName: "service-updated", + ServiceAddress: "1.2.3.4", + ServiceMeta: map[string]string{ + MetaKeyKubeServiceName: "service-updated", + MetaKeyKubeNS: "default", + MetaKeyManagedBy: managedByValue, + MetaKeyPodName: "pod1", + }, + }, + }, + expectedProxySvcInstances: []*api.CatalogService{ + { + ServiceID: "pod1-service-updated-sidecar-proxy", + ServiceName: "service-updated-sidecar-proxy", + ServiceAddress: "1.2.3.4", + ServiceMeta: map[string]string{ + MetaKeyKubeServiceName: "service-updated", MetaKeyKubeNS: "default", MetaKeyManagedBy: managedByValue, - MetaKeySyntheticNode: "true", - MetaKeyPodName: "terminating-gateway", + MetaKeyPodName: "pod1", }, }, }, + enableACLs: true, }, + // When a Deployment has the mesh annotation removed, Kube will delete the old pods. When it deletes the last Pod, + // the endpoints object will contain only non-mesh pods, but you'll still have one consul service instance to clean up. 
{ - name: "When ACLs are enabled, the terminating-gateway token should be deleted", - consulSvcName: "service-deleted", - expectServicesToBeDeleted: true, - initialConsulSvcs: []*api.AgentService{ + name: "When a Deployment moves from mesh to non mesh its service instances should be deleted", + consulSvcName: "service-updated", + k8sObjects: func() []runtime.Object { + pod2 := createPod("pod2", "2.3.4.5", false, false) + endpoint := &corev1.Endpoints{ + ObjectMeta: metav1.ObjectMeta{ + Name: "service-updated", + Namespace: "default", + }, + Subsets: []corev1.EndpointSubset{ + { + Addresses: []corev1.EndpointAddress{ + { + IP: "2.3.4.5", + NodeName: &nodeName, + TargetRef: &corev1.ObjectReference{ + Kind: "Pod", + Name: "pod2", + Namespace: "default", + }, + }, + }, + }, + }, + } + return []runtime.Object{pod2, endpoint} + }, + initialConsulSvcs: []*api.AgentServiceRegistration{ { - ID: "terminating-gateway", - Kind: api.ServiceKindTerminatingGateway, - Service: "terminating-gateway", - Port: 8443, + ID: "pod1-service-updated", + Name: "service-updated", + Port: 80, Address: "1.2.3.4", Meta: map[string]string{ - MetaKeyKubeServiceName: "service-deleted", + MetaKeyKubeServiceName: "service-updated", + MetaKeyKubeNS: "default", + MetaKeyManagedBy: managedByValue, + MetaKeyPodName: "pod1", + }, + }, + { + Kind: api.ServiceKindConnectProxy, + ID: "pod1-service-updated-sidecar-proxy", + Name: "service-updated-sidecar-proxy", + Port: 20000, + Address: "1.2.3.4", + Proxy: &api.AgentServiceConnectProxyConfig{ + DestinationServiceName: "service-updated", + DestinationServiceID: "pod1-service-updated", + }, + Meta: map[string]string{ + MetaKeyKubeServiceName: "service-updated", MetaKeyKubeNS: "default", MetaKeyManagedBy: managedByValue, - MetaKeySyntheticNode: "true", - MetaKeyPodName: "terminating-gateway", + MetaKeyPodName: "pod1", }, }, }, - enableACLs: true, + expectedConsulSvcInstances: nil, + expectedProxySvcInstances: nil, }, } for _, tt := range cases { t.Run(tt.name, func(t *testing.T) { + // The agent pod needs to have the address 127.0.0.1 so when the + // code gets the agent pods via the label component=client, and + // makes requests against the agent API, it will actually hit the + // test server we have on localhost. + fakeClientPod := createPod("fake-consul-client", "127.0.0.1", false, true) + fakeClientPod.Labels = map[string]string{"component": "client", "app": "consul", "release": "consul"} + // Add the default namespace. ns := corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "default"}} // Create fake k8s client. - fakeClient := fake.NewClientBuilder().WithRuntimeObjects(&ns).Build() + k8sObjects := append(tt.k8sObjects(), fakeClientPod, &ns) + fakeClient := fake.NewClientBuilder().WithRuntimeObjects(k8sObjects...).Build() - // Create test consulServer server + // Create test consul server. 
adminToken := "123e4567-e89b-12d3-a456-426614174000" - testClient := test.TestServerWithMockConnMgrWatcher(t, func(c *testutil.TestServerConfig) { + consul, err := testutil.NewTestServerConfigT(t, func(c *testutil.TestServerConfig) { if tt.enableACLs { c.ACL.Enabled = tt.enableACLs c.ACL.Tokens.InitialManagement = adminToken } + c.NodeName = nodeName }) - consulClient := testClient.APIClient + require.NoError(t, err) + defer consul.Stop() + consul.WaitForServiceIntentions(t) + addr := strings.Split(consul.HTTPAddr, ":") + consulPort := addr[1] - // Register service and proxy in consul - var token *api.ACLToken + cfg := &api.Config{Scheme: "http", Address: consul.HTTPAddr} + if tt.enableACLs { + cfg.Token = adminToken + } + consulClient, err := api.NewClient(cfg) + require.NoError(t, err) + + // Holds token accessorID for each service ID. + tokensForServices := make(map[string]string) + + // Register service and proxy in consul. for _, svc := range tt.initialConsulSvcs { - serviceRegistration := &api.CatalogRegistration{ - Node: ConsulNodeName, - Address: ConsulNodeAddress, - Service: svc, - } - _, err := consulClient.Catalog().Register(serviceRegistration, nil) + err = consulClient.Agent().ServiceRegister(svc) require.NoError(t, err) - // Create a token for it if ACLs are enabled. + // Create a token for this service if ACLs are enabled. if tt.enableACLs { - test.SetupK8sAuthMethod(t, consulClient, svc.Service, "default") - token, _, err = consulClient.ACL().Login(&api.ACLLoginParams{ - AuthMethod: test.AuthMethod, - BearerToken: test.ServiceAccountJWTToken, - Meta: map[string]string{ - "pod": fmt.Sprintf("%s/%s", svc.Meta[MetaKeyKubeNS], svc.Meta[MetaKeyPodName]), - "component": tt.consulSvcName, - }, - }, nil) - require.NoError(t, err) + if svc.Kind != api.ServiceKindConnectProxy { + test.SetupK8sAuthMethod(t, consulClient, svc.Name, svc.Meta[MetaKeyKubeNS]) + token, _, err := consulClient.ACL().Login(&api.ACLLoginParams{ + AuthMethod: test.AuthMethod, + BearerToken: test.ServiceAccountJWTToken, + Meta: map[string]string{ + TokenMetaPodNameKey: fmt.Sprintf("%s/%s", svc.Meta[MetaKeyKubeNS], svc.Meta[MetaKeyPodName]), + }, + }, nil) + // Record each token we create. + require.NoError(t, err) + tokensForServices[svc.ID] = token.AccessorID + + // Create another token for the same service but a pod that either no longer exists + // or the endpoints controller doesn't know about it yet. + // This is to test a scenario with either orphaned tokens + // or tokens for services that haven't yet been registered with Consul. + // In that case, we have a token for the pod but the service instance + // for that pod either no longer exists or is not yet registered in Consul. + // This token should not be deleted. + token, _, err = consulClient.ACL().Login(&api.ACLLoginParams{ + AuthMethod: test.AuthMethod, + BearerToken: test.ServiceAccountJWTToken, + Meta: map[string]string{ + TokenMetaPodNameKey: fmt.Sprintf("%s/%s", svc.Meta[MetaKeyKubeNS], "does-not-exist"), + }, + }, nil) + require.NoError(t, err) + tokensForServices["does-not-exist"+svc.Name] = token.AccessorID + } } } - // Create the endpoints controller + // Create the endpoints controller. 
ep := &EndpointsController{ Client: fakeClient, Log: logrtest.TestLogger{T: t}, - ConsulClientConfig: testClient.Cfg, - ConsulServerConnMgr: testClient.Watcher, + ConsulClient: consulClient, + ConsulPort: consulPort, + ConsulScheme: cfg.Scheme, AllowK8sNamespacesSet: mapset.NewSetWith("*"), DenyK8sNamespacesSet: mapset.NewSetWith(), ReleaseName: "consul", ReleaseNamespace: "default", + ConsulClientCfg: cfg, } if tt.enableACLs { ep.AuthMethod = test.AuthMethod } + namespacedName := types.NamespacedName{Namespace: "default", Name: "service-updated"} - // Set up the Endpoint that will be reconciled, and reconcile - namespacedName := types.NamespacedName{ - Namespace: "default", - Name: "service-deleted", - } - resp, err := ep.Reconcile(context.Background(), ctrl.Request{ - NamespacedName: namespacedName, - }) + resp, err := ep.Reconcile(context.Background(), ctrl.Request{NamespacedName: namespacedName}) require.NoError(t, err) require.False(t, resp.Requeue) - // After reconciliation, Consul should not have any instances of service-deleted + // After reconciliation, Consul should have service-updated with the correct number of instances. serviceInstances, _, err := consulClient.Catalog().Service(tt.consulSvcName, "", nil) - // If it's not managed by endpoints controller (legacy service), Consul should have service instances - if tt.expectServicesToBeDeleted { - require.NoError(t, err) - require.Empty(t, serviceInstances) - proxyServiceInstances, _, err := consulClient.Catalog().Service(fmt.Sprintf("%s-sidecar-proxy", tt.consulSvcName), "", nil) - require.NoError(t, err) - require.Empty(t, proxyServiceInstances) - } else { - require.NoError(t, err) - require.NotEmpty(t, serviceInstances) + require.NoError(t, err) + require.Len(t, serviceInstances, len(tt.expectedConsulSvcInstances)) + for i, instance := range serviceInstances { + require.Equal(t, tt.expectedConsulSvcInstances[i].ServiceID, instance.ServiceID) + require.Equal(t, tt.expectedConsulSvcInstances[i].ServiceAddress, instance.ServiceAddress) + } + proxyServiceInstances, _, err := consulClient.Catalog().Service(fmt.Sprintf("%s-sidecar-proxy", tt.consulSvcName), "", nil) + require.NoError(t, err) + require.Len(t, proxyServiceInstances, len(tt.expectedProxySvcInstances)) + for i, instance := range proxyServiceInstances { + require.Equal(t, tt.expectedProxySvcInstances[i].ServiceID, instance.ServiceID) + require.Equal(t, tt.expectedProxySvcInstances[i].ServiceAddress, instance.ServiceAddress) + } + // Check that the Consul health check was created for the k8s pod. + if tt.expectedAgentHealthChecks != nil { + for i := range tt.expectedConsulSvcInstances { + filter := fmt.Sprintf("CheckID == `%s`", tt.expectedAgentHealthChecks[i].CheckID) + check, err := consulClient.Agent().ChecksWithFilter(filter) + require.NoError(t, err) + require.EqualValues(t, len(check), 1) + // Ignoring Namespace because the response from ENT includes it and OSS does not. + var ignoredFields = []string{"Node", "Definition", "Namespace", "Partition"} + require.True(t, cmp.Equal(check[tt.expectedAgentHealthChecks[i].CheckID], tt.expectedAgentHealthChecks[i], cmpopts.IgnoreFields(api.AgentCheck{}, ignoredFields...))) + } } if tt.enableACLs { - _, _, err = consulClient.ACL().TokenRead(token.AccessorID, nil) - require.EqualError(t, err, "Unexpected response code: 403 (ACL not found)") - } - }) - } -} + // Put expected services into a map to make it easier to find service IDs. 
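The ACL verification below boils down to set arithmetic: deregistered equals initial minus expected, and only tokens for service IDs in that difference may be gone. A toy illustration with the mapset package already imported here:

    func exampleDifference() {
    	initial := mapset.NewSetWith("pod1-web", "pod2-web")
    	expected := mapset.NewSetWith("pod1-web")
    	deregistered := initial.Difference(expected)
    	fmt.Println(deregistered.Contains("pod2-web")) // true: only pod2-web's token may be deleted
    }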
+ expectedServices := mapset.NewSet() + for _, svc := range tt.expectedConsulSvcInstances { + expectedServices.Add(svc.ServiceID) + } -// TestReconcileIgnoresServiceIgnoreLabel tests that the endpoints controller correctly ignores services -// with the service-ignore label and deregisters services previously registered if the service-ignore -// label is added. -func TestReconcileIgnoresServiceIgnoreLabel(t *testing.T) { - t.Parallel() - svcName := "service-ignored" - namespace := "default" + initialServices := mapset.NewSet() + for _, svc := range tt.initialConsulSvcs { + initialServices.Add(svc.ID) + } + + // We only care about a case when services are deregistered, where + // the set of initial services is bigger than the set of expected services. + deregisteredServices := initialServices.Difference(expectedServices) + + // Look through the tokens we've created and check that only + // tokens for the deregistered services have been deleted. + for serviceID, tokenID := range tokensForServices { + // Read the token from Consul. + token, _, err := consulClient.ACL().TokenRead(tokenID, nil) + if deregisteredServices.Contains(serviceID) { + require.EqualError(t, err, "Unexpected response code: 403 (ACL not found)") + } else { + require.NoError(t, err, "token should exist for service instance: "+serviceID) + require.NotNil(t, token) + } + } + } + }) + } +} + +// Tests deleting an Endpoints object, with and without matching Consul and K8s service names. +// This test covers EndpointsController.deregisterServiceOnAllAgents when the map is nil (not selectively deregistered). +func TestReconcileDeleteEndpoint(t *testing.T) { + t.Parallel() + nodeName := "test-node" + cases := []struct { + name string + consulSvcName string + expectServicesToBeDeleted bool + initialConsulSvcs []*api.AgentServiceRegistration + enableACLs bool + consulClientReady bool + }{ + { + name: "Legacy service: does not delete", + consulSvcName: "service-deleted", + expectServicesToBeDeleted: false, + initialConsulSvcs: []*api.AgentServiceRegistration{ + { + ID: "pod1-service-deleted", + Name: "service-deleted", + Port: 80, + Address: "1.2.3.4", + Meta: map[string]string{"k8s-service-name": "service-deleted", "k8s-namespace": "default"}, + }, + { + Kind: api.ServiceKindConnectProxy, + ID: "pod1-service-deleted-sidecar-proxy", + Name: "service-deleted-sidecar-proxy", + Port: 20000, + Address: "1.2.3.4", + Proxy: &api.AgentServiceConnectProxyConfig{ + DestinationServiceName: "service-deleted", + DestinationServiceID: "pod1-service-deleted", + }, + Meta: map[string]string{"k8s-service-name": "service-deleted", "k8s-namespace": "default"}, + }, + }, + consulClientReady: true, + }, + { + name: "Consul service name matches K8s service name", + consulSvcName: "service-deleted", + expectServicesToBeDeleted: true, + initialConsulSvcs: []*api.AgentServiceRegistration{ + { + ID: "pod1-service-deleted", + Name: "service-deleted", + Port: 80, + Address: "1.2.3.4", + Meta: map[string]string{"k8s-service-name": "service-deleted", "k8s-namespace": "default", MetaKeyManagedBy: managedByValue}, + }, + { + Kind: api.ServiceKindConnectProxy, + ID: "pod1-service-deleted-sidecar-proxy", + Name: "service-deleted-sidecar-proxy", + Port: 20000, + Address: "1.2.3.4", + Proxy: &api.AgentServiceConnectProxyConfig{ + DestinationServiceName: "service-deleted", + DestinationServiceID: "pod1-service-deleted", + }, + Meta: map[string]string{"k8s-service-name": "service-deleted", "k8s-namespace": "default", MetaKeyManagedBy: managedByValue}, + }, + }, + 
consulClientReady: true, + }, + { + name: "Consul service name does not match K8s service name", + consulSvcName: "different-consul-svc-name", + expectServicesToBeDeleted: true, + initialConsulSvcs: []*api.AgentServiceRegistration{ + { + ID: "pod1-different-consul-svc-name", + Name: "different-consul-svc-name", + Port: 80, + Address: "1.2.3.4", + Meta: map[string]string{"k8s-service-name": "service-deleted", "k8s-namespace": "default", MetaKeyManagedBy: managedByValue}, + }, + { + Kind: api.ServiceKindConnectProxy, + ID: "pod1-different-consul-svc-name-sidecar-proxy", + Name: "different-consul-svc-name-sidecar-proxy", + Port: 20000, + Address: "1.2.3.4", + Proxy: &api.AgentServiceConnectProxyConfig{ + DestinationServiceName: "different-consul-svc-name", + DestinationServiceID: "pod1-different-consul-svc-name", + }, + Meta: map[string]string{"k8s-service-name": "service-deleted", "k8s-namespace": "default", MetaKeyManagedBy: managedByValue}, + }, + }, + consulClientReady: true, + }, + { + name: "When ACLs are enabled, the token should be deleted", + consulSvcName: "service-deleted", + expectServicesToBeDeleted: true, + initialConsulSvcs: []*api.AgentServiceRegistration{ + { + ID: "pod1-service-deleted", + Name: "service-deleted", + Port: 80, + Address: "1.2.3.4", + Meta: map[string]string{ + MetaKeyKubeServiceName: "service-deleted", + MetaKeyKubeNS: "default", + MetaKeyManagedBy: managedByValue, + MetaKeyPodName: "pod1", + }, + }, + { + Kind: api.ServiceKindConnectProxy, + ID: "pod1-service-deleted-sidecar-proxy", + Name: "service-deleted-sidecar-proxy", + Port: 20000, + Address: "1.2.3.4", + Proxy: &api.AgentServiceConnectProxyConfig{ + DestinationServiceName: "service-deleted", + DestinationServiceID: "pod1-service-deleted", + }, + Meta: map[string]string{ + MetaKeyKubeServiceName: "service-deleted", + MetaKeyKubeNS: "default", + MetaKeyManagedBy: managedByValue, + MetaKeyPodName: "pod1", + }, + }, + }, + enableACLs: true, + consulClientReady: true, + }, + { + name: "When Consul client pod is not ready, services are not deleted", + consulSvcName: "service-deleted", + expectServicesToBeDeleted: false, + initialConsulSvcs: []*api.AgentServiceRegistration{ + { + ID: "pod1-service-deleted", + Name: "service-deleted", + Port: 80, + Address: "1.2.3.4", + Meta: map[string]string{ + MetaKeyKubeServiceName: "service-deleted", + MetaKeyKubeNS: "default", + MetaKeyManagedBy: managedByValue, + MetaKeyPodName: "pod1", + }, + }, + { + Kind: api.ServiceKindConnectProxy, + ID: "pod1-service-deleted-sidecar-proxy", + Name: "service-deleted-sidecar-proxy", + Port: 20000, + Address: "1.2.3.4", + Proxy: &api.AgentServiceConnectProxyConfig{ + DestinationServiceName: "service-deleted", + DestinationServiceID: "pod1-service-deleted", + }, + Meta: map[string]string{ + MetaKeyKubeServiceName: "service-deleted", + MetaKeyKubeNS: "default", + MetaKeyManagedBy: managedByValue, + MetaKeyPodName: "pod1", + }, + }, + }, + consulClientReady: false, + }, + } + for _, tt := range cases { + t.Run(tt.name, func(t *testing.T) { + // The agent pod needs to have the address 127.0.0.1 so when the + // code gets the agent pods via the label component=client, and + // makes requests against the agent API, it will actually hit the + // test server we have on localhost. 
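Concretely, the controller is assumed to dial each discovered client pod at its pod IP combined with the ConsulScheme/ConsulPort fields set below, along these lines (this construction is a sketch, not the controller's actual code):

    func agentClientForPod(scheme, podIP, port string) (*api.Client, error) {
    	return api.NewClient(&api.Config{
    		Scheme:  scheme,
    		Address: net.JoinHostPort(podIP, port), // "127.0.0.1:<port>" lands on the local test server
    	})
    }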
+ fakeClientPod := createPod("fake-consul-client", "127.0.0.1", false, true) + fakeClientPod.Labels = map[string]string{"component": "client", "app": "consul", "release": "consul"} + if !tt.consulClientReady { + fakeClientPod.Status.Conditions = []corev1.PodCondition{{Type: corev1.PodReady, Status: corev1.ConditionFalse}} + } + + // Add the default namespace. + ns := corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "default"}} + // Create fake k8s client. + fakeClient := fake.NewClientBuilder().WithRuntimeObjects(fakeClientPod, &ns).Build() + + // Create test consul server. + adminToken := "123e4567-e89b-12d3-a456-426614174000" + consul, err := testutil.NewTestServerConfigT(t, func(c *testutil.TestServerConfig) { + if tt.enableACLs { + c.ACL.Enabled = true + c.ACL.Tokens.InitialManagement = adminToken + } + c.NodeName = nodeName + }) + require.NoError(t, err) + defer consul.Stop() + + consul.WaitForServiceIntentions(t) + cfg := &api.Config{Address: consul.HTTPAddr} + if tt.enableACLs { + cfg.Token = adminToken + } + consulClient, err := api.NewClient(cfg) + require.NoError(t, err) + addr := strings.Split(consul.HTTPAddr, ":") + consulPort := addr[1] + + // Register service and proxy in consul + var token *api.ACLToken + for _, svc := range tt.initialConsulSvcs { + err = consulClient.Agent().ServiceRegister(svc) + require.NoError(t, err) + + // Create a token for it if ACLs are enabled. + if tt.enableACLs { + test.SetupK8sAuthMethod(t, consulClient, svc.Name, "default") + if svc.Kind != api.ServiceKindConnectProxy { + token, _, err = consulClient.ACL().Login(&api.ACLLoginParams{ + AuthMethod: test.AuthMethod, + BearerToken: test.ServiceAccountJWTToken, + Meta: map[string]string{ + "pod": fmt.Sprintf("%s/%s", svc.Meta[MetaKeyKubeNS], svc.Meta[MetaKeyPodName]), + }, + }, nil) + + require.NoError(t, err) + } + } + } + + // Create the endpoints controller + ep := &EndpointsController{ + Client: fakeClient, + Log: logrtest.TestLogger{T: t}, + ConsulClient: consulClient, + ConsulPort: consulPort, + ConsulScheme: "http", + AllowK8sNamespacesSet: mapset.NewSetWith("*"), + DenyK8sNamespacesSet: mapset.NewSetWith(), + ReleaseName: "consul", + ReleaseNamespace: "default", + ConsulClientCfg: cfg, + } + if tt.enableACLs { + ep.AuthMethod = test.AuthMethod + } + + // Set up the Endpoint that will be reconciled, and reconcile + namespacedName := types.NamespacedName{ + Namespace: "default", + Name: "service-deleted", + } + resp, err := ep.Reconcile(context.Background(), ctrl.Request{ + NamespacedName: namespacedName, + }) + require.NoError(t, err) + require.False(t, resp.Requeue) + + // After reconciliation, Consul should not have any instances of service-deleted + serviceInstances, _, err := consulClient.Catalog().Service(tt.consulSvcName, "", nil) + // If it's not managed by endpoints controller (legacy service), Consul should have service instances + if tt.expectServicesToBeDeleted { + require.NoError(t, err) + require.Empty(t, serviceInstances) + proxyServiceInstances, _, err := consulClient.Catalog().Service(fmt.Sprintf("%s-sidecar-proxy", tt.consulSvcName), "", nil) + require.NoError(t, err) + require.Empty(t, proxyServiceInstances) + } else { + require.NoError(t, err) + require.NotEmpty(t, serviceInstances) + } + + if tt.enableACLs { + _, _, err = consulClient.ACL().TokenRead(token.AccessorID, nil) + require.EqualError(t, err, "Unexpected response code: 403 (ACL not found)") + } + }) + } +} + +// TestReconcileIgnoresServiceIgnoreLabel tests that the endpoints controller correctly ignores 
services +// with the service-ignore label and deregisters services previously registered if the service-ignore +// label is added. +func TestReconcileIgnoresServiceIgnoreLabel(t *testing.T) { + t.Parallel() + nodeName := "test-node" + serviceName := "service-ignored" + namespace := "default" + + cases := map[string]struct { + svcInitiallyRegistered bool + serviceLabels map[string]string + expectedNumSvcInstances int + }{ + "Registered endpoint with label is deregistered.": { + svcInitiallyRegistered: true, + serviceLabels: map[string]string{ + labelServiceIgnore: "true", + }, + expectedNumSvcInstances: 0, + }, + "Not registered endpoint with label is never registered": { + svcInitiallyRegistered: false, + serviceLabels: map[string]string{ + labelServiceIgnore: "true", + }, + expectedNumSvcInstances: 0, + }, + "Registered endpoint without label is unaffected": { + svcInitiallyRegistered: true, + serviceLabels: map[string]string{}, + expectedNumSvcInstances: 1, + }, + "Not registered endpoint without label is registered": { + svcInitiallyRegistered: false, + serviceLabels: map[string]string{}, + expectedNumSvcInstances: 1, + }, + } + + for name, tt := range cases { + t.Run(name, func(t *testing.T) { + // Set up the fake Kubernetes client with an endpoint, pod, consul client, and the default namespace. + endpoint := &corev1.Endpoints{ + ObjectMeta: metav1.ObjectMeta{ + Name: serviceName, + Namespace: namespace, + Labels: tt.serviceLabels, + }, + Subsets: []corev1.EndpointSubset{ + { + Addresses: []corev1.EndpointAddress{ + { + IP: "1.2.3.4", + NodeName: &nodeName, + TargetRef: &corev1.ObjectReference{ + Kind: "Pod", + Name: "pod1", + Namespace: namespace, + }, + }, + }, + }, + }, + } + pod1 := createPod("pod1", "1.2.3.4", true, true) + fakeClientPod := createPod("fake-consul-client", "127.0.0.1", false, true) + fakeClientPod.Labels = map[string]string{"component": "client", "app": "consul", "release": "consul"} + ns := corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}} + k8sObjects := []runtime.Object{endpoint, pod1, fakeClientPod, &ns} + fakeClient := fake.NewClientBuilder().WithRuntimeObjects(k8sObjects...).Build() + + // Create test Consul server. + consul, err := testutil.NewTestServerConfigT(t, func(c *testutil.TestServerConfig) { c.NodeName = nodeName }) + require.NoError(t, err) + defer consul.Stop() + consul.WaitForServiceIntentions(t) + cfg := &api.Config{Address: consul.HTTPAddr} + consulClient, err := api.NewClient(cfg) + require.NoError(t, err) + addr := strings.Split(consul.HTTPAddr, ":") + consulPort := addr[1] + + // Set up the initial Consul services. + if tt.svcInitiallyRegistered { + err = consulClient.Agent().ServiceRegister(&api.AgentServiceRegistration{ + ID: "pod1-" + serviceName, + Name: serviceName, + Port: 0, + Address: "1.2.3.4", + Meta: map[string]string{ + "k8s-namespace": namespace, + "k8s-service-name": serviceName, + "managed-by": "consul-k8s-endpoints-controller", + "pod-name": "pod1", + }, + }) + require.NoError(t, err) + err = consulClient.Agent().ServiceRegister(&api.AgentServiceRegistration{ + ID: "pod1-sidecar-proxy-" + serviceName, + Name: serviceName + "-sidecar-proxy", + Port: 0, + Meta: map[string]string{ + "k8s-namespace": namespace, + "k8s-service-name": serviceName, + "managed-by": "consul-k8s-endpoints-controller", + "pod-name": "pod1", + }, + }) + require.NoError(t, err) + } + + // Create the endpoints controller. 
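For context, labelServiceIgnore is assumed here to resolve to the consul.hashicorp.com/service-ignore label key; an Endpoints object carrying it is skipped by the controller, or deregistered if it was previously synced, e.g.:

    // Assumed label key; the real labelServiceIgnore constant is defined elsewhere in this package.
    var ignoredEndpoints = &corev1.Endpoints{
    	ObjectMeta: metav1.ObjectMeta{
    		Name:      "service-ignored",
    		Namespace: "default",
    		Labels:    map[string]string{"consul.hashicorp.com/service-ignore": "true"},
    	},
    }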
+ ep := &EndpointsController{ + Client: fakeClient, + Log: logrtest.TestLogger{T: t}, + ConsulClient: consulClient, + ConsulPort: consulPort, + ConsulScheme: "http", + AllowK8sNamespacesSet: mapset.NewSetWith("*"), + DenyK8sNamespacesSet: mapset.NewSetWith(), + ReleaseName: "consul", + ReleaseNamespace: namespace, + ConsulClientCfg: cfg, + } + + // Run the reconcile process to deregister the service if it was registered before. + namespacedName := types.NamespacedName{Namespace: namespace, Name: serviceName} + resp, err := ep.Reconcile(context.Background(), ctrl.Request{NamespacedName: namespacedName}) + require.NoError(t, err) + require.False(t, resp.Requeue) + + // Check that the correct number of services are registered with Consul. + serviceInstances, _, err := consulClient.Catalog().Service(serviceName, "", nil) + require.NoError(t, err) + require.Len(t, serviceInstances, tt.expectedNumSvcInstances) + proxyServiceInstances, _, err := consulClient.Catalog().Service(serviceName+"-sidecar-proxy", "", nil) + require.NoError(t, err) + require.Len(t, proxyServiceInstances, tt.expectedNumSvcInstances) + }) + } +} + +// Test that when an endpoints pod specifies the name for the Kubernetes service it wants to use +// for registration, all other endpoints for that pod are skipped. +func TestReconcile_podSpecifiesExplicitService(t *testing.T) { + nodeName := "test-node" + namespace := "default" + + // Set up the fake Kubernetes client with a few endpoints, pod, consul client, and the default namespace. + badEndpoint := &corev1.Endpoints{ + ObjectMeta: metav1.ObjectMeta{ + Name: "not-in-mesh", + Namespace: namespace, + }, + Subsets: []corev1.EndpointSubset{ + { + Addresses: []corev1.EndpointAddress{ + { + IP: "1.2.3.4", + NodeName: &nodeName, + TargetRef: &corev1.ObjectReference{ + Kind: "Pod", + Name: "pod1", + Namespace: namespace, + }, + }, + }, + }, + }, + } + endpoint := &corev1.Endpoints{ + ObjectMeta: metav1.ObjectMeta{ + Name: "in-mesh", + Namespace: namespace, + }, + Subsets: []corev1.EndpointSubset{ + { + Addresses: []corev1.EndpointAddress{ + { + IP: "1.2.3.4", + NodeName: &nodeName, + TargetRef: &corev1.ObjectReference{ + Kind: "Pod", + Name: "pod1", + Namespace: namespace, + }, + }, + }, + }, + }, + } + pod1 := createPod("pod1", "1.2.3.4", true, true) + pod1.Annotations[annotationKubernetesService] = endpoint.Name + fakeClientPod := createPod("fake-consul-client", "127.0.0.1", false, true) + fakeClientPod.Labels = map[string]string{"component": "client", "app": "consul", "release": "consul"} + ns := corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}} + k8sObjects := []runtime.Object{badEndpoint, endpoint, pod1, fakeClientPod, &ns} + fakeClient := fake.NewClientBuilder().WithRuntimeObjects(k8sObjects...).Build() + + // Create test Consul server. + consul, err := testutil.NewTestServerConfigT(t, func(c *testutil.TestServerConfig) { c.NodeName = nodeName }) + require.NoError(t, err) + defer consul.Stop() + consul.WaitForServiceIntentions(t) + cfg := &api.Config{Address: consul.HTTPAddr} + consulClient, err := api.NewClient(cfg) + require.NoError(t, err) + addr := strings.Split(consul.HTTPAddr, ":") + consulPort := addr[1] + + // Create the endpoints controller. 
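The pod above opts into a single Endpoints via annotationKubernetesService, assumed to resolve to consul.hashicorp.com/kubernetes-service; every other Endpoints listing the pod is then skipped, which is what the two Reconcile passes below exercise. A sketch:

    func podWithExplicitService() *corev1.Pod {
    	pod := createPod("pod1", "1.2.3.4", true, true)
    	// Assumed key; mirrors pod1.Annotations[annotationKubernetesService] = endpoint.Name above.
    	pod.Annotations["consul.hashicorp.com/kubernetes-service"] = "in-mesh"
    	return pod
    }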
+ ep := &EndpointsController{ + Client: fakeClient, + Log: logrtest.TestLogger{T: t}, + ConsulClient: consulClient, + ConsulPort: consulPort, + ConsulScheme: "http", + AllowK8sNamespacesSet: mapset.NewSetWith("*"), + DenyK8sNamespacesSet: mapset.NewSetWith(), + ReleaseName: "consul", + ReleaseNamespace: namespace, + ConsulClientCfg: cfg, + } + + serviceName := badEndpoint.Name + + // Initially register the pod with the bad endpoint + err = consulClient.Agent().ServiceRegister(&api.AgentServiceRegistration{ + ID: "pod1-" + serviceName, + Name: serviceName, + Port: 0, + Address: "1.2.3.4", + Meta: map[string]string{ + "k8s-namespace": namespace, + "k8s-service-name": serviceName, + "managed-by": "consul-k8s-endpoints-controller", + "pod-name": "pod1", + }, + }) + require.NoError(t, err) + serviceInstances, _, err := consulClient.Catalog().Service(serviceName, "", nil) + require.NoError(t, err) + require.Len(t, serviceInstances, 1) + + // Run the reconcile process to check service deregistration. + namespacedName := types.NamespacedName{Namespace: badEndpoint.Namespace, Name: serviceName} + resp, err := ep.Reconcile(context.Background(), ctrl.Request{NamespacedName: namespacedName}) + require.NoError(t, err) + require.False(t, resp.Requeue) + + // Check that the service has been deregistered with Consul. + serviceInstances, _, err = consulClient.Catalog().Service(serviceName, "", nil) + require.NoError(t, err) + require.Len(t, serviceInstances, 0) + proxyServiceInstances, _, err := consulClient.Catalog().Service(serviceName+"-sidecar-proxy", "", nil) + require.NoError(t, err) + require.Len(t, proxyServiceInstances, 0) + + // Run the reconcile again with the service we want to register. + serviceName = endpoint.Name + namespacedName = types.NamespacedName{Namespace: endpoint.Namespace, Name: serviceName} + resp, err = ep.Reconcile(context.Background(), ctrl.Request{NamespacedName: namespacedName}) + require.NoError(t, err) + require.False(t, resp.Requeue) + + // Check that the correct services are registered with Consul. 
+ serviceInstances, _, err = consulClient.Catalog().Service(serviceName, "", nil) + require.NoError(t, err) + require.Len(t, serviceInstances, 1) + proxyServiceInstances, _, err = consulClient.Catalog().Service(serviceName+"-sidecar-proxy", "", nil) + require.NoError(t, err) + require.Len(t, proxyServiceInstances, 1) +} +func TestFilterAgentPods(t *testing.T) { + t.Parallel() cases := map[string]struct { - svcInitiallyRegistered bool - serviceLabels map[string]string - expectedNumSvcInstances int + object client.Object + expected bool }{ - "Registered endpoint with label is deregistered.": { - svcInitiallyRegistered: true, - serviceLabels: map[string]string{ - labelServiceIgnore: "true", + "label[app]=consul label[component]=client label[release] consul": { + object: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + "app": "consul", + "component": "client", + "release": "consul", + }, + }, }, - expectedNumSvcInstances: 0, + expected: true, }, - "Not registered endpoint with label is never registered": { - svcInitiallyRegistered: false, - serviceLabels: map[string]string{ - labelServiceIgnore: "true", + "no labels": { + object: &corev1.Pod{}, + expected: false, + }, + "label[app] empty": { + object: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + "component": "client", + "release": "consul", + }, + }, + }, + expected: false, + }, + "label[component] empty": { + object: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + "app": "consul", + "release": "consul", + }, + }, + }, + expected: false, + }, + "label[release] empty": { + object: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + "app": "consul", + "component": "client", + }, + }, + }, + expected: false, + }, + "label[app]!=consul label[component]=client label[release]=consul": { + object: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + "app": "not-consul", + "component": "client", + "release": "consul", + }, + }, + }, + expected: false, + }, + "label[component]!=client label[app]=consul label[release]=consul": { + object: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + "app": "consul", + "component": "not-client", + "release": "consul", + }, + }, + }, + expected: false, + }, + "label[release]!=consul label[app]=consul label[component]=client": { + object: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + "app": "consul", + "component": "client", + "release": "not-consul", + }, + }, + }, + expected: false, + }, + "label[app]!=consul label[component]!=client label[release]!=consul": { + object: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + "app": "not-consul", + "component": "not-client", + "release": "not-consul", + }, + }, + }, + expected: false, + }, + } + + for name, test := range cases { + t.Run(name, func(t *testing.T) { + controller := EndpointsController{ + ReleaseName: "consul", + } + + result := controller.filterAgentPods(test.object) + require.Equal(t, test.expected, result) + }) + } +} + +func TestRequestsForRunningAgentPods(t *testing.T) { + t.Parallel() + cases := map[string]struct { + agentPod *corev1.Pod + existingEndpoints []*corev1.Endpoints + expectedRequests []ctrl.Request + }{ + "pod=running, all endpoints need to be reconciled": { + agentPod: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "consul-agent", + }, + Spec: corev1.PodSpec{ + NodeName: "node-foo", + }, + Status: corev1.PodStatus{ 
+ Conditions: []corev1.PodCondition{ + { + Type: corev1.PodReady, + Status: corev1.ConditionTrue, + }, + }, + Phase: corev1.PodRunning, + }, + }, + existingEndpoints: []*corev1.Endpoints{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "endpoint-1", + }, + Subsets: []corev1.EndpointSubset{ + { + Addresses: []corev1.EndpointAddress{ + { + NodeName: toStringPtr("node-foo"), + }, + }, + NotReadyAddresses: []corev1.EndpointAddress{ + { + NodeName: toStringPtr("node-bar"), + }, + }, + }, + }, + }, + }, + expectedRequests: []ctrl.Request{ + { + NamespacedName: types.NamespacedName{ + Name: "endpoint-1", + }, + }, + }, + }, + "pod=running, endpoints with ready address need to be reconciled": { + agentPod: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "consul-agent", + }, + Spec: corev1.PodSpec{ + NodeName: "node-foo", + }, + Status: corev1.PodStatus{ + Conditions: []corev1.PodCondition{ + { + Type: corev1.PodReady, + Status: corev1.ConditionTrue, + }, + }, + Phase: corev1.PodRunning, + }, + }, + existingEndpoints: []*corev1.Endpoints{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "endpoint-1", + }, + Subsets: []corev1.EndpointSubset{ + { + Addresses: []corev1.EndpointAddress{ + { + NodeName: toStringPtr("node-foo"), + }, + }, + }, + }, + }, + }, + expectedRequests: []ctrl.Request{ + { + NamespacedName: types.NamespacedName{ + Name: "endpoint-1", + }, + }, + }, + }, + "pod=running, endpoints with not-ready address need to be reconciled": { + agentPod: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "consul-agent", + }, + Spec: corev1.PodSpec{ + NodeName: "node-foo", + }, + Status: corev1.PodStatus{ + Conditions: []corev1.PodCondition{ + { + Type: corev1.PodReady, + Status: corev1.ConditionTrue, + }, + }, + Phase: corev1.PodRunning, + }, + }, + existingEndpoints: []*corev1.Endpoints{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "endpoint-1", + }, + Subsets: []corev1.EndpointSubset{ + { + NotReadyAddresses: []corev1.EndpointAddress{ + { + NodeName: toStringPtr("node-foo"), + }, + }, + }, + }, + }, + }, + expectedRequests: []ctrl.Request{ + { + NamespacedName: types.NamespacedName{ + Name: "endpoint-1", + }, + }, + }, + }, + "pod=running, some endpoints need to be reconciled": { + agentPod: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "consul-agent", + }, + Spec: corev1.PodSpec{ + NodeName: "node-foo", + }, + Status: corev1.PodStatus{ + Conditions: []corev1.PodCondition{ + { + Type: corev1.PodReady, + Status: corev1.ConditionTrue, + }, + }, + Phase: corev1.PodRunning, + }, + }, + existingEndpoints: []*corev1.Endpoints{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "endpoint-1", + }, + Subsets: []corev1.EndpointSubset{ + { + Addresses: []corev1.EndpointAddress{ + { + NodeName: toStringPtr("node-foo"), + }, + }, + NotReadyAddresses: []corev1.EndpointAddress{ + { + NodeName: toStringPtr("node-bar"), + }, + }, + }, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "endpoint-2", + }, + Subsets: []corev1.EndpointSubset{ + { + Addresses: []corev1.EndpointAddress{ + { + NodeName: toStringPtr("node-other"), + }, + }, + NotReadyAddresses: []corev1.EndpointAddress{ + { + NodeName: toStringPtr("node-baz"), + }, + }, + }, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "endpoint-3", + }, + Subsets: []corev1.EndpointSubset{ + { + Addresses: []corev1.EndpointAddress{ + { + NodeName: toStringPtr("node-foo"), + }, + }, + NotReadyAddresses: []corev1.EndpointAddress{ + { + NodeName: toStringPtr("node-baz"), + }, + }, + }, + }, + }, + }, + expectedRequests: []ctrl.Request{ + { + 
NamespacedName: types.NamespacedName{ + Name: "endpoint-1", + }, + }, + { + NamespacedName: types.NamespacedName{ + Name: "endpoint-3", + }, + }, + }, + }, + "pod=running, no endpoints need to be reconciled": { + agentPod: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "consul-agent", + }, + Spec: corev1.PodSpec{ + NodeName: "node-foo", + }, + Status: corev1.PodStatus{ + Conditions: []corev1.PodCondition{ + { + Type: corev1.PodReady, + Status: corev1.ConditionTrue, + }, + }, + Phase: corev1.PodRunning, + }, + }, + existingEndpoints: []*corev1.Endpoints{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "endpoint-1", + }, + Subsets: []corev1.EndpointSubset{ + { + Addresses: []corev1.EndpointAddress{ + { + NodeName: toStringPtr("node-baz"), + }, + }, + NotReadyAddresses: []corev1.EndpointAddress{ + { + NodeName: toStringPtr("node-bar"), + }, + }, + }, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "endpoint-2", + }, + Subsets: []corev1.EndpointSubset{ + { + Addresses: []corev1.EndpointAddress{ + { + NodeName: toStringPtr("node-bar"), + }, + }, + NotReadyAddresses: []corev1.EndpointAddress{ + { + NodeName: toStringPtr("node-baz"), + }, + }, + }, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "endpoint-3", + }, + Subsets: []corev1.EndpointSubset{ + { + Addresses: []corev1.EndpointAddress{ + { + NodeName: toStringPtr("node-bar"), + }, + }, + NotReadyAddresses: []corev1.EndpointAddress{ + { + NodeName: toStringPtr("node-baz"), + }, + }, + }, + }, + }, }, - expectedNumSvcInstances: 0, - }, - "Registered endpoint without label is unaffected": { - svcInitiallyRegistered: true, - serviceLabels: map[string]string{}, - expectedNumSvcInstances: 1, - }, - "Not registered endpoint without label is registered": { - svcInitiallyRegistered: false, - serviceLabels: map[string]string{}, - expectedNumSvcInstances: 1, + expectedRequests: []ctrl.Request{}, }, - } - - for name, tt := range cases { - t.Run(name, func(t *testing.T) { - // Set up the fake Kubernetes client with an endpoint, pod, consul client, and the default namespace. - endpoint := &corev1.Endpoints{ + "pod not ready, no endpoints need to be reconciled": { + agentPod: &corev1.Pod{ ObjectMeta: metav1.ObjectMeta{ - Name: svcName, - Namespace: namespace, - Labels: tt.serviceLabels, + Name: "consul-agent", }, - Subsets: []corev1.EndpointSubset{ - { - Addresses: []corev1.EndpointAddress{ - { - IP: "1.2.3.4", - TargetRef: &corev1.ObjectReference{ - Kind: "Pod", - Name: "pod1", - Namespace: namespace, + Spec: corev1.PodSpec{ + NodeName: "node-foo", + }, + Status: corev1.PodStatus{ + Conditions: []corev1.PodCondition{ + { + Type: corev1.PodReady, + Status: corev1.ConditionFalse, + }, + }, + Phase: corev1.PodRunning, + }, + }, + existingEndpoints: []*corev1.Endpoints{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "endpoint-1", + }, + Subsets: []corev1.EndpointSubset{ + { + Addresses: []corev1.EndpointAddress{ + { + NodeName: toStringPtr("node-foo"), }, }, }, }, }, - } - pod1 := createServicePod("pod1", "1.2.3.4", true, true) - ns := corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}} - k8sObjects := []runtime.Object{endpoint, pod1, &ns} - fakeClient := fake.NewClientBuilder().WithRuntimeObjects(k8sObjects...).Build() - - // Create test consulServer server - testClient := test.TestServerWithMockConnMgrWatcher(t, nil) - consulClient := testClient.APIClient - - // Set up the initial Consul services. 
- if tt.svcInitiallyRegistered { - serviceRegistration := &api.CatalogRegistration{ - Node: ConsulNodeName, - Address: ConsulNodeAddress, - Service: &api.AgentService{ - ID: "pod1-" + svcName, - Service: svcName, - Port: 0, - Address: "1.2.3.4", - Meta: map[string]string{ - MetaKeyKubeNS: namespace, - MetaKeyKubeServiceName: svcName, - MetaKeyManagedBy: managedByValue, - MetaKeySyntheticNode: "true", - MetaKeyPodName: "pod1", + { + ObjectMeta: metav1.ObjectMeta{ + Name: "endpoint-3", + }, + Subsets: []corev1.EndpointSubset{ + { + Addresses: []corev1.EndpointAddress{ + { + NodeName: toStringPtr("node-foo"), + }, + }, }, }, - } - _, err := consulClient.Catalog().Register(serviceRegistration, nil) - require.NoError(t, err) - require.NoError(t, err) - } - - // Create the endpoints controller. - ep := &EndpointsController{ - Client: fakeClient, - Log: logrtest.TestLogger{T: t}, - ConsulClientConfig: testClient.Cfg, - ConsulServerConnMgr: testClient.Watcher, - AllowK8sNamespacesSet: mapset.NewSetWith("*"), - DenyK8sNamespacesSet: mapset.NewSetWith(), - ReleaseName: "consul", - ReleaseNamespace: namespace, - } - - // Run the reconcile process to deregister the service if it was registered before. - namespacedName := types.NamespacedName{Namespace: namespace, Name: svcName} - resp, err := ep.Reconcile(context.Background(), ctrl.Request{NamespacedName: namespacedName}) - require.NoError(t, err) - require.False(t, resp.Requeue) - - // Check that the correct number of services are registered with Consul. - serviceInstances, _, err := consulClient.Catalog().Service(svcName, "", nil) - require.NoError(t, err) - require.Len(t, serviceInstances, tt.expectedNumSvcInstances) - proxyServiceInstances, _, err := consulClient.Catalog().Service(svcName+"-sidecar-proxy", "", nil) - require.NoError(t, err) - require.Len(t, proxyServiceInstances, tt.expectedNumSvcInstances) - }) - } -} - -// Test that when an endpoints pod specifies the name for the Kubernetes service it wants to use -// for registration, all other endpoints for that pod are skipped. -func TestReconcile_podSpecifiesExplicitService(t *testing.T) { - namespace := "default" - - // Set up the fake Kubernetes client with a few endpoints, pod, consul client, and the default namespace. 
- badEndpoint := &corev1.Endpoints{ - ObjectMeta: metav1.ObjectMeta{ - Name: "not-in-mesh", - Namespace: namespace, + }, + }, + expectedRequests: []ctrl.Request{}, }, - Subsets: []corev1.EndpointSubset{ - { - Addresses: []corev1.EndpointAddress{ - { - IP: "1.2.3.4", - TargetRef: &corev1.ObjectReference{ - Kind: "Pod", - Name: "pod1", - Namespace: namespace, + "pod not running, no endpoints need to be reconciled": { + agentPod: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "consul-agent", + }, + Spec: corev1.PodSpec{ + NodeName: "node-foo", + }, + Status: corev1.PodStatus{ + Conditions: []corev1.PodCondition{ + { + Type: corev1.PodReady, + Status: corev1.ConditionTrue, }, }, + Phase: corev1.PodUnknown, }, }, + existingEndpoints: []*corev1.Endpoints{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "endpoint-1", + }, + Subsets: []corev1.EndpointSubset{ + { + Addresses: []corev1.EndpointAddress{ + { + NodeName: toStringPtr("node-foo"), + }, + }, + }, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "endpoint-3", + }, + Subsets: []corev1.EndpointSubset{ + { + Addresses: []corev1.EndpointAddress{ + { + NodeName: toStringPtr("node-foo"), + }, + }, + }, + }, + }, + }, + expectedRequests: []ctrl.Request{}, }, - } - endpoint := &corev1.Endpoints{ - ObjectMeta: metav1.ObjectMeta{ - Name: "in-mesh", - Namespace: namespace, - }, - Subsets: []corev1.EndpointSubset{ - { - Addresses: []corev1.EndpointAddress{ - { - IP: "1.2.3.4", - TargetRef: &corev1.ObjectReference{ - Kind: "Pod", - Name: "pod1", - Namespace: namespace, + "pod is deleted, no endpoints need to be reconciled": { + agentPod: nil, + existingEndpoints: []*corev1.Endpoints{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "endpoint-1", + }, + Subsets: []corev1.EndpointSubset{ + { + Addresses: []corev1.EndpointAddress{ + { + NodeName: toStringPtr("node-foo"), + }, + }, + }, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "endpoint-3", + }, + Subsets: []corev1.EndpointSubset{ + { + Addresses: []corev1.EndpointAddress{ + { + NodeName: toStringPtr("node-foo"), + }, + }, }, }, }, }, + expectedRequests: []ctrl.Request{}, }, } - pod1 := createServicePod("pod1", "1.2.3.4", true, true) - pod1.Annotations[annotationKubernetesService] = endpoint.Name - ns := corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}} - k8sObjects := []runtime.Object{badEndpoint, endpoint, pod1, &ns} - fakeClient := fake.NewClientBuilder().WithRuntimeObjects(k8sObjects...).Build() - - // Create test consulServer server - testClient := test.TestServerWithMockConnMgrWatcher(t, nil) - consulClient := testClient.APIClient - - // Create the endpoints controller. 
- ep := &EndpointsController{ - Client: fakeClient, - Log: logrtest.TestLogger{T: t}, - ConsulClientConfig: testClient.Cfg, - ConsulServerConnMgr: testClient.Watcher, - AllowK8sNamespacesSet: mapset.NewSetWith("*"), - DenyK8sNamespacesSet: mapset.NewSetWith(), - ReleaseName: "consul", - ReleaseNamespace: namespace, - } - - svcName := badEndpoint.Name - // Initially register the pod with the bad endpoint - _, err := consulClient.Catalog().Register(&api.CatalogRegistration{ - Node: ConsulNodeName, - Address: ConsulNodeAddress, - Service: &api.AgentService{ - ID: "pod1-" + svcName, - Service: svcName, - Port: 0, - Address: "1.2.3.4", - Meta: map[string]string{ - "k8s-namespace": namespace, - "k8s-service-name": svcName, - "managed-by": "consul-k8s-endpoints-controller", - "pod-name": "pod1", - }, - }, - }, nil) - require.NoError(t, err) - serviceInstances, _, err := consulClient.Catalog().Service(svcName, "", nil) - require.NoError(t, err) - require.Len(t, serviceInstances, 1) - - // Run the reconcile process to check service deregistration. - namespacedName := types.NamespacedName{Namespace: badEndpoint.Namespace, Name: svcName} - resp, err := ep.Reconcile(context.Background(), ctrl.Request{NamespacedName: namespacedName}) - require.NoError(t, err) - require.False(t, resp.Requeue) - - // Check that the service has been deregistered with Consul. - serviceInstances, _, err = consulClient.Catalog().Service(svcName, "", nil) - require.NoError(t, err) - require.Len(t, serviceInstances, 0) - proxyServiceInstances, _, err := consulClient.Catalog().Service(svcName+"-sidecar-proxy", "", nil) - require.NoError(t, err) - require.Len(t, proxyServiceInstances, 0) + for name, test := range cases { + t.Run(name, func(t *testing.T) { + logger := logrtest.TestLogger{T: t} + s := runtime.NewScheme() + s.AddKnownTypes(corev1.SchemeGroupVersion, &corev1.Pod{}, &corev1.Endpoints{}, &corev1.EndpointsList{}) + var objects []runtime.Object + if test.agentPod != nil { + objects = append(objects, test.agentPod) + } + for _, endpoint := range test.existingEndpoints { + objects = append(objects, endpoint) + } - // Run the reconcile again with the service we want to register. - svcName = endpoint.Name - namespacedName = types.NamespacedName{Namespace: endpoint.Namespace, Name: svcName} - resp, err = ep.Reconcile(context.Background(), ctrl.Request{NamespacedName: namespacedName}) - require.NoError(t, err) - require.False(t, resp.Requeue) + fakeClient := fake.NewClientBuilder().WithScheme(s).WithRuntimeObjects(objects...).Build() - // Check that the correct services are registered with Consul. 
- serviceInstances, _, err = consulClient.Catalog().Service(svcName, "", nil) - require.NoError(t, err) - require.Len(t, serviceInstances, 1) - proxyServiceInstances, _, err = consulClient.Catalog().Service(svcName+"-sidecar-proxy", "", nil) - require.NoError(t, err) - require.Len(t, proxyServiceInstances, 1) + controller := &EndpointsController{ + Client: fakeClient, + Scheme: s, + Log: logger, + } + var requests []ctrl.Request + if test.agentPod != nil { + requests = controller.requestsForRunningAgentPods(test.agentPod) + } else { + requests = controller.requestsForRunningAgentPods(minimal()) + } + require.ElementsMatch(t, requests, test.expectedRequests) + }) + } } func TestServiceInstancesForK8SServiceNameAndNamespace(t *testing.T) { @@ -4064,37 +4428,37 @@ func TestServiceInstancesForK8SServiceNameAndNamespace(t *testing.T) { name string k8sServiceNameMeta string k8sNamespaceMeta string - expected []*api.AgentService + expected map[string]*api.AgentService }{ { "no k8s service name or namespace meta", "", "", - nil, + map[string]*api.AgentService{}, }, { "k8s service name set, but no namespace meta", k8sSvc, "", - nil, + map[string]*api.AgentService{}, }, { "k8s namespace set, but no k8s service name meta", "", k8sNS, - nil, + map[string]*api.AgentService{}, }, { "both k8s service name and namespace set", k8sSvc, k8sNS, - []*api.AgentService{ - { + map[string]*api.AgentService{ + "foo1": { ID: "foo1", Service: "foo", Meta: map[string]string{"k8s-service-name": k8sSvc, "k8s-namespace": k8sNS}, }, - { + "foo1-proxy": { Kind: api.ServiceKindConnectProxy, ID: "foo1-proxy", Service: "foo-sidecar-proxy", @@ -4111,18 +4475,18 @@ func TestServiceInstancesForK8SServiceNameAndNamespace(t *testing.T) { for _, c := range cases { t.Run(c.name, func(t *testing.T) { - servicesInConsul := []*api.AgentService{ + servicesInConsul := []*api.AgentServiceRegistration{ { - ID: "foo1", - Service: "foo", - Tags: []string{}, - Meta: map[string]string{"k8s-service-name": c.k8sServiceNameMeta, "k8s-namespace": c.k8sNamespaceMeta}, + ID: "foo1", + Name: "foo", + Tags: []string{}, + Meta: map[string]string{"k8s-service-name": c.k8sServiceNameMeta, "k8s-namespace": c.k8sNamespaceMeta}, }, { - Kind: api.ServiceKindConnectProxy, - ID: "foo1-proxy", - Service: "foo-sidecar-proxy", - Port: 20000, + Kind: api.ServiceKindConnectProxy, + ID: "foo1-proxy", + Name: "foo-sidecar-proxy", + Port: 20000, Proxy: &api.AgentServiceConnectProxyConfig{ DestinationServiceName: "foo", DestinationServiceID: "foo1", @@ -4130,16 +4494,16 @@ func TestServiceInstancesForK8SServiceNameAndNamespace(t *testing.T) { Meta: map[string]string{"k8s-service-name": c.k8sServiceNameMeta, "k8s-namespace": c.k8sNamespaceMeta}, }, { - ID: "k8s-service-different-ns-id", - Service: "k8s-service-different-ns", - Meta: map[string]string{"k8s-service-name": c.k8sServiceNameMeta, "k8s-namespace": "different-ns"}, + ID: "k8s-service-different-ns-id", + Name: "k8s-service-different-ns", + Meta: map[string]string{"k8s-service-name": c.k8sServiceNameMeta, "k8s-namespace": "different-ns"}, }, { - Kind: api.ServiceKindConnectProxy, - ID: "k8s-service-different-ns-proxy", - Service: "k8s-service-different-ns-proxy", - Port: 20000, - Tags: []string{}, + Kind: api.ServiceKindConnectProxy, + ID: "k8s-service-different-ns-proxy", + Name: "k8s-service-different-ns-proxy", + Port: 20000, + Tags: []string{}, Proxy: &api.AgentServiceConnectProxyConfig{ DestinationServiceName: "k8s-service-different-ns", DestinationServiceID: "k8s-service-different-ns-id", @@ -4159,24 
+4523,18 @@ func TestServiceInstancesForK8SServiceNameAndNamespace(t *testing.T) { require.NoError(t, err) for _, svc := range servicesInConsul { - catalogRegistration := &api.CatalogRegistration{ - Node: ConsulNodeName, - Address: "127.0.0.1", - Service: svc, - } - _, err = consulClient.Catalog().Register(catalogRegistration, nil) + err := consulClient.Agent().ServiceRegister(svc) require.NoError(t, err) } - ep := EndpointsController{} - svcs, err := ep.serviceInstancesForK8SServiceNameAndNamespace(consulClient, k8sSvc, k8sNS) + svcs, err := serviceInstancesForK8SServiceNameAndNamespace(k8sSvc, k8sNS, consulClient) require.NoError(t, err) - if len(svcs.Services) > 0 { + if len(svcs) > 0 { require.Len(t, svcs, 2) - require.NotNil(t, c.expected[0], svcs.Services[0]) - require.Equal(t, c.expected[0].Service, svcs.Services[0].Service) - require.NotNil(t, c.expected[1], svcs.Services[1]) - require.Equal(t, c.expected[1].Service, svcs.Services[1].Service) + require.NotNil(t, c.expected["foo1"], svcs["foo1"]) + require.Equal(t, c.expected["foo1"].Service, svcs["foo1"].Service) + require.NotNil(t, c.expected["foo1-proxy"], svcs["foo1-proxy"]) + require.Equal(t, c.expected["foo1-proxy"].Service, svcs["foo1-proxy"].Service) } }) } @@ -4976,267 +5334,17 @@ func TestCreateServiceRegistrations_withTransparentProxy(t *testing.T) { }, { Name: "http", - ContainerPort: 8080, - }, - }, - ReadinessProbe: &corev1.Probe{ - Handler: corev1.Handler{ - HTTPGet: &corev1.HTTPGetAction{ - Port: intstr.FromInt(exposedPathsReadinessPortsRangeStart), - }, - }, - }, - }, - }, - service: &corev1.Service{ - ObjectMeta: metav1.ObjectMeta{ - Name: serviceName, - Namespace: "default", - }, - Spec: corev1.ServiceSpec{ - ClusterIP: "10.0.0.1", - Ports: []corev1.ServicePort{ - { - Port: 8081, - }, - }, - }, - }, - expProxyMode: api.ProxyModeTransparent, - expTaggedAddresses: map[string]api.ServiceAddress{ - "virtual": { - Address: "10.0.0.1", - Port: 8081, - }, - }, - expExposePaths: []api.ExposePath{ - { - ListenerPort: exposedPathsReadinessPortsRangeStart, - LocalPathPort: 8080, - }, - }, - expErr: "", - }, - "startup only probe provided": { - tproxyGlobalEnabled: true, - overwriteProbes: true, - podAnnotations: map[string]string{ - annotationOriginalPod: "{\"metadata\":{\"name\":\"test-pod-1\",\"namespace\":\"default\",\"creationTimestamp\":null,\"labels\":{\"consul.hashicorp.com/connect-inject-managed-by\":\"consul-k8s-endpoints-controller\",\"consul.hashicorp.com/connect-inject-status\":\"injected\"}},\"spec\":{\"containers\":[{\"name\":\"test\",\"ports\":[{\"name\":\"tcp\",\"containerPort\":8081},{\"name\":\"http\",\"containerPort\":8080}],\"resources\":{},\"startupProbe\":{\"httpGet\":{\"port\":8080}}}]},\"status\":{\"hostIP\":\"127.0.0.1\",\"podIP\":\"1.2.3.4\"}}\n", - }, - podContainers: []corev1.Container{ - { - Name: "test", - Ports: []corev1.ContainerPort{ - { - Name: "tcp", - ContainerPort: 8081, - }, - { - Name: "http", - ContainerPort: 8080, - }, - }, - StartupProbe: &corev1.Probe{ - Handler: corev1.Handler{ - HTTPGet: &corev1.HTTPGetAction{ - Port: intstr.FromInt(exposedPathsStartupPortsRangeStart), - }, - }, - }, - }, - }, - service: &corev1.Service{ - ObjectMeta: metav1.ObjectMeta{ - Name: serviceName, - Namespace: "default", - }, - Spec: corev1.ServiceSpec{ - ClusterIP: "10.0.0.1", - Ports: []corev1.ServicePort{ - { - Port: 8081, - }, - }, - }, - }, - expProxyMode: api.ProxyModeTransparent, - expTaggedAddresses: map[string]api.ServiceAddress{ - "virtual": { - Address: "10.0.0.1", - Port: 8081, - }, - }, - 
expExposePaths: []api.ExposePath{ - { - ListenerPort: exposedPathsStartupPortsRangeStart, - LocalPathPort: 8080, - }, - }, - expErr: "", - }, - "all probes provided": { - tproxyGlobalEnabled: true, - overwriteProbes: true, - podAnnotations: map[string]string{ - annotationOriginalPod: "{\"metadata\":{\"name\":\"test-pod-1\",\"namespace\":\"default\",\"creationTimestamp\":null,\"labels\":{\"consul.hashicorp.com/connect-inject-managed-by\":\"consul-k8s-endpoints-controller\",\"consul.hashicorp.com/connect-inject-status\":\"injected\"}},\"spec\":{\"containers\":[{\"name\":\"test\",\"ports\":[{\"name\":\"tcp\",\"containerPort\":8081},{\"name\":\"http\",\"containerPort\":8080}],\"resources\":{},\"livenessProbe\":{\"httpGet\":{\"port\":8080}},\"readinessProbe\":{\"httpGet\":{\"port\":8081}},\"startupProbe\":{\"httpGet\":{\"port\":8081}}}]},\"status\":{\"hostIP\":\"127.0.0.1\",\"podIP\":\"1.2.3.4\"}}\n", - }, - podContainers: []corev1.Container{ - { - Name: "test", - Ports: []corev1.ContainerPort{ - { - Name: "tcp", - ContainerPort: 8081, - }, - { - Name: "http", - ContainerPort: 8080, - }, - }, - LivenessProbe: &corev1.Probe{ - Handler: corev1.Handler{ - HTTPGet: &corev1.HTTPGetAction{ - Port: intstr.FromInt(exposedPathsLivenessPortsRangeStart), - }, - }, - }, - ReadinessProbe: &corev1.Probe{ - Handler: corev1.Handler{ - HTTPGet: &corev1.HTTPGetAction{ - Port: intstr.FromInt(exposedPathsReadinessPortsRangeStart), - }, - }, - }, - StartupProbe: &corev1.Probe{ - Handler: corev1.Handler{ - HTTPGet: &corev1.HTTPGetAction{ - Port: intstr.FromInt(exposedPathsStartupPortsRangeStart), - }, - }, - }, - }, - }, - service: &corev1.Service{ - ObjectMeta: metav1.ObjectMeta{ - Name: serviceName, - Namespace: "default", - }, - Spec: corev1.ServiceSpec{ - ClusterIP: "10.0.0.1", - Ports: []corev1.ServicePort{ - { - Port: 8081, - }, - }, - }, - }, - expProxyMode: api.ProxyModeTransparent, - expTaggedAddresses: map[string]api.ServiceAddress{ - "virtual": { - Address: "10.0.0.1", - Port: 8081, - }, - }, - expExposePaths: []api.ExposePath{ - { - ListenerPort: exposedPathsLivenessPortsRangeStart, - LocalPathPort: 8080, - }, - { - ListenerPort: exposedPathsReadinessPortsRangeStart, - LocalPathPort: 8081, - }, - { - ListenerPort: exposedPathsStartupPortsRangeStart, - LocalPathPort: 8081, - }, - }, - expErr: "", - }, - "multiple containers with all probes provided": { - tproxyGlobalEnabled: true, - overwriteProbes: true, - podAnnotations: map[string]string{ - annotationOriginalPod: "{\"metadata\":{\"name\":\"test-pod-1\",\"namespace\":\"default\",\"creationTimestamp\":null,\"labels\":{\"consul.hashicorp.com/connect-inject-managed-by\":\"consul-k8s-endpoints-controller\",\"consul.hashicorp.com/connect-inject-status\":\"injected\"}},\"spec\":{\"containers\":[{\"name\":\"test\",\"ports\":[{\"name\":\"tcp\",\"containerPort\":8081},{\"name\":\"http\",\"containerPort\":8080}],\"resources\":{},\"livenessProbe\":{\"httpGet\":{\"port\":8080}},\"readinessProbe\":{\"httpGet\":{\"port\":8081}},\"startupProbe\":{\"httpGet\":{\"port\":8081}}},{\"name\":\"test-2\",\"ports\":[{\"name\":\"tcp\",\"containerPort\":8083},{\"name\":\"http\",\"containerPort\":8082}],\"resources\":{},\"livenessProbe\":{\"httpGet\":{\"port\":8082}},\"readinessProbe\":{\"httpGet\":{\"port\":8083}},\"startupProbe\":{\"httpGet\":{\"port\":8083}}},{\"name\":\"envoy-sidecar\",\"ports\":[{\"name\":\"http\",\"containerPort\":20000}],\"resources\":{}}]},\"status\":{\"hostIP\":\"127.0.0.1\",\"podIP\":\"1.2.3.4\"}}\n", - }, - podContainers: []corev1.Container{ - { - 
Name: "test", - Ports: []corev1.ContainerPort{ - { - Name: "tcp", - ContainerPort: 8081, - }, - { - Name: "http", - ContainerPort: 8080, - }, - }, - LivenessProbe: &corev1.Probe{ - Handler: corev1.Handler{ - HTTPGet: &corev1.HTTPGetAction{ - Port: intstr.FromInt(exposedPathsLivenessPortsRangeStart), - }, - }, - }, - ReadinessProbe: &corev1.Probe{ - Handler: corev1.Handler{ - HTTPGet: &corev1.HTTPGetAction{ - Port: intstr.FromInt(exposedPathsReadinessPortsRangeStart), - }, - }, - }, - StartupProbe: &corev1.Probe{ - Handler: corev1.Handler{ - HTTPGet: &corev1.HTTPGetAction{ - Port: intstr.FromInt(exposedPathsStartupPortsRangeStart), - }, - }, - }, - }, - { - Name: "test-2", - Ports: []corev1.ContainerPort{ - { - Name: "tcp", - ContainerPort: 8083, - }, - { - Name: "http", - ContainerPort: 8082, - }, - }, - LivenessProbe: &corev1.Probe{ - Handler: corev1.Handler{ - HTTPGet: &corev1.HTTPGetAction{ - Port: intstr.FromInt(exposedPathsLivenessPortsRangeStart + 1), - }, - }, - }, - ReadinessProbe: &corev1.Probe{ - Handler: corev1.Handler{ - HTTPGet: &corev1.HTTPGetAction{ - Port: intstr.FromInt(exposedPathsReadinessPortsRangeStart + 1), - }, + ContainerPort: 8080, }, }, - StartupProbe: &corev1.Probe{ + ReadinessProbe: &corev1.Probe{ Handler: corev1.Handler{ HTTPGet: &corev1.HTTPGetAction{ - Port: intstr.FromInt(exposedPathsStartupPortsRangeStart + 1), + Port: intstr.FromInt(exposedPathsReadinessPortsRangeStart), }, }, }, }, - { - Name: sidecarContainer, - Ports: []corev1.ContainerPort{ - { - Name: "http", - ContainerPort: 20000, - }, - }, - }, }, service: &corev1.Service{ ObjectMeta: metav1.ObjectMeta{ @@ -5260,38 +5368,18 @@ func TestCreateServiceRegistrations_withTransparentProxy(t *testing.T) { }, }, expExposePaths: []api.ExposePath{ - { - ListenerPort: exposedPathsLivenessPortsRangeStart, - LocalPathPort: 8080, - }, { ListenerPort: exposedPathsReadinessPortsRangeStart, - LocalPathPort: 8081, - }, - { - ListenerPort: exposedPathsStartupPortsRangeStart, - LocalPathPort: 8081, - }, - { - ListenerPort: exposedPathsLivenessPortsRangeStart + 1, - LocalPathPort: 8082, - }, - { - ListenerPort: exposedPathsReadinessPortsRangeStart + 1, - LocalPathPort: 8083, - }, - { - ListenerPort: exposedPathsStartupPortsRangeStart + 1, - LocalPathPort: 8083, + LocalPathPort: 8080, }, }, expErr: "", }, - "non-http probe": { + "startup only probe provided": { tproxyGlobalEnabled: true, overwriteProbes: true, podAnnotations: map[string]string{ - annotationOriginalPod: "{\"metadata\":{\"name\":\"test-pod-1\",\"namespace\":\"default\",\"creationTimestamp\":null,\"labels\":{\"consul.hashicorp.com/connect-inject-managed-by\":\"consul-k8s-endpoints-controller\",\"consul.hashicorp.com/connect-inject-status\":\"injected\"}},\"spec\":{\"containers\":[{\"name\":\"test\",\"ports\":[{\"name\":\"tcp\",\"containerPort\":8081},{\"name\":\"http\",\"containerPort\":8080}],\"resources\":{},\"livenessProbe\":{\"tcpSocket\":{\"port\":8080}}}]},\"status\":{\"hostIP\":\"127.0.0.1\",\"podIP\":\"1.2.3.4\"}}\n", + annotationOriginalPod: 
"{\"metadata\":{\"name\":\"test-pod-1\",\"namespace\":\"default\",\"creationTimestamp\":null,\"labels\":{\"consul.hashicorp.com/connect-inject-managed-by\":\"consul-k8s-endpoints-controller\",\"consul.hashicorp.com/connect-inject-status\":\"injected\"}},\"spec\":{\"containers\":[{\"name\":\"test\",\"ports\":[{\"name\":\"tcp\",\"containerPort\":8081},{\"name\":\"http\",\"containerPort\":8080}],\"resources\":{},\"startupProbe\":{\"httpGet\":{\"port\":8080}}}]},\"status\":{\"hostIP\":\"127.0.0.1\",\"podIP\":\"1.2.3.4\"}}\n", }, podContainers: []corev1.Container{ { @@ -5306,10 +5394,10 @@ func TestCreateServiceRegistrations_withTransparentProxy(t *testing.T) { ContainerPort: 8080, }, }, - LivenessProbe: &corev1.Probe{ + StartupProbe: &corev1.Probe{ Handler: corev1.Handler{ - TCPSocket: &corev1.TCPSocketAction{ - Port: intstr.FromInt(8080), + HTTPGet: &corev1.HTTPGetAction{ + Port: intstr.FromInt(exposedPathsStartupPortsRangeStart), }, }, }, @@ -5336,14 +5424,19 @@ func TestCreateServiceRegistrations_withTransparentProxy(t *testing.T) { Port: 8081, }, }, - expExposePaths: nil, - expErr: "", + expExposePaths: []api.ExposePath{ + { + ListenerPort: exposedPathsStartupPortsRangeStart, + LocalPathPort: 8080, + }, + }, + expErr: "", }, - "probes with port names": { + "all probes provided": { tproxyGlobalEnabled: true, overwriteProbes: true, podAnnotations: map[string]string{ - annotationOriginalPod: "{\"metadata\":{\"name\":\"test-pod-1\",\"namespace\":\"default\",\"creationTimestamp\":null,\"labels\":{\"consul.hashicorp.com/connect-inject-managed-by\":\"consul-k8s-endpoints-controller\",\"consul.hashicorp.com/connect-inject-status\":\"injected\"}},\"spec\":{\"containers\":[{\"name\":\"test\",\"ports\":[{\"name\":\"tcp\",\"containerPort\":8081},{\"name\":\"http\",\"containerPort\":8080}],\"resources\":{},\"livenessProbe\":{\"httpGet\":{\"port\":\"tcp\"}},\"readinessProbe\":{\"httpGet\":{\"port\":\"http\"}},\"startupProbe\":{\"httpGet\":{\"port\":\"http\"}}}]},\"status\":{\"hostIP\":\"127.0.0.1\",\"podIP\":\"1.2.3.4\"}}\n", + annotationOriginalPod: "{\"metadata\":{\"name\":\"test-pod-1\",\"namespace\":\"default\",\"creationTimestamp\":null,\"labels\":{\"consul.hashicorp.com/connect-inject-managed-by\":\"consul-k8s-endpoints-controller\",\"consul.hashicorp.com/connect-inject-status\":\"injected\"}},\"spec\":{\"containers\":[{\"name\":\"test\",\"ports\":[{\"name\":\"tcp\",\"containerPort\":8081},{\"name\":\"http\",\"containerPort\":8080}],\"resources\":{},\"livenessProbe\":{\"httpGet\":{\"port\":8080}},\"readinessProbe\":{\"httpGet\":{\"port\":8081}},\"startupProbe\":{\"httpGet\":{\"port\":8081}}}]},\"status\":{\"hostIP\":\"127.0.0.1\",\"podIP\":\"1.2.3.4\"}}\n", }, podContainers: []corev1.Container{ { @@ -5405,717 +5498,447 @@ func TestCreateServiceRegistrations_withTransparentProxy(t *testing.T) { expExposePaths: []api.ExposePath{ { ListenerPort: exposedPathsLivenessPortsRangeStart, - LocalPathPort: 8081, - }, - { - ListenerPort: exposedPathsReadinessPortsRangeStart, - LocalPathPort: 8080, - }, - { - ListenerPort: exposedPathsStartupPortsRangeStart, - LocalPathPort: 8080, - }, - }, - expErr: "", - }, - } - - for name, c := range cases { - t.Run(name, func(t *testing.T) { - pod := createServicePod("test-pod-1", "1.2.3.4", true, true) - if c.podAnnotations != nil { - pod.Annotations = c.podAnnotations - } - if c.podContainers != nil { - pod.Spec.Containers = c.podContainers - } - - // We set these annotations explicitly as these are set by the meshWebhook and we - // need these values to determine which 
port to use for the service registration. - pod.Annotations[annotationPort] = "tcp" - - endpoints := &corev1.Endpoints{ - ObjectMeta: metav1.ObjectMeta{ - Name: serviceName, - Namespace: "default", - }, - Subsets: []corev1.EndpointSubset{ - { - Addresses: []corev1.EndpointAddress{ - { - IP: "1.2.3.4", - TargetRef: &corev1.ObjectReference{ - Kind: "Pod", - Name: pod.Name, - Namespace: pod.Namespace, - }, - }, - }, - }, - }, - } - // Add the pod's namespace. - ns := corev1.Namespace{ - ObjectMeta: metav1.ObjectMeta{Name: pod.Namespace, Labels: c.namespaceLabels}, - } - var fakeClient client.Client - if c.service != nil { - fakeClient = fake.NewClientBuilder().WithRuntimeObjects(pod, endpoints, c.service, &ns).Build() - } else { - fakeClient = fake.NewClientBuilder().WithRuntimeObjects(pod, endpoints, &ns).Build() - } - - epCtrl := EndpointsController{ - Client: fakeClient, - EnableTransparentProxy: c.tproxyGlobalEnabled, - TProxyOverwriteProbes: c.overwriteProbes, - Log: logrtest.TestLogger{T: t}, - } - - serviceRegistration, proxyServiceRegistration, err := epCtrl.createServiceRegistrations(nil, *pod, *endpoints, api.HealthPassing) - if c.expErr != "" { - require.EqualError(t, err, c.expErr) - } else { - require.NoError(t, err) - - require.Equal(t, c.expProxyMode, proxyServiceRegistration.Service.Proxy.Mode) - require.Equal(t, c.expTaggedAddresses, serviceRegistration.Service.TaggedAddresses) - require.Equal(t, c.expTaggedAddresses, proxyServiceRegistration.Service.TaggedAddresses) - require.Equal(t, c.expExposePaths, proxyServiceRegistration.Service.Proxy.Expose.Paths) - } - }) - } -} - -func TestGetTokenMetaFromDescription(t *testing.T) { - t.Parallel() - cases := map[string]struct { - description string - expectedTokenMeta map[string]string - }{ - "no description prefix": { - description: `{"pod":"default/pod"}`, - expectedTokenMeta: map[string]string{"pod": "default/pod"}, - }, - "consul's default description prefix": { - description: `token created via login: {"pod":"default/pod"}`, - expectedTokenMeta: map[string]string{"pod": "default/pod"}, - }, - } - - for name, c := range cases { - t.Run(name, func(t *testing.T) { - tokenMeta, err := getTokenMetaFromDescription(c.description) - require.NoError(t, err) - require.Equal(t, c.expectedTokenMeta, tokenMeta) - }) - } -} - -func TestMapAddresses(t *testing.T) { - t.Parallel() - cases := map[string]struct { - addresses corev1.EndpointSubset - expected map[corev1.EndpointAddress]string - }{ - "ready and not ready addresses": { - addresses: corev1.EndpointSubset{ - Addresses: []corev1.EndpointAddress{ - {Hostname: "host1"}, - {Hostname: "host2"}, - }, - NotReadyAddresses: []corev1.EndpointAddress{ - {Hostname: "host3"}, - {Hostname: "host4"}, - }, - }, - expected: map[corev1.EndpointAddress]string{ - {Hostname: "host1"}: api.HealthPassing, - {Hostname: "host2"}: api.HealthPassing, - {Hostname: "host3"}: api.HealthCritical, - {Hostname: "host4"}: api.HealthCritical, - }, - }, - "ready addresses only": { - addresses: corev1.EndpointSubset{ - Addresses: []corev1.EndpointAddress{ - {Hostname: "host1"}, - {Hostname: "host2"}, - {Hostname: "host3"}, - {Hostname: "host4"}, - }, - NotReadyAddresses: []corev1.EndpointAddress{}, - }, - expected: map[corev1.EndpointAddress]string{ - {Hostname: "host1"}: api.HealthPassing, - {Hostname: "host2"}: api.HealthPassing, - {Hostname: "host3"}: api.HealthPassing, - {Hostname: "host4"}: api.HealthPassing, - }, - }, - "not ready addresses only": { - addresses: corev1.EndpointSubset{ - Addresses: 
[]corev1.EndpointAddress{}, - NotReadyAddresses: []corev1.EndpointAddress{ - {Hostname: "host1"}, - {Hostname: "host2"}, - {Hostname: "host3"}, - {Hostname: "host4"}, - }, - }, - expected: map[corev1.EndpointAddress]string{ - {Hostname: "host1"}: api.HealthCritical, - {Hostname: "host2"}: api.HealthCritical, - {Hostname: "host3"}: api.HealthCritical, - {Hostname: "host4"}: api.HealthCritical, - }, - }, - } - - for name, c := range cases { - t.Run(name, func(t *testing.T) { - actual := mapAddresses(c.addresses) - require.Equal(t, c.expected, actual) - }) - } -} - -func Test_GetWANData(t *testing.T) { - cases := map[string]struct { - gatewayPod corev1.Pod - gatewayEndpoint corev1.Endpoints - k8sObjects func() []runtime.Object - wanAddr string - wanPort int - expErr string - }{ - "source=NodeName": { - gatewayPod: corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: "gateway", - Annotations: map[string]string{ - annotationGatewayWANSource: "NodeName", - annotationGatewayWANAddress: "test-wan-address", - annotationGatewayWANPort: "1234", - }, - }, - Spec: corev1.PodSpec{ - NodeName: "test-nodename", - }, - Status: corev1.PodStatus{ - HostIP: "test-host-ip", - }, - }, - gatewayEndpoint: corev1.Endpoints{ - ObjectMeta: metav1.ObjectMeta{ - Name: "gateway", - Namespace: "default", - }, - }, - k8sObjects: func() []runtime.Object { - service := &corev1.Service{ - ObjectMeta: metav1.ObjectMeta{ - Name: "gateway", - Namespace: "default", - }, - Spec: corev1.ServiceSpec{ - Type: corev1.ServiceTypeLoadBalancer, - ClusterIP: "test-cluster-ip", - }, - Status: corev1.ServiceStatus{ - LoadBalancer: corev1.LoadBalancerStatus{ - Ingress: []corev1.LoadBalancerIngress{ - { - IP: "1.2.3.4", - }, - }, - }, - }, - } - return []runtime.Object{service} - }, - wanAddr: "test-nodename", - wanPort: 1234, - }, - "source=HostIP": { - gatewayPod: corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: "gateway", - Annotations: map[string]string{ - annotationGatewayWANSource: "NodeIP", - annotationGatewayWANAddress: "test-wan-address", - annotationGatewayWANPort: "1234", - }, + LocalPathPort: 8080, }, - Spec: corev1.PodSpec{ - NodeName: "test-nodename", + { + ListenerPort: exposedPathsReadinessPortsRangeStart, + LocalPathPort: 8081, }, - Status: corev1.PodStatus{ - HostIP: "test-host-ip", + { + ListenerPort: exposedPathsStartupPortsRangeStart, + LocalPathPort: 8081, }, }, - gatewayEndpoint: corev1.Endpoints{ - ObjectMeta: metav1.ObjectMeta{ - Name: "gateway", - Namespace: "default", - }, + expErr: "", + }, + "multiple containers with all probes provided": { + tproxyGlobalEnabled: true, + overwriteProbes: true, + podAnnotations: map[string]string{ + annotationOriginalPod: 
"{\"metadata\":{\"name\":\"test-pod-1\",\"namespace\":\"default\",\"creationTimestamp\":null,\"labels\":{\"consul.hashicorp.com/connect-inject-managed-by\":\"consul-k8s-endpoints-controller\",\"consul.hashicorp.com/connect-inject-status\":\"injected\"}},\"spec\":{\"containers\":[{\"name\":\"test\",\"ports\":[{\"name\":\"tcp\",\"containerPort\":8081},{\"name\":\"http\",\"containerPort\":8080}],\"resources\":{},\"livenessProbe\":{\"httpGet\":{\"port\":8080}},\"readinessProbe\":{\"httpGet\":{\"port\":8081}},\"startupProbe\":{\"httpGet\":{\"port\":8081}}},{\"name\":\"test-2\",\"ports\":[{\"name\":\"tcp\",\"containerPort\":8083},{\"name\":\"http\",\"containerPort\":8082}],\"resources\":{},\"livenessProbe\":{\"httpGet\":{\"port\":8082}},\"readinessProbe\":{\"httpGet\":{\"port\":8083}},\"startupProbe\":{\"httpGet\":{\"port\":8083}}},{\"name\":\"envoy-sidecar\",\"ports\":[{\"name\":\"http\",\"containerPort\":20000}],\"resources\":{}}]},\"status\":{\"hostIP\":\"127.0.0.1\",\"podIP\":\"1.2.3.4\"}}\n", }, - k8sObjects: func() []runtime.Object { - service := &corev1.Service{ - ObjectMeta: metav1.ObjectMeta{ - Name: "gateway", - Namespace: "default", + podContainers: []corev1.Container{ + { + Name: "test", + Ports: []corev1.ContainerPort{ + { + Name: "tcp", + ContainerPort: 8081, + }, + { + Name: "http", + ContainerPort: 8080, + }, }, - Spec: corev1.ServiceSpec{ - Type: corev1.ServiceTypeLoadBalancer, - ClusterIP: "test-cluster-ip", + LivenessProbe: &corev1.Probe{ + Handler: corev1.Handler{ + HTTPGet: &corev1.HTTPGetAction{ + Port: intstr.FromInt(exposedPathsLivenessPortsRangeStart), + }, + }, }, - Status: corev1.ServiceStatus{ - LoadBalancer: corev1.LoadBalancerStatus{ - Ingress: []corev1.LoadBalancerIngress{ - { - IP: "1.2.3.4", - }, + ReadinessProbe: &corev1.Probe{ + Handler: corev1.Handler{ + HTTPGet: &corev1.HTTPGetAction{ + Port: intstr.FromInt(exposedPathsReadinessPortsRangeStart), }, }, }, - } - return []runtime.Object{service} - }, - wanAddr: "test-host-ip", - wanPort: 1234, - }, - "source=Static": { - gatewayPod: corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: "gateway", - Annotations: map[string]string{ - annotationGatewayWANSource: "Static", - annotationGatewayWANAddress: "test-wan-address", - annotationGatewayWANPort: "1234", + StartupProbe: &corev1.Probe{ + Handler: corev1.Handler{ + HTTPGet: &corev1.HTTPGetAction{ + Port: intstr.FromInt(exposedPathsStartupPortsRangeStart), + }, + }, }, }, - Spec: corev1.PodSpec{ - NodeName: "test-nodename", - }, - Status: corev1.PodStatus{ - HostIP: "test-host-ip", - }, - }, - gatewayEndpoint: corev1.Endpoints{ - ObjectMeta: metav1.ObjectMeta{ - Name: "gateway", - Namespace: "default", - }, - }, - k8sObjects: func() []runtime.Object { - service := &corev1.Service{ - ObjectMeta: metav1.ObjectMeta{ - Name: "gateway", - Namespace: "default", + { + Name: "test-2", + Ports: []corev1.ContainerPort{ + { + Name: "tcp", + ContainerPort: 8083, + }, + { + Name: "http", + ContainerPort: 8082, + }, }, - Spec: corev1.ServiceSpec{ - Type: corev1.ServiceTypeLoadBalancer, - ClusterIP: "test-cluster-ip", + LivenessProbe: &corev1.Probe{ + Handler: corev1.Handler{ + HTTPGet: &corev1.HTTPGetAction{ + Port: intstr.FromInt(exposedPathsLivenessPortsRangeStart + 1), + }, + }, }, - Status: corev1.ServiceStatus{ - LoadBalancer: corev1.LoadBalancerStatus{ - Ingress: []corev1.LoadBalancerIngress{ - { - IP: "1.2.3.4", - }, + ReadinessProbe: &corev1.Probe{ + Handler: corev1.Handler{ + HTTPGet: &corev1.HTTPGetAction{ + Port: intstr.FromInt(exposedPathsReadinessPortsRangeStart + 
1), }, }, }, - } - return []runtime.Object{service} - }, - wanAddr: "test-wan-address", - wanPort: 1234, - }, - "source=Service, serviceType=NodePort": { - gatewayPod: corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: "gateway", - Annotations: map[string]string{ - annotationGatewayWANSource: "Service", - annotationGatewayWANAddress: "test-wan-address", - annotationGatewayWANPort: "1234", + StartupProbe: &corev1.Probe{ + Handler: corev1.Handler{ + HTTPGet: &corev1.HTTPGetAction{ + Port: intstr.FromInt(exposedPathsStartupPortsRangeStart + 1), + }, + }, }, }, - Spec: corev1.PodSpec{ - NodeName: "test-nodename", - }, - Status: corev1.PodStatus{ - HostIP: "test-host-ip", + { + Name: envoySidecarContainer, + Ports: []corev1.ContainerPort{ + { + Name: "http", + ContainerPort: 20000, + }, + }, }, }, - gatewayEndpoint: corev1.Endpoints{ + service: &corev1.Service{ ObjectMeta: metav1.ObjectMeta{ - Name: "gateway", + Name: serviceName, Namespace: "default", }, - }, - k8sObjects: func() []runtime.Object { - service := &corev1.Service{ - ObjectMeta: metav1.ObjectMeta{ - Name: "gateway", - Namespace: "default", - }, - Spec: corev1.ServiceSpec{ - Type: corev1.ServiceTypeNodePort, - ClusterIP: "test-cluster-ip", - }, - Status: corev1.ServiceStatus{ - LoadBalancer: corev1.LoadBalancerStatus{ - Ingress: []corev1.LoadBalancerIngress{ - { - IP: "1.2.3.4", - }, - }, + Spec: corev1.ServiceSpec{ + ClusterIP: "10.0.0.1", + Ports: []corev1.ServicePort{ + { + Port: 8081, }, }, - } - return []runtime.Object{service} + }, }, - wanAddr: "test-host-ip", - wanPort: 1234, - }, - "source=Service, serviceType=ClusterIP": { - gatewayPod: corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: "gateway", - Annotations: map[string]string{ - annotationGatewayWANSource: "Service", - annotationGatewayWANAddress: "test-wan-address", - annotationGatewayWANPort: "1234", - }, + expProxyMode: api.ProxyModeTransparent, + expTaggedAddresses: map[string]api.ServiceAddress{ + "virtual": { + Address: "10.0.0.1", + Port: 8081, }, - Spec: corev1.PodSpec{ - NodeName: "test-nodename", + }, + expExposePaths: []api.ExposePath{ + { + ListenerPort: exposedPathsLivenessPortsRangeStart, + LocalPathPort: 8080, }, - Status: corev1.PodStatus{ - HostIP: "test-host-ip", + { + ListenerPort: exposedPathsReadinessPortsRangeStart, + LocalPathPort: 8081, }, - }, - gatewayEndpoint: corev1.Endpoints{ - ObjectMeta: metav1.ObjectMeta{ - Name: "gateway", - Namespace: "default", + { + ListenerPort: exposedPathsStartupPortsRangeStart, + LocalPathPort: 8081, }, - }, - k8sObjects: func() []runtime.Object { - service := &corev1.Service{ - ObjectMeta: metav1.ObjectMeta{ - Name: "gateway", - Namespace: "default", - }, - Spec: corev1.ServiceSpec{ - Type: corev1.ServiceTypeClusterIP, - ClusterIP: "test-cluster-ip", - }, - Status: corev1.ServiceStatus{ - LoadBalancer: corev1.LoadBalancerStatus{ - Ingress: []corev1.LoadBalancerIngress{ - { - IP: "1.2.3.4", - }, - }, - }, - }, - } - return []runtime.Object{service} - }, - wanAddr: "test-cluster-ip", - wanPort: 1234, - }, - "source=Service, serviceType=LoadBalancer,IP": { - gatewayPod: corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: "gateway", - Annotations: map[string]string{ - annotationGatewayWANSource: "Service", - annotationGatewayWANAddress: "test-wan-address", - annotationGatewayWANPort: "1234", - }, + { + ListenerPort: exposedPathsLivenessPortsRangeStart + 1, + LocalPathPort: 8082, }, - Spec: corev1.PodSpec{ - NodeName: "test-nodename", + { + ListenerPort: exposedPathsReadinessPortsRangeStart + 1, + 
LocalPathPort: 8083, }, - Status: corev1.PodStatus{ - HostIP: "test-host-ip", + { + ListenerPort: exposedPathsStartupPortsRangeStart + 1, + LocalPathPort: 8083, }, }, - gatewayEndpoint: corev1.Endpoints{ - ObjectMeta: metav1.ObjectMeta{ - Name: "gateway", - Namespace: "default", - }, + expErr: "", + }, + "non-http probe": { + tproxyGlobalEnabled: true, + overwriteProbes: true, + podAnnotations: map[string]string{ + annotationOriginalPod: "{\"metadata\":{\"name\":\"test-pod-1\",\"namespace\":\"default\",\"creationTimestamp\":null,\"labels\":{\"consul.hashicorp.com/connect-inject-managed-by\":\"consul-k8s-endpoints-controller\",\"consul.hashicorp.com/connect-inject-status\":\"injected\"}},\"spec\":{\"containers\":[{\"name\":\"test\",\"ports\":[{\"name\":\"tcp\",\"containerPort\":8081},{\"name\":\"http\",\"containerPort\":8080}],\"resources\":{},\"livenessProbe\":{\"tcpSocket\":{\"port\":8080}}}]},\"status\":{\"hostIP\":\"127.0.0.1\",\"podIP\":\"1.2.3.4\"}}\n", }, - k8sObjects: func() []runtime.Object { - service := &corev1.Service{ - ObjectMeta: metav1.ObjectMeta{ - Name: "gateway", - Namespace: "default", - }, - Spec: corev1.ServiceSpec{ - Type: corev1.ServiceTypeLoadBalancer, - ClusterIP: "test-cluster-ip", + podContainers: []corev1.Container{ + { + Name: "test", + Ports: []corev1.ContainerPort{ + { + Name: "tcp", + ContainerPort: 8081, + }, + { + Name: "http", + ContainerPort: 8080, + }, }, - Status: corev1.ServiceStatus{ - LoadBalancer: corev1.LoadBalancerStatus{ - Ingress: []corev1.LoadBalancerIngress{ - { - IP: "test-loadbalancer-ip", - }, + LivenessProbe: &corev1.Probe{ + Handler: corev1.Handler{ + TCPSocket: &corev1.TCPSocketAction{ + Port: intstr.FromInt(8080), }, }, }, - } - return []runtime.Object{service} + }, }, - wanAddr: "test-loadbalancer-ip", - wanPort: 1234, - }, - "source=Service, serviceType=LoadBalancer,Hostname": { - gatewayPod: corev1.Pod{ + service: &corev1.Service{ ObjectMeta: metav1.ObjectMeta{ - Name: "gateway", - Annotations: map[string]string{ - annotationGatewayWANSource: "Service", - annotationGatewayWANAddress: "test-wan-address", - annotationGatewayWANPort: "1234", - }, - }, - Spec: corev1.PodSpec{ - NodeName: "test-nodename", + Name: serviceName, + Namespace: "default", }, - Status: corev1.PodStatus{ - HostIP: "test-host-ip", + Spec: corev1.ServiceSpec{ + ClusterIP: "10.0.0.1", + Ports: []corev1.ServicePort{ + { + Port: 8081, + }, + }, }, }, - gatewayEndpoint: corev1.Endpoints{ - ObjectMeta: metav1.ObjectMeta{ - Name: "gateway", - Namespace: "default", + expProxyMode: api.ProxyModeTransparent, + expTaggedAddresses: map[string]api.ServiceAddress{ + "virtual": { + Address: "10.0.0.1", + Port: 8081, }, }, - k8sObjects: func() []runtime.Object { - service := &corev1.Service{ - ObjectMeta: metav1.ObjectMeta{ - Name: "gateway", - Namespace: "default", + expExposePaths: nil, + expErr: "", + }, + "probes with port names": { + tproxyGlobalEnabled: true, + overwriteProbes: true, + podAnnotations: map[string]string{ + annotationOriginalPod: 
"{\"metadata\":{\"name\":\"test-pod-1\",\"namespace\":\"default\",\"creationTimestamp\":null,\"labels\":{\"consul.hashicorp.com/connect-inject-managed-by\":\"consul-k8s-endpoints-controller\",\"consul.hashicorp.com/connect-inject-status\":\"injected\"}},\"spec\":{\"containers\":[{\"name\":\"test\",\"ports\":[{\"name\":\"tcp\",\"containerPort\":8081},{\"name\":\"http\",\"containerPort\":8080}],\"resources\":{},\"livenessProbe\":{\"httpGet\":{\"port\":\"tcp\"}},\"readinessProbe\":{\"httpGet\":{\"port\":\"http\"}},\"startupProbe\":{\"httpGet\":{\"port\":\"http\"}}}]},\"status\":{\"hostIP\":\"127.0.0.1\",\"podIP\":\"1.2.3.4\"}}\n", + }, + podContainers: []corev1.Container{ + { + Name: "test", + Ports: []corev1.ContainerPort{ + { + Name: "tcp", + ContainerPort: 8081, + }, + { + Name: "http", + ContainerPort: 8080, + }, }, - Spec: corev1.ServiceSpec{ - Type: corev1.ServiceTypeLoadBalancer, - ClusterIP: "test-cluster-ip", + LivenessProbe: &corev1.Probe{ + Handler: corev1.Handler{ + HTTPGet: &corev1.HTTPGetAction{ + Port: intstr.FromInt(exposedPathsLivenessPortsRangeStart), + }, + }, }, - Status: corev1.ServiceStatus{ - LoadBalancer: corev1.LoadBalancerStatus{ - Ingress: []corev1.LoadBalancerIngress{ - { - Hostname: "test-loadbalancer-hostname", - }, + ReadinessProbe: &corev1.Probe{ + Handler: corev1.Handler{ + HTTPGet: &corev1.HTTPGetAction{ + Port: intstr.FromInt(exposedPathsReadinessPortsRangeStart), }, }, }, - } - return []runtime.Object{service} - }, - wanAddr: "test-loadbalancer-hostname", - wanPort: 1234, - }, - "no Source annotation": { - gatewayPod: corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: "gateway", - Annotations: map[string]string{ - annotationGatewayWANAddress: "test-wan-address", - annotationGatewayWANPort: "1234", + StartupProbe: &corev1.Probe{ + Handler: corev1.Handler{ + HTTPGet: &corev1.HTTPGetAction{ + Port: intstr.FromInt(exposedPathsStartupPortsRangeStart), + }, + }, }, }, - Spec: corev1.PodSpec{ - NodeName: "test-nodename", - }, - Status: corev1.PodStatus{ - HostIP: "test-host-ip", - }, }, - gatewayEndpoint: corev1.Endpoints{ + service: &corev1.Service{ ObjectMeta: metav1.ObjectMeta{ - Name: "gateway", + Name: serviceName, Namespace: "default", }, - }, - k8sObjects: func() []runtime.Object { - service := &corev1.Service{ - ObjectMeta: metav1.ObjectMeta{ - Name: "gateway", - Namespace: "default", - }, - Spec: corev1.ServiceSpec{ - Type: corev1.ServiceTypeLoadBalancer, - ClusterIP: "test-cluster-ip", - }, - Status: corev1.ServiceStatus{ - LoadBalancer: corev1.LoadBalancerStatus{ - Ingress: []corev1.LoadBalancerIngress{ - { - Hostname: "test-loadbalancer-hostname", - }, - }, + Spec: corev1.ServiceSpec{ + ClusterIP: "10.0.0.1", + Ports: []corev1.ServicePort{ + { + Port: 8081, }, }, - } - return []runtime.Object{service} - }, - wanAddr: "test-loadbalancer-hostname", - wanPort: 1234, - expErr: "failed to read annotation consul.hashicorp.com/gateway-wan-address-source", - }, - "no Service with Source=Service": { - gatewayPod: corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: "gateway", - Annotations: map[string]string{ - annotationGatewayWANSource: "Service", - annotationGatewayWANAddress: "test-wan-address", - annotationGatewayWANPort: "1234", - }, - }, - Spec: corev1.PodSpec{ - NodeName: "test-nodename", - }, - Status: corev1.PodStatus{ - HostIP: "test-host-ip", }, }, - gatewayEndpoint: corev1.Endpoints{ - ObjectMeta: metav1.ObjectMeta{ - Name: "gateway", - Namespace: "default", + expProxyMode: api.ProxyModeTransparent, + expTaggedAddresses: 
map[string]api.ServiceAddress{ + "virtual": { + Address: "10.0.0.1", + Port: 8081, }, }, - k8sObjects: func() []runtime.Object { return nil }, - wanAddr: "test-loadbalancer-hostname", - wanPort: 1234, - expErr: "failed to read service gateway in namespace default", - }, - "WAN Port annotation misconfigured": { - gatewayPod: corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: "gateway", - Annotations: map[string]string{ - annotationGatewayWANSource: "Service", - annotationGatewayWANAddress: "test-wan-address", - annotationGatewayWANPort: "not-a-valid-port", - }, + expExposePaths: []api.ExposePath{ + { + ListenerPort: exposedPathsLivenessPortsRangeStart, + LocalPathPort: 8081, }, - Spec: corev1.PodSpec{ - NodeName: "test-nodename", + { + ListenerPort: exposedPathsReadinessPortsRangeStart, + LocalPathPort: 8080, }, - Status: corev1.PodStatus{ - HostIP: "test-host-ip", + { + ListenerPort: exposedPathsStartupPortsRangeStart, + LocalPathPort: 8080, }, }, - gatewayEndpoint: corev1.Endpoints{ + expErr: "", + }, + } + + for name, c := range cases { + t.Run(name, func(t *testing.T) { + pod := createPod("test-pod-1", "1.2.3.4", true, true) + if c.podAnnotations != nil { + pod.Annotations = c.podAnnotations + } + if c.podContainers != nil { + pod.Spec.Containers = c.podContainers + } + + // We set these annotations explicitly as these are set by the meshWebhook and we + // need these values to determine which port to use for the service registration. + pod.Annotations[annotationPort] = "tcp" + + endpoints := &corev1.Endpoints{ ObjectMeta: metav1.ObjectMeta{ - Name: "gateway", + Name: serviceName, Namespace: "default", }, - }, - k8sObjects: func() []runtime.Object { - service := &corev1.Service{ - ObjectMeta: metav1.ObjectMeta{ - Name: "gateway", - Namespace: "default", - }, - Spec: corev1.ServiceSpec{ - Type: corev1.ServiceTypeLoadBalancer, - ClusterIP: "test-cluster-ip", - }, - Status: corev1.ServiceStatus{ - LoadBalancer: corev1.LoadBalancerStatus{ - Ingress: []corev1.LoadBalancerIngress{ - { - Hostname: "test-loadbalancer-hostname", + Subsets: []corev1.EndpointSubset{ + { + Addresses: []corev1.EndpointAddress{ + { + IP: "1.2.3.4", + TargetRef: &corev1.ObjectReference{ + Kind: "Pod", + Name: pod.Name, + Namespace: pod.Namespace, }, }, }, }, - } - return []runtime.Object{service} - }, - wanAddr: "test-loadbalancer-hostname", - wanPort: 1234, - expErr: "failed to parse WAN port from value not-a-valid-port", + }, + } + // Add the pod's namespace. 
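+ // The namespace is created with the per-case labels; transparent proxy can be toggled per namespace through these labels, which is what the tproxy cases above rely on.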
+ ns := corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{Name: pod.Namespace, Labels: c.namespaceLabels}, + } + var fakeClient client.Client + if c.service != nil { + fakeClient = fake.NewClientBuilder().WithRuntimeObjects(pod, endpoints, c.service, &ns).Build() + } else { + fakeClient = fake.NewClientBuilder().WithRuntimeObjects(pod, endpoints, &ns).Build() + } + + epCtrl := EndpointsController{ + Client: fakeClient, + EnableTransparentProxy: c.tproxyGlobalEnabled, + TProxyOverwriteProbes: c.overwriteProbes, + Log: logrtest.TestLogger{T: t}, + } + + serviceRegistration, proxyServiceRegistration, err := epCtrl.createServiceRegistrations(*pod, *endpoints) + if c.expErr != "" { + require.EqualError(t, err, c.expErr) + } else { + require.NoError(t, err) + + require.Equal(t, c.expProxyMode, proxyServiceRegistration.Proxy.Mode) + require.Equal(t, c.expTaggedAddresses, serviceRegistration.TaggedAddresses) + require.Equal(t, c.expTaggedAddresses, proxyServiceRegistration.TaggedAddresses) + require.Equal(t, c.expExposePaths, proxyServiceRegistration.Proxy.Expose.Paths) + } + }) + } +} + +func TestGetTokenMetaFromDescription(t *testing.T) { + t.Parallel() + cases := map[string]struct { + description string + expectedTokenMeta map[string]string + }{ + "no description prefix": { + description: `{"pod":"default/pod"}`, + expectedTokenMeta: map[string]string{"pod": "default/pod"}, }, - "source=Service, serviceType=LoadBalancer no Ingress configured": { - gatewayPod: corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: "gateway", - Annotations: map[string]string{ - annotationGatewayWANSource: "Service", - annotationGatewayWANAddress: "test-wan-address", - annotationGatewayWANPort: "1234", - }, + "consul's default description prefix": { + description: `token created via login: {"pod":"default/pod"}`, + expectedTokenMeta: map[string]string{"pod": "default/pod"}, + }, + } + + for name, c := range cases { + t.Run(name, func(t *testing.T) { + tokenMeta, err := getTokenMetaFromDescription(c.description) + require.NoError(t, err) + require.Equal(t, c.expectedTokenMeta, tokenMeta) + }) + } +} + +func TestMapAddresses(t *testing.T) { + t.Parallel() + cases := map[string]struct { + addresses corev1.EndpointSubset + expected map[corev1.EndpointAddress]string + }{ + "ready and not ready addresses": { + addresses: corev1.EndpointSubset{ + Addresses: []corev1.EndpointAddress{ + {Hostname: "host1"}, + {Hostname: "host2"}, }, - Spec: corev1.PodSpec{ - NodeName: "test-nodename", + NotReadyAddresses: []corev1.EndpointAddress{ + {Hostname: "host3"}, + {Hostname: "host4"}, }, - Status: corev1.PodStatus{ - HostIP: "test-host-ip", + }, + expected: map[corev1.EndpointAddress]string{ + {Hostname: "host1"}: api.HealthPassing, + {Hostname: "host2"}: api.HealthPassing, + {Hostname: "host3"}: api.HealthCritical, + {Hostname: "host4"}: api.HealthCritical, + }, + }, + "ready addresses only": { + addresses: corev1.EndpointSubset{ + Addresses: []corev1.EndpointAddress{ + {Hostname: "host1"}, + {Hostname: "host2"}, + {Hostname: "host3"}, + {Hostname: "host4"}, }, + NotReadyAddresses: []corev1.EndpointAddress{}, }, - gatewayEndpoint: corev1.Endpoints{ - ObjectMeta: metav1.ObjectMeta{ - Name: "gateway", - Namespace: "default", + expected: map[corev1.EndpointAddress]string{ + {Hostname: "host1"}: api.HealthPassing, + {Hostname: "host2"}: api.HealthPassing, + {Hostname: "host3"}: api.HealthPassing, + {Hostname: "host4"}: api.HealthPassing, + }, + }, + "not ready addresses only": { + addresses: corev1.EndpointSubset{ + Addresses: 
[]corev1.EndpointAddress{}, + NotReadyAddresses: []corev1.EndpointAddress{ + {Hostname: "host1"}, + {Hostname: "host2"}, + {Hostname: "host3"}, + {Hostname: "host4"}, }, }, - k8sObjects: func() []runtime.Object { - service := &corev1.Service{ - ObjectMeta: metav1.ObjectMeta{ - Name: "gateway", - Namespace: "default", - }, - Spec: corev1.ServiceSpec{ - Type: corev1.ServiceTypeLoadBalancer, - ClusterIP: "test-cluster-ip", - }, - Status: corev1.ServiceStatus{ - LoadBalancer: corev1.LoadBalancerStatus{ - Ingress: []corev1.LoadBalancerIngress{}, - }, - }, - } - return []runtime.Object{service} + expected: map[corev1.EndpointAddress]string{ + {Hostname: "host1"}: api.HealthCritical, + {Hostname: "host2"}: api.HealthCritical, + {Hostname: "host3"}: api.HealthCritical, + {Hostname: "host4"}: api.HealthCritical, }, - wanAddr: "test-loadbalancer-hostname", - wanPort: 1234, - expErr: "failed to read ingress config for loadbalancer for service gateway in namespace default", }, } for name, c := range cases { t.Run(name, func(t *testing.T) { - fakeClient := fake.NewClientBuilder().WithRuntimeObjects(c.k8sObjects()...).Build() - epCtrl := EndpointsController{ - Client: fakeClient, - } - addr, port, err := epCtrl.getWanData(c.gatewayPod, c.gatewayEndpoint) - if c.expErr == "" { - require.NoError(t, err) - require.Equal(t, c.wanAddr, addr) - require.Equal(t, c.wanPort, port) - } else { - require.EqualError(t, err, c.expErr) - } + actual := mapAddresses(c.addresses) + require.Equal(t, c.expected, actual) }) } } -func createServicePod(name, ip string, inject bool, managedByEndpointsController bool) *corev1.Pod { +func createPod(name, ip string, inject bool, managedByEndpointsController bool) *corev1.Pod { pod := &corev1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: name, @@ -6124,7 +5947,8 @@ func createServicePod(name, ip string, inject bool, managedByEndpointsController Annotations: map[string]string{}, }, Status: corev1.PodStatus{ - PodIP: ip, + PodIP: ip, + HostIP: "127.0.0.1", Conditions: []corev1.PodCondition{ { Type: corev1.PodReady, @@ -6143,23 +5967,6 @@ func createServicePod(name, ip string, inject bool, managedByEndpointsController return pod } -func createGatewayPod(name, ip string, annotations map[string]string) *corev1.Pod { - pod := &corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: "default", - Labels: map[string]string{keyManagedBy: managedByValue}, - Annotations: annotations, - }, - Status: corev1.PodStatus{ - PodIP: ip, - Conditions: []corev1.PodCondition{ - { - Type: corev1.PodReady, - Status: corev1.ConditionTrue, - }, - }, - }, - } - return pod +func toStringPtr(input string) *string { + return &input } diff --git a/control-plane/connect-inject/envoy_sidecar.go b/control-plane/connect-inject/envoy_sidecar.go new file mode 100644 index 0000000000..7bf55afe60 --- /dev/null +++ b/control-plane/connect-inject/envoy_sidecar.go @@ -0,0 +1,217 @@ +package connectinject + +import ( + "encoding/json" + "fmt" + "strconv" + "strings" + + "github.com/google/shlex" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + "k8s.io/utils/pointer" +) + +func (w *MeshWebhook) envoySidecar(namespace corev1.Namespace, pod corev1.Pod, mpi multiPortInfo) (corev1.Container, error) { + resources, err := w.envoySidecarResources(pod) + if err != nil { + return corev1.Container{}, err + } + + multiPort := mpi.serviceName != "" + cmd, err := w.getContainerSidecarCommand(pod, mpi.serviceName, mpi.serviceIndex) + if err != nil { + return corev1.Container{}, err + } + + containerName 
:= envoySidecarContainer + if multiPort { + containerName = fmt.Sprintf("%s-%s", envoySidecarContainer, mpi.serviceName) + } + + container := corev1.Container{ + Name: containerName, + Image: w.ImageEnvoy, + Env: []corev1.EnvVar{ + { + Name: "HOST_IP", + ValueFrom: &corev1.EnvVarSource{ + FieldRef: &corev1.ObjectFieldSelector{FieldPath: "status.hostIP"}, + }, + }, + }, + Resources: resources, + VolumeMounts: []corev1.VolumeMount{ + { + Name: volumeName, + MountPath: "/consul/connect-inject", + }, + }, + Command: cmd, + } + + // Add any extra Envoy VolumeMounts. + if _, ok := pod.Annotations[annotationConsulSidecarUserVolumeMount]; ok { + var volumeMount []corev1.VolumeMount + err := json.Unmarshal([]byte(pod.Annotations[annotationConsulSidecarUserVolumeMount]), &volumeMount) + if err != nil { + return corev1.Container{}, err + } + container.VolumeMounts = append(container.VolumeMounts, volumeMount...) + } + + tproxyEnabled, err := transparentProxyEnabled(namespace, pod, w.EnableTransparentProxy) + if err != nil { + return corev1.Container{}, err + } + + // If not running in transparent proxy mode and in an OpenShift environment, + // skip setting the security context and let OpenShift set it for us. + // When transparent proxy is enabled, Envoy needs to run as our specific user + // so that traffic redirection will work. + if tproxyEnabled || !w.EnableOpenShift { + if pod.Spec.SecurityContext != nil { + // User container and Envoy container cannot have the same UID. + if pod.Spec.SecurityContext.RunAsUser != nil && *pod.Spec.SecurityContext.RunAsUser == envoyUserAndGroupID { + return corev1.Container{}, fmt.Errorf("pod security context cannot have the same uid as envoy: %v", envoyUserAndGroupID) + } + } + // Ensure that none of the user's containers have the same UID as Envoy. At this point in injection the meshWebhook + // has only injected init containers, so all containers defined in pod.Spec.Containers are from the user. + for _, c := range pod.Spec.Containers { + // User container and Envoy container cannot have the same UID. + if c.SecurityContext != nil && c.SecurityContext.RunAsUser != nil && *c.SecurityContext.RunAsUser == envoyUserAndGroupID && c.Image != w.ImageEnvoy { + return corev1.Container{}, fmt.Errorf("container %q has runAsUser set to the same uid %q as envoy which is not allowed", c.Name, envoyUserAndGroupID) + } + } + container.SecurityContext = &corev1.SecurityContext{ + RunAsUser: pointer.Int64(envoyUserAndGroupID), + RunAsGroup: pointer.Int64(envoyUserAndGroupID), + RunAsNonRoot: pointer.Bool(true), + ReadOnlyRootFilesystem: pointer.Bool(true), + } + } + + return container, nil +} +func (w *MeshWebhook) getContainerSidecarCommand(pod corev1.Pod, multiPortSvcName string, multiPortSvcIdx int) ([]string, error) { + bootstrapFile := "/consul/connect-inject/envoy-bootstrap.yaml" + if multiPortSvcName != "" { + bootstrapFile = fmt.Sprintf("/consul/connect-inject/envoy-bootstrap-%s.yaml", multiPortSvcName) + } + cmd := []string{ + "envoy", + "--config-path", bootstrapFile, + } + if multiPortSvcName != "" { + // --base-id is needed so multiple Envoy proxies can run on the same host. + cmd = append(cmd, "--base-id", fmt.Sprintf("%d", multiPortSvcIdx)) + } + + // Check to see if the user has overridden concurrency via an annotation.
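+ // A value that fails to parse is an error, a negative value is rejected, and any other value is passed through to Envoy's --concurrency flag unchanged.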
+ if pod.Annotations[annotationEnvoyProxyConcurrency] != "" { + val, err := strconv.ParseInt(pod.Annotations[annotationEnvoyProxyConcurrency], 10, 64) + if err != nil { + return nil, fmt.Errorf("unable to parse annotation: %s", annotationEnvoyProxyConcurrency) + } + if val < 0 { + return nil, fmt.Errorf("invalid envoy concurrency, must be >= 0: %s", pod.Annotations[annotationEnvoyProxyConcurrency]) + } else { + cmd = append(cmd, "--concurrency", pod.Annotations[annotationEnvoyProxyConcurrency]) + } + } else { + // Use the default concurrency. + cmd = append(cmd, "--concurrency", fmt.Sprintf("%d", w.DefaultEnvoyProxyConcurrency)) + } + + extraArgs, annotationSet := pod.Annotations[annotationEnvoyExtraArgs] + + if annotationSet || w.EnvoyExtraArgs != "" { + extraArgsToUse := w.EnvoyExtraArgs + + // Prefer args set by pod annotation over the flag to the consul-k8s binary (w.EnvoyExtraArgs). + if annotationSet { + extraArgsToUse = extraArgs + } + + // Split string into tokens. + // e.g. "--foo bar --boo baz" --> ["--foo", "bar", "--boo", "baz"] + tokens, err := shlex.Split(extraArgsToUse) + if err != nil { + return []string{}, err + } + for _, t := range tokens { + if strings.Contains(t, " ") { + t = strconv.Quote(t) + } + cmd = append(cmd, t) + } + } + return cmd, nil +} + +func (w *MeshWebhook) envoySidecarResources(pod corev1.Pod) (corev1.ResourceRequirements, error) { + resources := corev1.ResourceRequirements{ + Limits: corev1.ResourceList{}, + Requests: corev1.ResourceList{}, + } + // zeroQuantity is used for comparison to see if a quantity was explicitly + // set. + var zeroQuantity resource.Quantity + + // NOTE: We only want to set the limit/request if the default or annotation + // was explicitly set. If it's not explicitly set, it will be the zero value + // which would show up in the pod spec as being explicitly set to zero if we + // set that key, e.g. "cpu", to zero. + // We want it to not show up in the pod spec at all if it's not explicitly + // set so that users aren't wondering why it's set to 0 when they didn't specify + // a request/limit. If they have explicitly set it to 0 then it will be set + // to 0 in the pod spec because we're doing a comparison to the zero-valued + // struct. + + // CPU Limit. + if anno, ok := pod.Annotations[annotationSidecarProxyCPULimit]; ok { + cpuLimit, err := resource.ParseQuantity(anno) + if err != nil { + return corev1.ResourceRequirements{}, fmt.Errorf("parsing annotation %s:%q: %s", annotationSidecarProxyCPULimit, anno, err) + } + resources.Limits[corev1.ResourceCPU] = cpuLimit + } else if w.DefaultProxyCPULimit != zeroQuantity { + resources.Limits[corev1.ResourceCPU] = w.DefaultProxyCPULimit + } + + // CPU Request. + if anno, ok := pod.Annotations[annotationSidecarProxyCPURequest]; ok { + cpuRequest, err := resource.ParseQuantity(anno) + if err != nil { + return corev1.ResourceRequirements{}, fmt.Errorf("parsing annotation %s:%q: %s", annotationSidecarProxyCPURequest, anno, err) + } + resources.Requests[corev1.ResourceCPU] = cpuRequest + } else if w.DefaultProxyCPURequest != zeroQuantity { + resources.Requests[corev1.ResourceCPU] = w.DefaultProxyCPURequest + } + + // Memory Limit.
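+ // As with the CPU settings above, the annotation takes precedence over the webhook default, and a default equal to the zero quantity is left unset.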
+ if anno, ok := pod.Annotations[annotationSidecarProxyMemoryLimit]; ok { + memoryLimit, err := resource.ParseQuantity(anno) + if err != nil { + return corev1.ResourceRequirements{}, fmt.Errorf("parsing annotation %s:%q: %s", annotationSidecarProxyMemoryLimit, anno, err) + } + resources.Limits[corev1.ResourceMemory] = memoryLimit + } else if w.DefaultProxyMemoryLimit != zeroQuantity { + resources.Limits[corev1.ResourceMemory] = w.DefaultProxyMemoryLimit + } + + // Memory Request. + if anno, ok := pod.Annotations[annotationSidecarProxyMemoryRequest]; ok { + memoryRequest, err := resource.ParseQuantity(anno) + if err != nil { + return corev1.ResourceRequirements{}, fmt.Errorf("parsing annotation %s:%q: %s", annotationSidecarProxyMemoryRequest, anno, err) + } + resources.Requests[corev1.ResourceMemory] = memoryRequest + } else if w.DefaultProxyMemoryRequest != zeroQuantity { + resources.Requests[corev1.ResourceMemory] = w.DefaultProxyMemoryRequest + } + + return resources, nil +} diff --git a/control-plane/connect-inject/envoy_sidecar_test.go b/control-plane/connect-inject/envoy_sidecar_test.go new file mode 100644 index 0000000000..cd83d74244 --- /dev/null +++ b/control-plane/connect-inject/envoy_sidecar_test.go @@ -0,0 +1,638 @@ +package connectinject + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/require" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/utils/pointer" +) + +func TestHandlerEnvoySidecar(t *testing.T) { + require := require.New(t) + cases := map[string]struct { + annotations map[string]string + expCommand []string + expErr string + }{ + "default settings, no annotations": { + annotations: map[string]string{ + annotationService: "foo", + }, + expCommand: []string{ + "envoy", + "--config-path", "/consul/connect-inject/envoy-bootstrap.yaml", + "--concurrency", "0", + }, + }, + "default settings, annotation override": { + annotations: map[string]string{ + annotationService: "foo", + annotationEnvoyProxyConcurrency: "42", + }, + expCommand: []string{ + "envoy", + "--config-path", "/consul/connect-inject/envoy-bootstrap.yaml", + "--concurrency", "42", + }, + }, + "default settings, invalid concurrency annotation negative number": { + annotations: map[string]string{ + annotationService: "foo", + annotationEnvoyProxyConcurrency: "-42", + }, + expErr: "invalid envoy concurrency, must be >= 0: -42", + }, + } + for name, c := range cases { + t.Run(name, func(t *testing.T) { + h := MeshWebhook{} + pod := corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: c.annotations, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "web", + }, + }, + }, + } + container, err := h.envoySidecar(testNS, pod, multiPortInfo{}) + if c.expErr != "" { + require.Contains(err.Error(), c.expErr) + } else { + require.NoError(err) + require.Equal(c.expCommand, container.Command) + require.Equal(container.VolumeMounts, []corev1.VolumeMount{ + { + Name: volumeName, + MountPath: "/consul/connect-inject", + }, + }) + } + }) + } +} + +func TestHandlerEnvoySidecar_Multiport(t *testing.T) { + require := require.New(t) + w := MeshWebhook{} + pod := corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + annotationService: "web,web-admin", + }, + }, + + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "web", + }, + { + Name: "web-admin", + }, + }, + }, + } + multiPortInfos := []multiPortInfo{ + { + serviceIndex: 0, + serviceName: "web", + }, + { + 
serviceIndex: 1, + serviceName: "web-admin", + }, + } + expCommand := map[int][]string{ + 0: {"envoy", "--config-path", "/consul/connect-inject/envoy-bootstrap-web.yaml", "--base-id", "0", "--concurrency", "0"}, + 1: {"envoy", "--config-path", "/consul/connect-inject/envoy-bootstrap-web-admin.yaml", "--base-id", "1", "--concurrency", "0"}, + } + for i := 0; i < 2; i++ { + container, err := w.envoySidecar(testNS, pod, multiPortInfos[i]) + require.NoError(err) + require.Equal(expCommand[i], container.Command) + + require.Equal(container.VolumeMounts, []corev1.VolumeMount{ + { + Name: volumeName, + MountPath: "/consul/connect-inject", + }, + }) + } +} + +func TestHandlerEnvoySidecar_withSecurityContext(t *testing.T) { + cases := map[string]struct { + tproxyEnabled bool + openShiftEnabled bool + expSecurityContext *corev1.SecurityContext + }{ + "tproxy disabled; openshift disabled": { + tproxyEnabled: false, + openShiftEnabled: false, + expSecurityContext: &corev1.SecurityContext{ + RunAsUser: pointer.Int64(envoyUserAndGroupID), + RunAsGroup: pointer.Int64(envoyUserAndGroupID), + RunAsNonRoot: pointer.Bool(true), + ReadOnlyRootFilesystem: pointer.Bool(true), + }, + }, + "tproxy enabled; openshift disabled": { + tproxyEnabled: true, + openShiftEnabled: false, + expSecurityContext: &corev1.SecurityContext{ + RunAsUser: pointer.Int64(envoyUserAndGroupID), + RunAsGroup: pointer.Int64(envoyUserAndGroupID), + RunAsNonRoot: pointer.Bool(true), + ReadOnlyRootFilesystem: pointer.Bool(true), + }, + }, + "tproxy disabled; openshift enabled": { + tproxyEnabled: false, + openShiftEnabled: true, + expSecurityContext: nil, + }, + "tproxy enabled; openshift enabled": { + tproxyEnabled: true, + openShiftEnabled: true, + expSecurityContext: &corev1.SecurityContext{ + RunAsUser: pointer.Int64(envoyUserAndGroupID), + RunAsGroup: pointer.Int64(envoyUserAndGroupID), + RunAsNonRoot: pointer.Bool(true), + ReadOnlyRootFilesystem: pointer.Bool(true), + }, + }, + } + for name, c := range cases { + t.Run(name, func(t *testing.T) { + w := MeshWebhook{ + EnableTransparentProxy: c.tproxyEnabled, + EnableOpenShift: c.openShiftEnabled, + } + pod := corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + annotationService: "foo", + }, + }, + + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "web", + }, + }, + }, + } + ec, err := w.envoySidecar(testNS, pod, multiPortInfo{}) + require.NoError(t, err) + require.Equal(t, c.expSecurityContext, ec.SecurityContext) + }) + } +} + +// Test that if the user specifies a pod security context with the same uid as `envoyUserAndGroupID`, we return +// an error to the meshWebhook. +func TestHandlerEnvoySidecar_FailsWithDuplicatePodSecurityContextUID(t *testing.T) { + require := require.New(t) + w := MeshWebhook{} + pod := corev1.Pod{ + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "web", + }, + }, + SecurityContext: &corev1.PodSecurityContext{ + RunAsUser: pointer.Int64(envoyUserAndGroupID), + }, + }, + } + _, err := w.envoySidecar(testNS, pod, multiPortInfo{}) + require.Error(err, fmt.Sprintf("pod security context cannot have the same uid as envoy: %v", envoyUserAndGroupID)) +} + +// Test that if the user specifies a container whose security context has the same uid as `envoyUserAndGroupID`, we +// return an error to the meshWebhook. If a container using the envoy image has the same uid, we don't return an error +// because in a multiport pod there can be multiple envoy sidecars.
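+// The exemption is keyed off the container's image matching w.ImageEnvoy exactly, so only containers running the configured Envoy image may share that uid.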
+func TestHandlerEnvoySidecar_FailsWithDuplicateContainerSecurityContextUID(t *testing.T) { + cases := []struct { + name string + pod corev1.Pod + webhook MeshWebhook + expErr bool + expErrMessage error + }{ + { + name: "fails with non envoy image", + pod: corev1.Pod{ + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "web", + // Setting RunAsUser: 1 should succeed. + SecurityContext: &corev1.SecurityContext{ + RunAsUser: pointer.Int64(1), + }, + }, + { + Name: "app", + // Setting RunAsUser: 5995 should fail. + SecurityContext: &corev1.SecurityContext{ + RunAsUser: pointer.Int64(envoyUserAndGroupID), + }, + Image: "not-envoy", + }, + }, + }, + }, + webhook: MeshWebhook{}, + expErr: true, + expErrMessage: fmt.Errorf("container app has runAsUser set to the same uid %q as envoy which is not allowed", envoyUserAndGroupID), + }, + { + name: "doesn't fail with envoy image", + pod: corev1.Pod{ + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "web", + // Setting RunAsUser: 1 should succeed. + SecurityContext: &corev1.SecurityContext{ + RunAsUser: pointer.Int64(1), + }, + }, + { + Name: "sidecar", + // Setting RunAsUser: 5995 should succeed if the image matches h.ImageEnvoy. + SecurityContext: &corev1.SecurityContext{ + RunAsUser: pointer.Int64(envoyUserAndGroupID), + }, + Image: "envoy", + }, + }, + }, + }, + webhook: MeshWebhook{ + ImageEnvoy: "envoy", + }, + expErr: false, + }, + } + + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + _, err := tc.webhook.envoySidecar(testNS, tc.pod, multiPortInfo{}) + if tc.expErr { + require.Error(t, err, tc.expErrMessage) + } else { + require.NoError(t, err) + } + }) + } +} + +// Test that we can pass extra args to envoy via the extraEnvoyArgs flag +// or via pod annotations. When arguments are passed in both ways, the +// arguments set via pod annotations are used. 
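+// Arguments are split shell-style, so a quoted argument containing spaces survives as a single token in the final command.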
+func TestHandlerEnvoySidecar_EnvoyExtraArgs(t *testing.T) { + cases := []struct { + name string + envoyExtraArgs string + pod *corev1.Pod + expectedContainerCommand []string + }{ + { + name: "no extra options provided", + envoyExtraArgs: "", + pod: &corev1.Pod{}, + expectedContainerCommand: []string{ + "envoy", + "--config-path", "/consul/connect-inject/envoy-bootstrap.yaml", + "--concurrency", "0", + }, + }, + { + name: "via flag: extra log-level option", + envoyExtraArgs: "--log-level debug", + pod: &corev1.Pod{}, + expectedContainerCommand: []string{ + "envoy", + "--config-path", "/consul/connect-inject/envoy-bootstrap.yaml", + "--concurrency", "0", + "--log-level", "debug", + }, + }, + { + name: "via flag: multiple arguments with quotes", + envoyExtraArgs: "--log-level debug --admin-address-path \"/tmp/consul/foo bar\"", + pod: &corev1.Pod{}, + expectedContainerCommand: []string{ + "envoy", + "--config-path", "/consul/connect-inject/envoy-bootstrap.yaml", + "--concurrency", "0", + "--log-level", "debug", + "--admin-address-path", "\"/tmp/consul/foo bar\"", + }, + }, + { + name: "via annotation: multiple arguments with quotes", + envoyExtraArgs: "", + pod: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + annotationEnvoyExtraArgs: "--log-level debug --admin-address-path \"/tmp/consul/foo bar\"", + }, + }, + }, + expectedContainerCommand: []string{ + "envoy", + "--config-path", "/consul/connect-inject/envoy-bootstrap.yaml", + "--concurrency", "0", + "--log-level", "debug", + "--admin-address-path", "\"/tmp/consul/foo bar\"", + }, + }, + { + name: "via flag and annotation: should prefer setting via the annotation", + envoyExtraArgs: "this should be overwritten", + pod: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + annotationEnvoyExtraArgs: "--log-level debug --admin-address-path \"/tmp/consul/foo bar\"", + }, + }, + }, + expectedContainerCommand: []string{ + "envoy", + "--config-path", "/consul/connect-inject/envoy-bootstrap.yaml", + "--concurrency", "0", + "--log-level", "debug", + "--admin-address-path", "\"/tmp/consul/foo bar\"", + }, + }, + } + + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + h := MeshWebhook{ + ImageConsul: "hashicorp/consul:latest", + ImageEnvoy: "hashicorp/consul-k8s:latest", + EnvoyExtraArgs: tc.envoyExtraArgs, + } + + c, err := h.envoySidecar(testNS, *tc.pod, multiPortInfo{}) + require.NoError(t, err) + require.Equal(t, tc.expectedContainerCommand, c.Command) + }) + } +} + +func TestHandlerEnvoySidecar_UserVolumeMounts(t *testing.T) { + cases := []struct { + name string + pod corev1.Pod + expectedContainerVolumeMounts []corev1.VolumeMount + expErr string + }{ + { + name: "able to set a sidecar container volume mount via annotation", + pod: corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + annotationEnvoyExtraArgs: "--log-level debug --admin-address-path \"/tmp/consul/foo bar\"", + annotationConsulSidecarUserVolumeMount: "[{\"name\": \"tls-cert\", \"mountPath\": \"/custom/path\"}, {\"name\": \"tls-ca\", \"mountPath\": \"/custom/path2\"}]", + }, + }, + }, + expectedContainerVolumeMounts: []corev1.VolumeMount{ + { + Name: "consul-connect-inject-data", + MountPath: "/consul/connect-inject", + }, + { + Name: "tls-cert", + MountPath: "/custom/path", + }, + { + Name: "tls-ca", + MountPath: "/custom/path2", + }, + }, + }, + { + name: "invalid annotation results in error", + pod: corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: 
map[string]string{ + annotationEnvoyExtraArgs: "--log-level debug --admin-address-path \"/tmp/consul/foo bar\"", + annotationConsulSidecarUserVolumeMount: "[abcdefg]", + }, + }, + }, + expErr: "invalid character 'a' looking ", + }, + } + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + h := MeshWebhook{ + ImageConsul: "hashicorp/consul:latest", + ImageEnvoy: "hashicorp/consul-k8s:latest", + } + c, err := h.envoySidecar(testNS, tc.pod, multiPortInfo{}) + if tc.expErr == "" { + require.NoError(t, err) + require.Equal(t, tc.expectedContainerVolumeMounts, c.VolumeMounts) + } else { + require.Error(t, err) + require.Contains(t, err.Error(), tc.expErr) + } + }) + } +} + +func TestHandlerEnvoySidecar_Resources(t *testing.T) { + mem1 := resource.MustParse("100Mi") + mem2 := resource.MustParse("200Mi") + cpu1 := resource.MustParse("100m") + cpu2 := resource.MustParse("200m") + zero := resource.MustParse("0") + + cases := map[string]struct { + webhook MeshWebhook + annotations map[string]string + expResources corev1.ResourceRequirements + expErr string + }{ + "no defaults, no annotations": { + webhook: MeshWebhook{}, + annotations: nil, + expResources: corev1.ResourceRequirements{ + Limits: corev1.ResourceList{}, + Requests: corev1.ResourceList{}, + }, + }, + "all defaults, no annotations": { + webhook: MeshWebhook{ + DefaultProxyCPURequest: cpu1, + DefaultProxyCPULimit: cpu2, + DefaultProxyMemoryRequest: mem1, + DefaultProxyMemoryLimit: mem2, + }, + annotations: nil, + expResources: corev1.ResourceRequirements{ + Limits: corev1.ResourceList{ + corev1.ResourceCPU: cpu2, + corev1.ResourceMemory: mem2, + }, + Requests: corev1.ResourceList{ + corev1.ResourceCPU: cpu1, + corev1.ResourceMemory: mem1, + }, + }, + }, + "no defaults, all annotations": { + webhook: MeshWebhook{}, + annotations: map[string]string{ + annotationSidecarProxyCPURequest: "100m", + annotationSidecarProxyMemoryRequest: "100Mi", + annotationSidecarProxyCPULimit: "200m", + annotationSidecarProxyMemoryLimit: "200Mi", + }, + expResources: corev1.ResourceRequirements{ + Limits: corev1.ResourceList{ + corev1.ResourceCPU: cpu2, + corev1.ResourceMemory: mem2, + }, + Requests: corev1.ResourceList{ + corev1.ResourceCPU: cpu1, + corev1.ResourceMemory: mem1, + }, + }, + }, + "annotations override defaults": { + webhook: MeshWebhook{ + DefaultProxyCPURequest: zero, + DefaultProxyCPULimit: zero, + DefaultProxyMemoryRequest: zero, + DefaultProxyMemoryLimit: zero, + }, + annotations: map[string]string{ + annotationSidecarProxyCPURequest: "100m", + annotationSidecarProxyMemoryRequest: "100Mi", + annotationSidecarProxyCPULimit: "200m", + annotationSidecarProxyMemoryLimit: "200Mi", + }, + expResources: corev1.ResourceRequirements{ + Limits: corev1.ResourceList{ + corev1.ResourceCPU: cpu2, + corev1.ResourceMemory: mem2, + }, + Requests: corev1.ResourceList{ + corev1.ResourceCPU: cpu1, + corev1.ResourceMemory: mem1, + }, + }, + }, + "defaults set to zero, no annotations": { + webhook: MeshWebhook{ + DefaultProxyCPURequest: zero, + DefaultProxyCPULimit: zero, + DefaultProxyMemoryRequest: zero, + DefaultProxyMemoryLimit: zero, + }, + annotations: nil, + expResources: corev1.ResourceRequirements{ + Limits: corev1.ResourceList{ + corev1.ResourceCPU: zero, + corev1.ResourceMemory: zero, + }, + Requests: corev1.ResourceList{ + corev1.ResourceCPU: zero, + corev1.ResourceMemory: zero, + }, + }, + }, + "annotations set to 0": { + webhook: MeshWebhook{}, + annotations: map[string]string{ + annotationSidecarProxyCPURequest: "0", + 
annotationSidecarProxyMemoryRequest: "0", + annotationSidecarProxyCPULimit: "0", + annotationSidecarProxyMemoryLimit: "0", + }, + expResources: corev1.ResourceRequirements{ + Limits: corev1.ResourceList{ + corev1.ResourceCPU: zero, + corev1.ResourceMemory: zero, + }, + Requests: corev1.ResourceList{ + corev1.ResourceCPU: zero, + corev1.ResourceMemory: zero, + }, + }, + }, + "invalid cpu request": { + webhook: MeshWebhook{}, + annotations: map[string]string{ + annotationSidecarProxyCPURequest: "invalid", + }, + expErr: "parsing annotation consul.hashicorp.com/sidecar-proxy-cpu-request:\"invalid\": quantities must match the regular expression", + }, + "invalid cpu limit": { + webhook: MeshWebhook{}, + annotations: map[string]string{ + annotationSidecarProxyCPULimit: "invalid", + }, + expErr: "parsing annotation consul.hashicorp.com/sidecar-proxy-cpu-limit:\"invalid\": quantities must match the regular expression", + }, + "invalid memory request": { + webhook: MeshWebhook{}, + annotations: map[string]string{ + annotationSidecarProxyMemoryRequest: "invalid", + }, + expErr: "parsing annotation consul.hashicorp.com/sidecar-proxy-memory-request:\"invalid\": quantities must match the regular expression", + }, + "invalid memory limit": { + webhook: MeshWebhook{}, + annotations: map[string]string{ + annotationSidecarProxyMemoryLimit: "invalid", + }, + expErr: "parsing annotation consul.hashicorp.com/sidecar-proxy-memory-limit:\"invalid\": quantities must match the regular expression", + }, + } + + for name, c := range cases { + t.Run(name, func(tt *testing.T) { + require := require.New(tt) + pod := corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: c.annotations, + }, + + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "web", + }, + }, + }, + } + container, err := c.webhook.envoySidecar(testNS, pod, multiPortInfo{}) + if c.expErr != "" { + require.NotNil(err) + require.Contains(err.Error(), c.expErr) + } else { + require.NoError(err) + require.Equal(c.expResources, container.Resources) + } + }) + } +} diff --git a/control-plane/connect-inject/mesh_webhook.go b/control-plane/connect-inject/mesh_webhook.go index 2867ad2f10..54c458af7f 100644 --- a/control-plane/connect-inject/mesh_webhook.go +++ b/control-plane/connect-inject/mesh_webhook.go @@ -9,11 +9,12 @@ import ( "path/filepath" "strconv" "strings" + "time" mapset "github.com/deckarep/golang-set" "github.com/go-logr/logr" - "github.com/hashicorp/consul-k8s/control-plane/consul" "github.com/hashicorp/consul-k8s/control-plane/namespaces" + "github.com/hashicorp/consul/api" "gomodules.xyz/jsonpatch/v2" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" @@ -28,22 +29,17 @@ import ( // "system" level namespaces and are always skipped (never injected). var kubeSystemNamespaces = mapset.NewSetWith(metav1.NamespaceSystem, metav1.NamespacePublic) -// MeshWebhook is the HTTP meshWebhook for admission webhooks. +// Webhook is the HTTP meshWebhook for admission webhooks. type MeshWebhook struct { - Clientset kubernetes.Interface - - // ConsulClientConfig is the config to create a Consul API client. - ConsulConfig *consul.Config - - // ConsulServerConnMgr is the watcher for the Consul server addresses. - ConsulServerConnMgr consul.ServerConnectionManager + ConsulClient *api.Client + Clientset kubernetes.Interface // ImageConsul is the container image for Consul to use. - // ImageConsulDataplane is the container image for Envoy to use. + // ImageEnvoy is the container image for Envoy to use. 
// // Both of these MUST be set. - ImageConsul string - ImageConsulDataplane string + ImageConsul string + ImageEnvoy string // ImageConsulK8S is the container image for consul-k8s to use. // This image is used for the consul-sidecar container. @@ -66,17 +62,6 @@ type MeshWebhook struct { // If not set, will use HTTP. ConsulCACert string - // TLSEnabled indicates whether we should use TLS for communicating to Consul. - TLSEnabled bool - - // ConsulAddress is the address of the Consul server. This should be only the - // host (i.e. not including port or protocol). - ConsulAddress string - - // ConsulTLSServerName is the SNI header to use to connect to the Consul servers - // over TLS. - ConsulTLSServerName string - // ConsulPartition is the name of the Admin Partition that the controller // is deployed in. It is an enterprise feature requiring Consul Enterprise 1.11+. // Its value is an empty string if partitions aren't enabled. @@ -162,23 +147,26 @@ type MeshWebhook struct { // from mesh services. EnableConsulDNS bool + // ResourcePrefix is the prefix used for the installation which is used to determine the Service + // name of the Consul DNS service. + ResourcePrefix string + // EnableOpenShift indicates that when tproxy is enabled, the security context for the Envoy and init // containers should not be added because OpenShift sets a random user for those and will not allow // those containers to be created otherwise. EnableOpenShift bool - // ReleaseNamespace is the Kubernetes namespace where this webhook is running. - ReleaseNamespace string + // ConsulAPITimeout is the duration that the consul API client will + // wait for a response from the API before cancelling the request. + ConsulAPITimeout time.Duration // Log Log logr.Logger - // Log settings for consul-dataplane and connect-init containers. + // Log settings for consul-sidecar LogLevel string LogJSON bool decoder *admission.Decoder - // etcResolvFile is only used in tests to stub out /etc/resolv.conf file. - etcResolvFile string } type multiPortInfo struct { serviceIndex int @@ -256,6 +244,10 @@ func (w *MeshWebhook) Handle(ctx context.Context, req admission.Request) admissi pod.Spec.Containers[i].Env = append(pod.Spec.Containers[i].Env, containerEnvVars...) } + // Add the init container which copies the Consul binary to /consul/connect-inject/. + initCopyContainer := w.initCopyContainer() + pod.Spec.InitContainers = append(pod.Spec.InitContainers, initCopyContainer) + // A user can enable/disable tproxy for an entire namespace via a label. ns, err := w.Clientset.CoreV1().Namespaces().Get(ctx, req.Namespace, metav1.GetOptions{}) if err != nil { @@ -279,7 +271,7 @@ func (w *MeshWebhook) Handle(ctx context.Context, req admission.Request) admissi pod.Spec.InitContainers = append(pod.Spec.InitContainers, initContainer) // Add the Envoy sidecar. - envoySidecar, err := w.consulDataplaneSidecar(*ns, pod, multiPortInfo{}) + envoySidecar, err := w.envoySidecar(*ns, pod, multiPortInfo{}) if err != nil { w.Log.Error(err, "error configuring injection sidecar container", "request name", req.Name) return admission.Errored(http.StatusInternalServerError, fmt.Errorf("error configuring injection sidecar container: %s", err)) @@ -349,7 +341,7 @@ func (w *MeshWebhook) Handle(ctx context.Context, req admission.Request) admissi pod.Spec.InitContainers = append(pod.Spec.InitContainers, initContainer) // Add the Envoy sidecar. 
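	// (In the multi-port case one sidecar is added per service, with mpi
	// identifying the service it belongs to.)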
- envoySidecar, err := w.consulDataplaneSidecar(*ns, pod, mpi) + envoySidecar, err := w.envoySidecar(*ns, pod, mpi) if err != nil { w.Log.Error(err, "error configuring injection sidecar container", "request name", req.Name) return admission.Errored(http.StatusInternalServerError, fmt.Errorf("error configuring injection sidecar container: %s", err)) @@ -358,6 +350,26 @@ func (w *MeshWebhook) Handle(ctx context.Context, req admission.Request) admissi } } + // Now that the consul-sidecar no longer needs to re-register services periodically + // (that functionality lives in the endpoints-controller), + // we only need the consul sidecar to run the metrics merging server. + // First, determine if we need to run the metrics merging server. + shouldRunMetricsMerging, err := w.MetricsConfig.shouldRunMergedMetricsServer(pod) + if err != nil { + w.Log.Error(err, "error determining if metrics merging server should be run", "request name", req.Name) + return admission.Errored(http.StatusInternalServerError, fmt.Errorf("error determining if metrics merging server should be run: %s", err)) + } + + // Add the consul-sidecar only if we need to run the metrics merging server. + if shouldRunMetricsMerging { + consulSidecar, err := w.consulSidecar(pod) + if err != nil { + w.Log.Error(err, "error configuring consul sidecar container", "request name", req.Name) + return admission.Errored(http.StatusInternalServerError, fmt.Errorf("error configuring consul sidecar container: %s", err)) + } + pod.Spec.Containers = append(pod.Spec.Containers, consulSidecar) + } + // pod.Annotations has already been initialized by h.defaultAnnotations() // and does not need to be checked for being a nil value. pod.Annotations[keyInjectStatus] = injected @@ -369,20 +381,11 @@ func (w *MeshWebhook) Handle(ctx context.Context, req admission.Request) admissi } // Add an annotation to the pod sets transparent-proxy-status to enabled or disabled. Used by the CNI plugin - // to determine if it should traffic redirect or not. + // to determine if it should traffic redirect or not if tproxyEnabled { pod.Annotations[keyTransparentProxyStatus] = enabled } - // If tproxy with DNS redirection is enabled, we want to configure dns on the pod. - if tproxyEnabled && w.EnableConsulDNS { - if err = w.configureDNS(&pod, req.Namespace); err != nil { - w.Log.Error(err, "error configuring DNS on the pod", "request name", req.Name) - return admission.Errored(http.StatusInternalServerError, fmt.Errorf("error configuring DNS on the pod: %s", err)) - } - - } - // Add annotations for metrics. if err = w.prometheusAnnotations(&pod); err != nil { w.Log.Error(err, "error configuring prometheus annotations", "request name", req.Name) @@ -414,6 +417,7 @@ func (w *MeshWebhook) Handle(ctx context.Context, req admission.Request) admissi // plugin can apply redirect traffic rules on the pod. if w.EnableCNI && tproxyEnabled { if err := w.addRedirectTrafficConfigAnnotation(&pod, *ns); err != nil { + // todo: update this error message w.Log.Error(err, "error configuring annotation for CNI traffic redirection", "request name", req.Name) return admission.Errored(http.StatusInternalServerError, fmt.Errorf("error configuring annotation for CNI traffic redirection: %s", err)) } @@ -437,19 +441,7 @@ func (w *MeshWebhook) Handle(ctx context.Context, req admission.Request) admissi // all patches are created to guarantee no errors were encountered in // that process before modifying the Consul cluster. 
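	// (Concretely: when namespaces are enabled, the destination Consul
	// namespace is ensured to exist below before the response is returned.)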
if w.EnableNamespaces { - serverState, err := w.ConsulServerConnMgr.State() - if err != nil { - w.Log.Error(err, "error checking or creating namespace", - "ns", w.consulNamespace(req.Namespace), "request name", req.Name) - return admission.Errored(http.StatusInternalServerError, fmt.Errorf("error checking or creating namespace: %s", err)) - } - apiClient, err := consul.NewClientFromConnMgrState(w.ConsulConfig, serverState) - if err != nil { - w.Log.Error(err, "error checking or creating namespace", - "ns", w.consulNamespace(req.Namespace), "request name", req.Name) - return admission.Errored(http.StatusInternalServerError, fmt.Errorf("error checking or creating namespace: %s", err)) - } - if _, err := namespaces.EnsureExists(apiClient, w.consulNamespace(req.Namespace), w.CrossNamespaceACLPolicy); err != nil { + if _, err := namespaces.EnsureExists(w.ConsulClient, w.consulNamespace(req.Namespace), w.CrossNamespaceACLPolicy); err != nil { w.Log.Error(err, "error checking or creating namespace", "ns", w.consulNamespace(req.Namespace), "request name", req.Name) return admission.Errored(http.StatusInternalServerError, fmt.Errorf("error checking or creating namespace: %s", err)) @@ -487,7 +479,7 @@ func (w *MeshWebhook) overwriteProbes(ns corev1.Namespace, pod *corev1.Pod) erro if tproxyEnabled && overwriteProbes { for i, container := range pod.Spec.Containers { // skip the "envoy-sidecar" container from having it's probes overridden - if container.Name == sidecarContainer { + if container.Name == envoySidecarContainer { continue } if container.LivenessProbe != nil && container.LivenessProbe.HTTPGet != nil { @@ -627,11 +619,11 @@ func portValue(pod corev1.Pod, value string) (int32, error) { return int32(raw), err } -func findServiceAccountVolumeMount(pod corev1.Pod, multiPortSvcName string) (corev1.VolumeMount, string, error) { +func findServiceAccountVolumeMount(pod corev1.Pod, multiPort bool, multiPortSvcName string) (corev1.VolumeMount, string, error) { // In the case of a multiPort pod, there may be another service account // token mounted as a different volume. Its name must be -serviceaccount. // If not we'll fall back to the service account for the pod. 
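	// (The expected volume name is "<multiPortSvcName>-service-account", as
	// constructed below.)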
- if multiPortSvcName != "" { + if multiPort { for _, v := range pod.Spec.Volumes { if v.Name == fmt.Sprintf("%s-service-account", multiPortSvcName) { mountPath := fmt.Sprintf("/consul/serviceaccount-%s", multiPortSvcName) diff --git a/control-plane/connect-inject/mesh_webhook_ent_test.go b/control-plane/connect-inject/mesh_webhook_ent_test.go index bfa406580b..7a34ee3d73 100644 --- a/control-plane/connect-inject/mesh_webhook_ent_test.go +++ b/control-plane/connect-inject/mesh_webhook_ent_test.go @@ -5,12 +5,13 @@ package connectinject import ( "context" "testing" + "time" "github.com/deckarep/golang-set" logrtest "github.com/go-logr/logr/testing" - "github.com/hashicorp/consul-k8s/control-plane/helper/test" "github.com/hashicorp/consul/api" "github.com/hashicorp/consul/sdk/testutil" + "github.com/hashicorp/consul/sdk/testutil/retry" "github.com/stretchr/testify/require" admissionv1 "k8s.io/api/admission/v1" corev1 "k8s.io/api/core/v1" @@ -230,37 +231,47 @@ func TestHandler_MutateWithNamespaces(t *testing.T) { for _, tt := range cases { t.Run(tt.Name, func(t *testing.T) { - testClient := test.TestServerWithMockConnMgrWatcher(t, nil) - client := testClient.APIClient + require := require.New(t) - // Add the client config and watcher to the test's meshWebhook - tt.Webhook.ConsulConfig = testClient.Cfg - tt.Webhook.ConsulServerConnMgr = testClient.Watcher + // Set up consul server + a, err := testutil.NewTestServerConfigT(t, nil) + require.NoError(err) + a.WaitForSerfCheck(t) + defer a.Stop() + + // Set up consul client + client, err := api.NewClient(&api.Config{ + Address: a.HTTPAddr, + }) + require.NoError(err) + + // Add the client to the test's meshWebhook + tt.Webhook.ConsulClient = client // Mutate! resp := tt.Webhook.Handle(context.Background(), tt.Req) - require.Equal(t, resp.Allowed, true) + require.Equal(resp.Allowed, true) // Check all the namespace things // Check that we have the right number of namespaces namespaces, _, err := client.Namespaces().List(&api.QueryOptions{}) - require.NoError(t, err) - require.Len(t, namespaces, len(tt.ExpectedNamespaces)) + require.NoError(err) + require.Len(namespaces, len(tt.ExpectedNamespaces)) // Check the namespace details for _, ns := range tt.ExpectedNamespaces { actNamespace, _, err := client.Namespaces().Read(ns, &api.QueryOptions{}) - require.NoErrorf(t, err, "error getting namespace %s", ns) - require.NotNilf(t, actNamespace, "namespace %s was nil", ns) - require.Equalf(t, ns, actNamespace.Name, "namespace %s was improperly named", ns) + require.NoErrorf(err, "error getting namespace %s", ns) + require.NotNilf(actNamespace, "namespace %s was nil", ns) + require.Equalf(ns, actNamespace.Name, "namespace %s was improperly named", ns) // Check created namespace properties if ns != "default" { - require.Equalf(t, "Auto-generated by consul-k8s", actNamespace.Description, + require.Equalf("Auto-generated by consul-k8s", actNamespace.Description, "wrong namespace description for namespace %s", ns) - require.Containsf(t, actNamespace.Meta, "external-source", + require.Containsf(actNamespace.Meta, "external-source", "namespace %s does not contain external-source metadata key", ns) - require.Equalf(t, "kubernetes", actNamespace.Meta["external-source"], + require.Equalf("kubernetes", actNamespace.Meta["external-source"], "namespace %s has wrong value for external-source metadata key", ns) } @@ -485,16 +496,37 @@ func TestHandler_MutateWithNamespaces_ACLs(t *testing.T) { for _, tt := range cases { t.Run(tt.Name, func(t *testing.T) { // Set up consul 
server - adminToken := "123e4567-e89b-12d3-a456-426614174000" - testClient := test.TestServerWithMockConnMgrWatcher(t, func(c *testutil.TestServerConfig) { + a, err := testutil.NewTestServerConfigT(t, func(c *testutil.TestServerConfig) { c.ACL.Enabled = true - c.ACL.Tokens.InitialManagement = adminToken }) - client := testClient.APIClient + a.WaitForSerfCheck(t) + defer a.Stop() + + // Set up a client for bootstrapping + bootClient, err := api.NewClient(&api.Config{ + Address: a.HTTPAddr, + }) + require.NoError(t, err) + + // Bootstrap the server and get the bootstrap token + var bootstrapResp *api.ACLToken + timer := &retry.Timer{Timeout: 10 * time.Second, Wait: 500 * time.Millisecond} + retry.RunWith(timer, t, func(r *retry.R) { + bootstrapResp, _, err = bootClient.ACL().Bootstrap() + require.NoError(r, err) + }) + bootstrapToken := bootstrapResp.SecretID + require.NotEmpty(t, bootstrapToken) + + // Set up consul client + client, err := api.NewClient(&api.Config{ + Address: a.HTTPAddr, + Token: bootstrapToken, + }) + require.NoError(t, err) - // Add the client config and watcher to the test's meshWebhook - tt.Webhook.ConsulConfig = testClient.Cfg - tt.Webhook.ConsulServerConnMgr = testClient.Watcher + // Add the client to the test's meshWebhook + tt.Webhook.ConsulClient = client // Create cross namespace policy // This would have been created by the acl bootstrapper in the @@ -589,14 +621,24 @@ func TestHandler_MutateWithNamespaces_Annotation(t *testing.T) { for name, c := range cases { t.Run(name, func(t *testing.T) { - testClient := test.TestServerWithMockConnMgrWatcher(t, nil) + require := require.New(t) + + // Set up consul server + a, err := testutil.NewTestServerConfigT(t, nil) + require.NoError(err) + a.WaitForSerfCheck(t) + defer a.Stop() s := runtime.NewScheme() s.AddKnownTypes(schema.GroupVersion{Group: "", Version: "v1"}, &corev1.Pod{}) decoder, err := admission.NewDecoder(s) - require.NoError(t, err) + require.NoError(err) - require.NoError(t, err) + // Set up consul client + client, err := api.NewClient(&api.Config{ + Address: a.HTTPAddr, + }) + require.NoError(err) webhook := MeshWebhook{ Log: logrtest.TestLogger{T: t}, @@ -606,8 +648,7 @@ func TestHandler_MutateWithNamespaces_Annotation(t *testing.T) { ConsulDestinationNamespace: c.ConsulDestinationNamespace, EnableK8SNSMirroring: c.Mirroring, K8SNSMirroringPrefix: c.MirroringPrefix, - ConsulConfig: testClient.Cfg, - ConsulServerConnMgr: testClient.Watcher, + ConsulClient: client, decoder: decoder, Clientset: clientWithNamespace(sourceKubeNS), } @@ -631,7 +672,7 @@ func TestHandler_MutateWithNamespaces_Annotation(t *testing.T) { }, } resp := webhook.Handle(context.Background(), request) - require.Equal(t, resp.Allowed, true) + require.Equal(resp.Allowed, true) // Check that the annotation was added as a patch. 
var consulNamespaceAnnotationValue string @@ -644,8 +685,8 @@ func TestHandler_MutateWithNamespaces_Annotation(t *testing.T) { } } } - require.NotEmpty(t, consulNamespaceAnnotationValue, "no namespace annotation set") - require.Equal(t, c.ExpNamespaceAnnotation, consulNamespaceAnnotationValue) + require.NotEmpty(consulNamespaceAnnotationValue, "no namespace annotation set") + require.Equal(c.ExpNamespaceAnnotation, consulNamespaceAnnotationValue) }) } } diff --git a/control-plane/connect-inject/mesh_webhook_test.go b/control-plane/connect-inject/mesh_webhook_test.go index 7726d3e1db..cc7a9011c6 100644 --- a/control-plane/connect-inject/mesh_webhook_test.go +++ b/control-plane/connect-inject/mesh_webhook_test.go @@ -8,7 +8,6 @@ import ( mapset "github.com/deckarep/golang-set" logrtest "github.com/go-logr/logr/testing" - "github.com/hashicorp/consul-k8s/control-plane/consul" "github.com/hashicorp/consul-k8s/control-plane/namespaces" "github.com/stretchr/testify/require" "gomodules.xyz/jsonpatch/v2" @@ -573,6 +572,85 @@ func TestHandlerHandle(t *testing.T) { }, }, }, + + { + "when metrics merging is enabled, we should inject the consul-sidecar and add prometheus annotations", + MeshWebhook{ + Log: logrtest.TestLogger{T: t}, + AllowK8sNamespacesSet: mapset.NewSetWith("*"), + DenyK8sNamespacesSet: mapset.NewSet(), + MetricsConfig: MetricsConfig{ + DefaultEnableMetrics: true, + DefaultEnableMetricsMerging: true, + }, + decoder: decoder, + Clientset: defaultTestClientWithNamespace(), + }, + admission.Request{ + AdmissionRequest: admissionv1.AdmissionRequest{ + Namespace: namespaces.DefaultNamespace, + Object: encodeRaw(t, &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + "testLabel": "123", + }, + Annotations: map[string]string{ + annotationServiceMetricsPort: "1234", + }, + }, + Spec: basicSpec, + }), + }, + }, + "", + []jsonpatch.Operation{ + { + Operation: "add", + Path: "/spec/volumes", + }, + { + Operation: "add", + Path: "/spec/initContainers", + }, + { + Operation: "add", + Path: "/spec/containers/1", + }, + { + Operation: "add", + Path: "/spec/containers/2", + }, + { + Operation: "add", + Path: "/metadata/annotations/" + escapeJSONPointer(keyInjectStatus), + }, + { + Operation: "add", + Path: "/metadata/annotations/" + escapeJSONPointer(annotationOriginalPod), + }, + { + Operation: "add", + Path: "/metadata/annotations/" + escapeJSONPointer(annotationPrometheusScrape), + }, + { + Operation: "add", + Path: "/metadata/annotations/" + escapeJSONPointer(annotationPrometheusPath), + }, + { + Operation: "add", + Path: "/metadata/annotations/" + escapeJSONPointer(annotationPrometheusPort), + }, + { + Operation: "add", + Path: "/metadata/labels/" + escapeJSONPointer(keyInjectStatus), + }, + { + Operation: "add", + Path: "/metadata/labels/" + escapeJSONPointer(keyManagedBy), + }, + }, + }, + { "tproxy with overwriteProbes is enabled", MeshWebhook{ @@ -773,14 +851,14 @@ func TestHandlerHandle(t *testing.T) { for _, tt := range cases { t.Run(tt.Name, func(t *testing.T) { - tt.Webhook.ConsulConfig = &consul.Config{HTTPPort: 8500} + require := require.New(t) ctx := context.Background() resp := tt.Webhook.Handle(ctx, tt.Req) if (tt.Err == "") != resp.Allowed { t.Fatalf("allowed: %v, expected err: %v", resp.Allowed, tt.Err) } if tt.Err != "" { - require.Contains(t, resp.Result.Message, tt.Err) + require.Contains(resp.Result.Message, tt.Err) return } @@ -790,7 +868,7 @@ func TestHandlerHandle(t *testing.T) { actual[i].Value = nil } } - require.ElementsMatch(t, tt.Patches, 
actual) + require.ElementsMatch(tt.Patches, actual) }) } } @@ -1693,7 +1771,7 @@ func TestOverwriteProbes(t *testing.T) { overwriteProbes: true, podContainers: []corev1.Container{ { - Name: sidecarContainer, + Name: envoySidecarContainer, }, }, }, diff --git a/control-plane/connect-inject/metrics_configuration.go b/control-plane/connect-inject/metrics_configuration.go index 1da82c59b9..fc8c5d574a 100644 --- a/control-plane/connect-inject/metrics_configuration.go +++ b/control-plane/connect-inject/metrics_configuration.go @@ -11,7 +11,6 @@ import ( // MetricsConfig represents configuration common to connect-inject components related to metrics. type MetricsConfig struct { DefaultEnableMetrics bool - EnableGatewayMetrics bool DefaultEnableMetricsMerging bool DefaultMergedMetricsPort string DefaultPrometheusScrapePort string diff --git a/control-plane/connect-inject/peering_acceptor_controller.go b/control-plane/connect-inject/peering_acceptor_controller.go index dca7d989dd..1125c96b51 100644 --- a/control-plane/connect-inject/peering_acceptor_controller.go +++ b/control-plane/connect-inject/peering_acceptor_controller.go @@ -9,7 +9,6 @@ import ( "github.com/go-logr/logr" consulv1alpha1 "github.com/hashicorp/consul-k8s/control-plane/api/v1alpha1" - "github.com/hashicorp/consul-k8s/control-plane/consul" "github.com/hashicorp/consul/api" corev1 "k8s.io/api/core/v1" k8serrors "k8s.io/apimachinery/pkg/api/errors" @@ -30,18 +29,14 @@ import ( // PeeringAcceptorController reconciles a PeeringAcceptor object. type PeeringAcceptorController struct { client.Client - // ConsulClientConfig is the config to create a Consul API client. - ConsulClientConfig *consul.Config - // ConsulServerConnMgr is the watcher for the Consul server addresses. - ConsulServerConnMgr consul.ServerConnectionManager - // ExposeServersServiceName is the Kubernetes service name that the Consul servers are using. - ExposeServersServiceName string - // ReleaseNamespace is the namespace where this controller is deployed. - ReleaseNamespace string - // Log is the logger for this controller - Log logr.Logger - // Scheme is the API scheme that this controller should have. - Scheme *runtime.Scheme + // ConsulClient points at the agent local to the connect-inject deployment pod. + ConsulClient *api.Client + ExposeServersServiceName string + ReadServerExternalService bool + TokenServerAddresses []string + ReleaseNamespace string + Log logr.Logger + Scheme *runtime.Scheme context.Context } @@ -88,18 +83,6 @@ func (r *PeeringAcceptorController) Reconcile(ctx context.Context, req ctrl.Requ return ctrl.Result{}, err } - // Create Consul client for this reconcile. - serverState, err := r.ConsulServerConnMgr.State() - if err != nil { - r.Log.Error(err, "failed to get Consul server state", "name", req.Name, "ns", req.Namespace) - return ctrl.Result{}, err - } - apiClient, err := consul.NewClientFromConnMgrState(r.ConsulClientConfig, serverState) - if err != nil { - r.Log.Error(err, "failed to create Consul API client", "name", req.Name, "ns", req.Namespace) - return ctrl.Result{}, err - } - // The DeletionTimestamp is zero when the object has not been marked for deletion. The finalizer is added // in case it does not exist to all resources. If the DeletionTimestamp is non-zero, the object has been // marked for deletion and goes into the deletion workflow. 
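	// (For an acceptor marked for deletion, the peering is removed from Consul
	// and, when the secret backend is "kubernetes", the generated secret is
	// deleted before the finalizer is removed.)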
@@ -113,7 +96,7 @@ func (r *PeeringAcceptorController) Reconcile(ctx context.Context, req ctrl.Requ } else { if containsString(acceptor.Finalizers, FinalizerName) { r.Log.Info("PeeringAcceptor was deleted, deleting from Consul", "name", req.Name, "ns", req.Namespace) - err := r.deletePeering(ctx, apiClient, req.Name) + err := r.deletePeering(ctx, req.Name) if acceptor.Secret().Backend == "kubernetes" { err = r.deleteK8sSecret(ctx, acceptor.Secret().Name, acceptor.Namespace) } @@ -126,6 +109,19 @@ func (r *PeeringAcceptorController) Reconcile(ctx context.Context, req ctrl.Requ } } + // Scrape the address of the server service + var serverExternalAddresses []string + if r.ReadServerExternalService { + addrs, err := r.getExposeServersServiceAddresses() + if err != nil { + r.updateStatusError(ctx, acceptor, KubernetesError, err) + return ctrl.Result{}, err + } + serverExternalAddresses = addrs + } else if len(r.TokenServerAddresses) > 0 { + serverExternalAddresses = r.TokenServerAddresses + } + // existingSecret will be nil if it doesn't exist, and have the contents of the secret if it does exist. existingSecret, err := r.getExistingSecret(ctx, acceptor.Secret().Name, acceptor.Namespace) if err != nil { @@ -135,7 +131,7 @@ func (r *PeeringAcceptorController) Reconcile(ctx context.Context, req ctrl.Requ } // Read the peering from Consul. - peering, _, err := apiClient.Peerings().Read(ctx, acceptor.Name, nil) + peering, _, err := r.ConsulClient.Peerings().Read(ctx, acceptor.Name, nil) if err != nil { r.Log.Error(err, "failed to get Peering from Consul", "name", req.Name) return ctrl.Result{}, err @@ -155,7 +151,7 @@ func (r *PeeringAcceptorController) Reconcile(ctx context.Context, req ctrl.Requ } // Generate and store the peering token. var resp *api.PeeringGenerateTokenResponse - if resp, err = r.generateToken(ctx, apiClient, acceptor.Name); err != nil { + if resp, err = r.generateToken(ctx, acceptor.Name, serverExternalAddresses); err != nil { r.updateStatusError(ctx, acceptor, ConsulAgentError, err) return ctrl.Result{}, err } @@ -185,7 +181,7 @@ func (r *PeeringAcceptorController) Reconcile(ctx context.Context, req ctrl.Requ // Generate and store the peering token. var resp *api.PeeringGenerateTokenResponse r.Log.Info("generating new token for an existing peering") - if resp, err = r.generateToken(ctx, apiClient, acceptor.Name); err != nil { + if resp, err = r.generateToken(ctx, acceptor.Name, serverExternalAddresses); err != nil { return ctrl.Result{}, err } if acceptor.Secret().Backend == "kubernetes" { @@ -344,11 +340,14 @@ func (r *PeeringAcceptorController) SetupWithManager(mgr ctrl.Manager) error { } // generateToken is a helper function that calls the Consul api to generate a token for the peer. 
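// When serverExternalAddresses is non-empty, the addresses are set on the
// request so they are embedded in the generated token, e.g. (illustrative)
// ["1.1.1.1:8503", "2.2.2.2:8503"] as exercised by the accompanying tests.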
-func (r *PeeringAcceptorController) generateToken(ctx context.Context, apiClient *api.Client, peerName string) (*api.PeeringGenerateTokenResponse, error) { +func (r *PeeringAcceptorController) generateToken(ctx context.Context, peerName string, serverExternalAddresses []string) (*api.PeeringGenerateTokenResponse, error) { req := api.PeeringGenerateTokenRequest{ PeerName: peerName, } - resp, _, err := apiClient.Peerings().GenerateToken(ctx, req, nil) + if len(serverExternalAddresses) > 0 { + req.ServerExternalAddresses = serverExternalAddresses + } + resp, _, err := r.ConsulClient.Peerings().GenerateToken(ctx, req, nil) if err != nil { r.Log.Error(err, "failed to get generate token", "err", err) return nil, err @@ -357,8 +356,8 @@ func (r *PeeringAcceptorController) generateToken(ctx context.Context, apiClient } // deletePeering is a helper function that calls the Consul api to delete a peering. -func (r *PeeringAcceptorController) deletePeering(ctx context.Context, apiClient *api.Client, peerName string) error { - _, err := apiClient.Peerings().Delete(ctx, peerName, nil) +func (r *PeeringAcceptorController) deletePeering(ctx context.Context, peerName string) error { + _, err := r.ConsulClient.Peerings().Delete(ctx, peerName, nil) if err != nil { r.Log.Error(err, "failed to delete Peering from Consul", "name", peerName) return err @@ -390,6 +389,73 @@ func (r *PeeringAcceptorController) requestsForPeeringTokens(object client.Objec return []ctrl.Request{} } +func (r *PeeringAcceptorController) getExposeServersServiceAddresses() ([]string, error) { + r.Log.Info("getting external address from expose-servers service", "name", r.ExposeServersServiceName) + var serverExternalAddresses []string + + serverService := &corev1.Service{} + key := types.NamespacedName{ + Name: r.ExposeServersServiceName, + Namespace: r.ReleaseNamespace, + } + err := r.Client.Get(r.Context, key, serverService) + if err != nil { + return nil, err + } + switch serverService.Spec.Type { + case corev1.ServiceTypeNodePort: + nodes := corev1.NodeList{} + err := r.Client.List(r.Context, &nodes) + if err != nil { + return nil, err + } + if len(nodes.Items) == 0 { + return nil, fmt.Errorf("no nodes were found for scraping server addresses from expose-servers service") + } + var grpcNodePort int32 + for _, port := range serverService.Spec.Ports { + if port.Name == "grpc" { + grpcNodePort = port.NodePort + } + } + if grpcNodePort == 0 { + return nil, fmt.Errorf("no grpc port was found for expose-servers service") + } + for _, node := range nodes.Items { + addrs := node.Status.Addresses + for _, addr := range addrs { + if addr.Type == corev1.NodeInternalIP { + serverExternalAddresses = append(serverExternalAddresses, fmt.Sprintf("%s:%d", addr.Address, grpcNodePort)) + } + } + } + if len(serverExternalAddresses) == 0 { + return nil, fmt.Errorf("no server addresses were scraped from expose-servers service") + } + return serverExternalAddresses, nil + case corev1.ServiceTypeLoadBalancer: + lbAddrs := serverService.Status.LoadBalancer.Ingress + if len(lbAddrs) < 1 { + return nil, fmt.Errorf("unable to find load balancer address for %s service, retrying", r.ExposeServersServiceName) + } + for _, lbAddr := range lbAddrs { + // When the service is of type load balancer, the grpc port is hardcoded to 8502. 
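+			// (8502 is Consul's default gRPC port; the NodePort branch above
+			// instead uses the per-node "grpc" NodePort.)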
+ if lbAddr.IP != "" { + serverExternalAddresses = append(serverExternalAddresses, fmt.Sprintf("%s:%s", lbAddr.IP, "8502")) + } + if lbAddr.Hostname != "" { + serverExternalAddresses = append(serverExternalAddresses, fmt.Sprintf("%s:%s", lbAddr.Hostname, "8502")) + } + } + if len(serverExternalAddresses) == 0 { + return nil, fmt.Errorf("unable to find load balancer address for %s service, retrying", r.ExposeServersServiceName) + } + default: + return nil, fmt.Errorf("only NodePort and LoadBalancer service types are supported") + } + return serverExternalAddresses, nil +} + // filterPeeringAcceptors receives meta and object information for Kubernetes resources that are being watched, // which in this case are Secrets. It only returns true if the Secret is a Peering Token Secret. It reads the labels // from the meta of the resource and uses the values of the "consul.hashicorp.com/peering-token" label to validate that diff --git a/control-plane/connect-inject/peering_acceptor_controller_test.go b/control-plane/connect-inject/peering_acceptor_controller_test.go index 02118cd171..7e649c2394 100644 --- a/control-plane/connect-inject/peering_acceptor_controller_test.go +++ b/control-plane/connect-inject/peering_acceptor_controller_test.go @@ -9,8 +9,8 @@ import ( logrtest "github.com/go-logr/logr/testing" "github.com/hashicorp/consul-k8s/control-plane/api/v1alpha1" - "github.com/hashicorp/consul-k8s/control-plane/helper/test" "github.com/hashicorp/consul/api" + "github.com/hashicorp/consul/sdk/testutil" "github.com/hashicorp/consul/sdk/testutil/retry" "github.com/stretchr/testify/require" corev1 "k8s.io/api/core/v1" @@ -28,15 +28,19 @@ import ( // TestReconcile_CreateUpdatePeeringAcceptor creates a peering acceptor. func TestReconcile_CreateUpdatePeeringAcceptor(t *testing.T) { t.Parallel() + nodeName := "test-node" cases := []struct { - name string - k8sObjects func() []runtime.Object - expectedConsulPeerings []*api.Peering - expectedK8sSecrets func() []*corev1.Secret - expErr string - expectedStatus *v1alpha1.PeeringAcceptorStatus - expectDeletedK8sSecret *types.NamespacedName - initialConsulPeerName string + name string + k8sObjects func() []runtime.Object + expectedConsulPeerings []*api.Peering + expectedK8sSecrets func() []*corev1.Secret + expErr string + expectedStatus *v1alpha1.PeeringAcceptorStatus + expectDeletedK8sSecret *types.NamespacedName + initialConsulPeerName string + externalAddresses []string + readServerExposeService bool + expectedTokenAddresses []string }{ { name: "New PeeringAcceptor creates a peering in Consul and generates a token", @@ -86,7 +90,9 @@ func TestReconcile_CreateUpdatePeeringAcceptor(t *testing.T) { }, }, { - name: "PeeringAcceptor generates a token with expose server addresses", + name: "PeeringAcceptor generates a token with expose server addresses", + readServerExposeService: true, + expectedTokenAddresses: []string{"1.1.1.1:8503"}, k8sObjects: func() []runtime.Object { service := &corev1.Service{ ObjectMeta: metav1.ObjectMeta{ @@ -150,6 +156,55 @@ func TestReconcile_CreateUpdatePeeringAcceptor(t *testing.T) { return []*corev1.Secret{secret} }, }, + { + name: "PeeringAcceptor generates a token with external addresses specified", + externalAddresses: []string{"1.1.1.1:8503", "2.2.2.2:8503"}, + expectedTokenAddresses: []string{"1.1.1.1:8503", "2.2.2.2:8503"}, + k8sObjects: func() []runtime.Object { + acceptor := &v1alpha1.PeeringAcceptor{ + ObjectMeta: metav1.ObjectMeta{ + Name: "acceptor-created", + Namespace: "default", + }, + Spec: 
v1alpha1.PeeringAcceptorSpec{ + Peer: &v1alpha1.Peer{ + Secret: &v1alpha1.Secret{ + Name: "acceptor-created-secret", + Key: "data", + Backend: "kubernetes", + }, + }, + }, + } + return []runtime.Object{acceptor} + }, + expectedStatus: &v1alpha1.PeeringAcceptorStatus{ + SecretRef: &v1alpha1.SecretRefStatus{ + Secret: v1alpha1.Secret{ + Name: "acceptor-created-secret", + Key: "data", + Backend: "kubernetes", + }, + }, + }, + expectedConsulPeerings: []*api.Peering{ + { + Name: "acceptor-created", + }, + }, + expectedK8sSecrets: func() []*corev1.Secret { + secret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "acceptor-created-secret", + Namespace: "default", + }, + StringData: map[string]string{ + "data": "tokenstub", + }, + } + return []*corev1.Secret{secret} + }, + }, { name: "When the secret already exists (not created by controller), it is updated with the contents of the new peering token and an owner reference is added", k8sObjects: func() []runtime.Object { @@ -502,24 +557,35 @@ func TestReconcile_CreateUpdatePeeringAcceptor(t *testing.T) { fakeClient := fake.NewClientBuilder().WithScheme(s).WithRuntimeObjects(k8sObjects...).Build() // Create test consul server. - testClient := test.TestServerWithMockConnMgrWatcher(t, nil) - consulClient := testClient.APIClient + consul, err := testutil.NewTestServerConfigT(t, func(c *testutil.TestServerConfig) { + c.NodeName = nodeName + }) + require.NoError(t, err) + defer consul.Stop() + consul.WaitForServiceIntentions(t) + + cfg := &api.Config{ + Address: consul.HTTPAddr, + } + consulClient, err := api.NewClient(cfg) + require.NoError(t, err) if tt.initialConsulPeerName != "" { // Add the initial peerings into Consul by calling the Generate token endpoint. - _, _, err := consulClient.Peerings().GenerateToken(context.Background(), api.PeeringGenerateTokenRequest{PeerName: tt.initialConsulPeerName}, nil) + _, _, err = consulClient.Peerings().GenerateToken(context.Background(), api.PeeringGenerateTokenRequest{PeerName: tt.initialConsulPeerName}, nil) require.NoError(t, err) } // Create the peering acceptor controller controller := &PeeringAcceptorController{ - Client: fakeClient, - ExposeServersServiceName: "test-expose-servers", - ReleaseNamespace: "default", - Log: logrtest.TestLogger{T: t}, - ConsulClientConfig: testClient.Cfg, - ConsulServerConnMgr: testClient.Watcher, - Scheme: s, + Client: fakeClient, + TokenServerAddresses: tt.externalAddresses, + ReadServerExternalService: tt.readServerExposeService, + ExposeServersServiceName: "test-expose-servers", + ReleaseNamespace: "default", + Log: logrtest.TestLogger{T: t}, + ConsulClient: consulClient, + Scheme: s, } namespacedName := types.NamespacedName{ Name: "acceptor-created", @@ -564,9 +630,14 @@ func TestReconcile_CreateUpdatePeeringAcceptor(t *testing.T) { decodedTokenData, err := base64.StdEncoding.DecodeString(string(createdSecret.Data["data"])) require.NoError(t, err) - require.Contains(t, string(decodedTokenData), "\"CA\":") + require.Contains(t, string(decodedTokenData), "\"CA\":null") require.Contains(t, string(decodedTokenData), "\"ServerAddresses\"") - require.Contains(t, string(decodedTokenData), "\"ServerName\":\"server.dc1.peering.11111111-2222-3333-4444-555555555555.consul\"") + require.Contains(t, string(decodedTokenData), "\"ServerName\":\"server.dc1.consul\"") + if len(tt.expectedTokenAddresses) > 0 { + for _, addr := range tt.externalAddresses { + require.Contains(t, string(decodedTokenData), addr) + } + } // Get the reconciled PeeringAcceptor and make assertions on 
the status acceptor := &v1alpha1.PeeringAcceptor{} @@ -622,22 +693,30 @@ func TestReconcile_DeletePeeringAcceptor(t *testing.T) { s.AddKnownTypes(v1alpha1.GroupVersion, &v1alpha1.PeeringAcceptor{}, &v1alpha1.PeeringAcceptorList{}) fakeClient := fake.NewClientBuilder().WithScheme(s).WithRuntimeObjects(k8sObjects...).Build() - // Create test consulServer server // Create test consul server. - testClient := test.TestServerWithMockConnMgrWatcher(t, nil) - consulClient := testClient.APIClient + consul, err := testutil.NewTestServerConfigT(t, func(c *testutil.TestServerConfig) { + c.NodeName = "test-node" + }) + require.NoError(t, err) + defer consul.Stop() + consul.WaitForServiceIntentions(t) + + cfg := &api.Config{ + Address: consul.HTTPAddr, + } + consulClient, err := api.NewClient(cfg) + require.NoError(t, err) // Add the initial peerings into Consul by calling the Generate token endpoint. - _, _, err := consulClient.Peerings().GenerateToken(context.Background(), api.PeeringGenerateTokenRequest{PeerName: "acceptor-deleted"}, nil) + _, _, err = consulClient.Peerings().GenerateToken(context.Background(), api.PeeringGenerateTokenRequest{PeerName: "acceptor-deleted"}, nil) require.NoError(t, err) // Create the peering acceptor controller. controller := &PeeringAcceptorController{ - Client: fakeClient, - Log: logrtest.TestLogger{T: t}, - ConsulClientConfig: testClient.Cfg, - ConsulServerConnMgr: testClient.Watcher, - Scheme: s, + Client: fakeClient, + Log: logrtest.TestLogger{T: t}, + ConsulClient: consulClient, + Scheme: s, } namespacedName := types.NamespacedName{ Name: "acceptor-deleted", @@ -671,6 +750,7 @@ func TestReconcile_DeletePeeringAcceptor(t *testing.T) { // scenarios involving the user setting the version annotation. func TestReconcile_VersionAnnotation(t *testing.T) { t.Parallel() + nodeName := "test-node" cases := map[string]struct { annotations map[string]string expErr string @@ -769,19 +849,28 @@ func TestReconcile_VersionAnnotation(t *testing.T) { fakeClient := fake.NewClientBuilder().WithScheme(s).WithRuntimeObjects(k8sObjects...).Build() // Create test consul server. 
- testClient := test.TestServerWithMockConnMgrWatcher(t, nil) - consulClient := testClient.APIClient + consul, err := testutil.NewTestServerConfigT(t, func(c *testutil.TestServerConfig) { + c.NodeName = nodeName + }) + require.NoError(t, err) + defer consul.Stop() + consul.WaitForServiceIntentions(t) + + cfg := &api.Config{ + Address: consul.HTTPAddr, + } + consulClient, err := api.NewClient(cfg) + require.NoError(t, err) - _, _, err := consulClient.Peerings().GenerateToken(context.Background(), api.PeeringGenerateTokenRequest{PeerName: "acceptor-created"}, nil) + _, _, err = consulClient.Peerings().GenerateToken(context.Background(), api.PeeringGenerateTokenRequest{PeerName: "acceptor-created"}, nil) require.NoError(t, err) // Create the peering acceptor controller controller := &PeeringAcceptorController{ - Client: fakeClient, - Log: logrtest.TestLogger{T: t}, - ConsulClientConfig: testClient.Cfg, - ConsulServerConnMgr: testClient.Watcher, - Scheme: s, + Client: fakeClient, + Log: logrtest.TestLogger{T: t}, + ConsulClient: consulClient, + Scheme: s, } namespacedName := types.NamespacedName{ Name: "acceptor-created", @@ -1016,7 +1105,7 @@ func TestAcceptorUpdateStatus(t *testing.T) { }, }, Conditions: v1alpha1.Conditions{ - v1alpha1.Condition{ + { Type: v1alpha1.ConditionSynced, Status: corev1.ConditionTrue, }, @@ -1059,7 +1148,7 @@ func TestAcceptorUpdateStatus(t *testing.T) { }, }, Conditions: v1alpha1.Conditions{ - v1alpha1.Condition{ + { Type: v1alpha1.ConditionSynced, Status: corev1.ConditionTrue, }, @@ -1101,6 +1190,7 @@ func TestAcceptorUpdateStatus(t *testing.T) { require.Equal(t, tt.expStatus.SecretRef.Backend, acceptor.SecretRef().Backend) require.Equal(t, tt.expStatus.SecretRef.ResourceVersion, acceptor.SecretRef().ResourceVersion) require.Equal(t, tt.expStatus.Conditions[0].Message, acceptor.Status.Conditions[0].Message) + }) } } @@ -1132,7 +1222,7 @@ func TestAcceptorUpdateStatusError(t *testing.T) { reconcileErr: errors.New("this is an error"), expStatus: v1alpha1.PeeringAcceptorStatus{ Conditions: v1alpha1.Conditions{ - v1alpha1.Condition{ + { Type: v1alpha1.ConditionSynced, Status: corev1.ConditionFalse, Reason: InternalError, @@ -1485,3 +1575,301 @@ func TestAcceptor_RequestsForPeeringTokens(t *testing.T) { }) } } + +func TestGetExposeServersServiceAddress(t *testing.T) { + t.Parallel() + cases := []struct { + name string + k8sObjects func() []runtime.Object + releaseNamespace string + expAddresses []string + expErr string + }{ + { + name: "Valid LoadBalancer service", + releaseNamespace: "test", + k8sObjects: func() []runtime.Object { + exposeServersService := &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-expose-servers", + Namespace: "test", + }, + Spec: corev1.ServiceSpec{ + Type: corev1.ServiceTypeLoadBalancer, + }, + Status: corev1.ServiceStatus{ + LoadBalancer: corev1.LoadBalancerStatus{ + Ingress: []corev1.LoadBalancerIngress{ + { + IP: "1.2.3.4", + }, + }, + }, + }, + } + return []runtime.Object{exposeServersService} + }, + expAddresses: []string{"1.2.3.4:8502"}, + }, + { + name: "Valid LoadBalancer service with Hostname", + releaseNamespace: "test", + k8sObjects: func() []runtime.Object { + exposeServersService := &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-expose-servers", + Namespace: "test", + }, + Spec: corev1.ServiceSpec{ + Type: corev1.ServiceTypeLoadBalancer, + }, + Status: corev1.ServiceStatus{ + LoadBalancer: corev1.LoadBalancerStatus{ + Ingress: []corev1.LoadBalancerIngress{ + { + Hostname: "foo.bar.baz", + }, + }, 
+ }, + }, + } + return []runtime.Object{exposeServersService} + }, + expAddresses: []string{"foo.bar.baz:8502"}, + }, + { + name: "LoadBalancer has no addresses", + releaseNamespace: "test", + k8sObjects: func() []runtime.Object { + exposeServersService := &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-expose-servers", + Namespace: "test", + }, + Spec: corev1.ServiceSpec{ + Type: corev1.ServiceTypeLoadBalancer, + }, + Status: corev1.ServiceStatus{ + LoadBalancer: corev1.LoadBalancerStatus{ + Ingress: []corev1.LoadBalancerIngress{}, + }, + }, + } + return []runtime.Object{exposeServersService} + }, + expErr: "unable to find load balancer address for test-expose-servers service, retrying", + }, + { + name: "LoadBalancer has empty IP", + releaseNamespace: "test", + k8sObjects: func() []runtime.Object { + exposeServersService := &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-expose-servers", + Namespace: "test", + }, + Spec: corev1.ServiceSpec{ + Type: corev1.ServiceTypeLoadBalancer, + }, + Status: corev1.ServiceStatus{ + LoadBalancer: corev1.LoadBalancerStatus{ + Ingress: []corev1.LoadBalancerIngress{ + { + IP: "", + }, + }, + }, + }, + } + return []runtime.Object{exposeServersService} + }, + expErr: "unable to find load balancer address for test-expose-servers service, retrying", + }, + { + name: "Valid NodePort service", + releaseNamespace: "test", + k8sObjects: func() []runtime.Object { + exposeServersService := &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-expose-servers", + Namespace: "test", + }, + Spec: corev1.ServiceSpec{ + Type: corev1.ServiceTypeNodePort, + Ports: []corev1.ServicePort{ + { + Name: "grpc", + NodePort: 30100, + }, + }, + }, + Status: corev1.ServiceStatus{}, + } + node1 := createNode("fake-gke-node1", "", "10.1.1.1") + node2 := createNode("fake-gke-node2", "", "10.2.2.2") + node3 := createNode("fake-gke-node3", "", "10.3.3.3") + return []runtime.Object{exposeServersService, node1, node2, node3} + }, + expAddresses: []string{"10.1.1.1:30100", "10.2.2.2:30100", "10.3.3.3:30100"}, + }, + { + name: "Valid NodePort service ignores node external IPs", + releaseNamespace: "test", + k8sObjects: func() []runtime.Object { + exposeServersService := &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-expose-servers", + Namespace: "test", + }, + Spec: corev1.ServiceSpec{ + Type: corev1.ServiceTypeNodePort, + Ports: []corev1.ServicePort{ + { + Name: "grpc", + NodePort: 30100, + }, + }, + }, + Status: corev1.ServiceStatus{}, + } + node1 := createNode("fake-gke-node1", "30.1.1.1", "10.1.1.1") + node2 := createNode("fake-gke-node2", "30.2.2.2", "10.2.2.2") + node3 := createNode("fake-gke-node3", "30.3.3.3", "10.3.3.3") + return []runtime.Object{exposeServersService, node1, node2, node3} + }, + expAddresses: []string{"10.1.1.1:30100", "10.2.2.2:30100", "10.3.3.3:30100"}, + }, + { + name: "Invalid NodePort service with only external IPs", + releaseNamespace: "test", + k8sObjects: func() []runtime.Object { + exposeServersService := &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-expose-servers", + Namespace: "test", + }, + Spec: corev1.ServiceSpec{ + Type: corev1.ServiceTypeNodePort, + Ports: []corev1.ServicePort{ + { + Name: "grpc", + NodePort: 30100, + }, + }, + }, + Status: corev1.ServiceStatus{}, + } + node1 := createNode("fake-gke-node1", "30.1.1.1", "") + node2 := createNode("fake-gke-node2", "30.2.2.2", "") + node3 := createNode("fake-gke-node3", "30.3.3.3", "") + return 
[]runtime.Object{exposeServersService, node1, node2, node3} + }, + expErr: "no server addresses were scraped from expose-servers service", + }, + { + name: "Invalid NodePort service because no nodes exist to scrape addresses from", + releaseNamespace: "test", + k8sObjects: func() []runtime.Object { + exposeServersService := &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-expose-servers", + Namespace: "test", + }, + Spec: corev1.ServiceSpec{ + Type: corev1.ServiceTypeNodePort, + Ports: []corev1.ServicePort{ + { + Name: "grpc", + NodePort: 30100, + }, + }, + }, + Status: corev1.ServiceStatus{}, + } + return []runtime.Object{exposeServersService} + }, + expErr: "no nodes were found for scraping server addresses from expose-servers service", + }, + { + name: "Invalid NodePort service because no grpc port exists", + releaseNamespace: "test", + k8sObjects: func() []runtime.Object { + exposeServersService := &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-expose-servers", + Namespace: "test", + }, + Spec: corev1.ServiceSpec{ + Type: corev1.ServiceTypeNodePort, + Ports: []corev1.ServicePort{ + { + Name: "not-grpc", + NodePort: 30100, + }, + }, + }, + Status: corev1.ServiceStatus{}, + } + node1 := createNode("fake-gke-node1", "30.1.1.1", "10.1.1.1") + node2 := createNode("fake-gke-node2", "30.2.2.2", "10.2.2.2") + node3 := createNode("fake-gke-node3", "30.3.3.3", "10.3.3.3") + return []runtime.Object{exposeServersService, node1, node2, node3} + }, + expErr: "no grpc port was found for expose-servers service", + }, + } + for _, tt := range cases { + t.Run(tt.name, func(t *testing.T) { + // Add the default namespace. + ns := corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "default"}} + nsTest := corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "test"}} + // Create fake k8s client + k8sObjects := append(tt.k8sObjects(), &ns, &nsTest) + + s := scheme.Scheme + //s.AddKnownTypes(v1alpha1.GroupVersion, &v1alpha1.PeeringAcceptor{}, &v1alpha1.PeeringAcceptorList{}) + fakeClient := fake.NewClientBuilder().WithScheme(s).WithRuntimeObjects(k8sObjects...).Build() + + // Create the peering acceptor controller + controller := &PeeringAcceptorController{ + Client: fakeClient, + Log: logrtest.TestLogger{T: t}, + Scheme: s, + ReleaseNamespace: tt.releaseNamespace, + ExposeServersServiceName: "test-expose-servers", + } + + // Get addresses from expose-servers service. + addrs, err := controller.getExposeServersServiceAddresses() + if tt.expErr != "" { + require.EqualError(t, err, tt.expErr) + } else { + require.NoError(t, err) + } + + // Assert all the expected addresses are there. + for _, expAddr := range tt.expAddresses { + require.Contains(t, addrs, expAddr) + } + }) + } +} + +// createNode is a test helper to create Kubernetes nodes. 
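+// It appends an ExternalIP and/or InternalIP entry to node.Status.Addresses
+// for each non-empty argument, mimicking what a cloud provider would set.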
+func createNode(name, externalIP, internalIP string) *corev1.Node { + node := &corev1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + }, + Status: corev1.NodeStatus{ + Addresses: []corev1.NodeAddress{}, + }, + } + if externalIP != "" { + node.Status.Addresses = append(node.Status.Addresses, corev1.NodeAddress{Type: corev1.NodeExternalIP, Address: externalIP}) + } + if internalIP != "" { + node.Status.Addresses = append(node.Status.Addresses, corev1.NodeAddress{Type: corev1.NodeInternalIP, Address: internalIP}) + } + return node +} diff --git a/control-plane/connect-inject/peering_dialer_controller.go b/control-plane/connect-inject/peering_dialer_controller.go index ddf3af55d9..aa1fb4e0db 100644 --- a/control-plane/connect-inject/peering_dialer_controller.go +++ b/control-plane/connect-inject/peering_dialer_controller.go @@ -9,7 +9,6 @@ import ( "github.com/go-logr/logr" consulv1alpha1 "github.com/hashicorp/consul-k8s/control-plane/api/v1alpha1" - "github.com/hashicorp/consul-k8s/control-plane/consul" "github.com/hashicorp/consul/api" corev1 "k8s.io/api/core/v1" k8serrors "k8s.io/apimachinery/pkg/api/errors" @@ -30,14 +29,10 @@ import ( // PeeringDialerController reconciles a PeeringDialer object. type PeeringDialerController struct { client.Client - // ConsulClientConfig is the config to create a Consul API client. - ConsulClientConfig *consul.Config - // ConsulServerConnMgr is the watcher for the Consul server addresses. - ConsulServerConnMgr consul.ServerConnectionManager - // Log is the logger for this controller. - Log logr.Logger - // Scheme is the API scheme that this controller should have. - Scheme *runtime.Scheme + // ConsulClient points at the agent local to the connect-inject deployment pod. + ConsulClient *api.Client + Log logr.Logger + Scheme *runtime.Scheme context.Context } @@ -63,18 +58,6 @@ func (r *PeeringDialerController) Reconcile(ctx context.Context, req ctrl.Reques return ctrl.Result{}, err } - // Create Consul client for this reconcile. - serverState, err := r.ConsulServerConnMgr.State() - if err != nil { - r.Log.Error(err, "failed to get Consul server state", "name", req.Name, "ns", req.Namespace) - return ctrl.Result{}, err - } - apiClient, err := consul.NewClientFromConnMgrState(r.ConsulClientConfig, serverState) - if err != nil { - r.Log.Error(err, "failed to create Consul API client", "name", req.Name, "ns", req.Namespace) - return ctrl.Result{}, err - } - // The DeletionTimestamp is zero when the object has not been marked for deletion. The finalizer is added // in case it does not exist to all resources. If the DeletionTimestamp is non-zero, the object has been // marked for deletion and goes into the deletion workflow. @@ -88,7 +71,7 @@ func (r *PeeringDialerController) Reconcile(ctx context.Context, req ctrl.Reques } else { if containsString(dialer.Finalizers, FinalizerName) { r.Log.Info("PeeringDialer was deleted, deleting from Consul", "name", req.Name, "ns", req.Namespace) - if err := r.deletePeering(ctx, apiClient, req.Name); err != nil { + if err := r.deletePeering(ctx, req.Name); err != nil { return ctrl.Result{}, err } controllerutil.RemoveFinalizer(dialer, FinalizerName) @@ -135,7 +118,7 @@ func (r *PeeringDialerController) Reconcile(ctx context.Context, req ctrl.Reques // correct secret specified in the spec. 
r.Log.Info("the secret in status.secretRef doesn't exist or wasn't set, establishing peering with the existing spec.peer.secret", "secret-name", dialer.Secret().Name, "secret-namespace", dialer.Namespace) peeringToken := specSecret.Data[dialer.Secret().Key] - if err := r.establishPeering(ctx, apiClient, dialer.Name, string(peeringToken)); err != nil { + if err := r.establishPeering(ctx, dialer.Name, string(peeringToken)); err != nil { r.updateStatusError(ctx, dialer, ConsulAgentError, err) return ctrl.Result{}, err } else { @@ -148,7 +131,7 @@ func (r *PeeringDialerController) Reconcile(ctx context.Context, req ctrl.Reques // Read the peering from Consul. r.Log.Info("reading peering from Consul", "name", dialer.Name) - peering, _, err := apiClient.Peerings().Read(ctx, dialer.Name, nil) + peering, _, err := r.ConsulClient.Peerings().Read(ctx, dialer.Name, nil) if err != nil { r.Log.Error(err, "failed to get Peering from Consul", "name", req.Name) return ctrl.Result{}, err @@ -158,7 +141,7 @@ func (r *PeeringDialerController) Reconcile(ctx context.Context, req ctrl.Reques if peering == nil { r.Log.Info("status.secret exists, but the peering doesn't exist in Consul; establishing peering with the existing spec.peer.secret", "secret-name", dialer.Secret().Name, "secret-namespace", dialer.Namespace) peeringToken := specSecret.Data[dialer.Secret().Key] - if err := r.establishPeering(ctx, apiClient, dialer.Name, string(peeringToken)); err != nil { + if err := r.establishPeering(ctx, dialer.Name, string(peeringToken)); err != nil { r.updateStatusError(ctx, dialer, ConsulAgentError, err) return ctrl.Result{}, err } else { @@ -172,7 +155,7 @@ func (r *PeeringDialerController) Reconcile(ctx context.Context, req ctrl.Reques if r.specStatusSecretsDifferent(dialer, specSecret) { r.Log.Info("the spec.peer.secret is different from the status secret, re-establishing peering", "secret-name", dialer.Secret().Name, "secret-namespace", dialer.Namespace) peeringToken := specSecret.Data[dialer.Secret().Key] - if err := r.establishPeering(ctx, apiClient, dialer.Name, string(peeringToken)); err != nil { + if err := r.establishPeering(ctx, dialer.Name, string(peeringToken)); err != nil { r.updateStatusError(ctx, dialer, ConsulAgentError, err) return ctrl.Result{}, err } else { @@ -184,7 +167,7 @@ func (r *PeeringDialerController) Reconcile(ctx context.Context, req ctrl.Reques if updated, err := r.versionAnnotationUpdated(dialer); err == nil && updated { r.Log.Info("the version annotation was incremented; re-establishing peering with spec.peer.secret", "secret-name", dialer.Secret().Name, "secret-namespace", dialer.Namespace) peeringToken := specSecret.Data[dialer.Secret().Key] - if err := r.establishPeering(ctx, apiClient, dialer.Name, string(peeringToken)); err != nil { + if err := r.establishPeering(ctx, dialer.Name, string(peeringToken)); err != nil { r.updateStatusError(ctx, dialer, ConsulAgentError, err) return ctrl.Result{}, err } else { @@ -275,12 +258,12 @@ func (r *PeeringDialerController) SetupWithManager(mgr ctrl.Manager) error { } // establishPeering is a helper function that calls the Consul api to generate a token for the peer. 
-func (r *PeeringDialerController) establishPeering(ctx context.Context, apiClient *api.Client, peerName string, peeringToken string) error { +func (r *PeeringDialerController) establishPeering(ctx context.Context, peerName string, peeringToken string) error { req := api.PeeringEstablishRequest{ PeerName: peerName, PeeringToken: peeringToken, } - _, _, err := apiClient.Peerings().Establish(ctx, req, nil) + _, _, err := r.ConsulClient.Peerings().Establish(ctx, req, nil) if err != nil { r.Log.Error(err, "failed to initiate peering", "err", err) return err @@ -289,8 +272,8 @@ func (r *PeeringDialerController) establishPeering(ctx context.Context, apiClien } // deletePeering is a helper function that calls the Consul api to delete a peering. -func (r *PeeringDialerController) deletePeering(ctx context.Context, apiClient *api.Client, peerName string) error { - _, err := apiClient.Peerings().Delete(ctx, peerName, nil) +func (r *PeeringDialerController) deletePeering(ctx context.Context, peerName string) error { + _, err := r.ConsulClient.Peerings().Delete(ctx, peerName, nil) if err != nil { r.Log.Error(err, "failed to delete Peering from Consul", "name", peerName) return err diff --git a/control-plane/connect-inject/peering_dialer_controller_test.go b/control-plane/connect-inject/peering_dialer_controller_test.go index 8bb6d4d29a..120618eafd 100644 --- a/control-plane/connect-inject/peering_dialer_controller_test.go +++ b/control-plane/connect-inject/peering_dialer_controller_test.go @@ -8,13 +8,9 @@ import ( logrtest "github.com/go-logr/logr/testing" "github.com/hashicorp/consul-k8s/control-plane/api/v1alpha1" - "github.com/hashicorp/consul-k8s/control-plane/consul" - "github.com/hashicorp/consul-k8s/control-plane/helper/test" - "github.com/hashicorp/consul-server-connection-manager/discovery" "github.com/hashicorp/consul/api" "github.com/hashicorp/consul/sdk/testutil" "github.com/hashicorp/consul/sdk/testutil/retry" - "github.com/hashicorp/go-hclog" "github.com/stretchr/testify/require" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -30,6 +26,8 @@ import ( // TestReconcile_CreateUpdatePeeringDialer creates a peering dialer. func TestReconcile_CreateUpdatePeeringDialer(t *testing.T) { t.Parallel() + nodeName := "test-node" + node2Name := "test-node2" cases := map[string]struct { peeringName string k8sObjects func() []runtime.Object @@ -252,10 +250,7 @@ func TestReconcile_CreateUpdatePeeringDialer(t *testing.T) { // Create test consul server. acceptorPeerServer, err := testutil.NewTestServerConfigT(t, func(c *testutil.TestServerConfig) { - // We set the datacenter because the server name, typically formatted as "server.<datacenter>.<domain>" - // must be unique on the acceptor and dialer peers. Otherwise the following consul error will be thrown: - // https://github.com/hashicorp/consul/blob/74b87d49d33069a048aead7a86d85d4b4b6461b5/agent/rpc/peering/service.go#L491. - c.Datacenter = "acceptor-dc" + c.NodeName = nodeName }) require.NoError(t, err) defer acceptorPeerServer.Stop() @@ -291,16 +286,23 @@ func TestReconcile_CreateUpdatePeeringDialer(t *testing.T) { } // Create test consul server.
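// This second server acts as the dialing peer. Its NodeName is set to a value
// different from the acceptor's so that the two single-node test clusters
// present distinct server names when peered.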
- testClient := test.TestServerWithMockConnMgrWatcher(t, nil) - dialerClient := testClient.APIClient + dialerPeerServer, err := testutil.NewTestServerConfigT(t, func(c *testutil.TestServerConfig) { + c.NodeName = node2Name + }) + require.NoError(t, err) + defer dialerPeerServer.Stop() + dialerPeerServer.WaitForServiceIntentions(t) + + cfg = &api.Config{ + Address: dialerPeerServer.HTTPAddr, + } + dialerClient, err := api.NewClient(cfg) + require.NoError(t, err) // If the peering is supposed to already exist in Consul, then establish a peering with the existing token, so the peering will exist on the dialing side. if tt.peeringExists { - retry.Run(t, func(r *retry.R) { - _, _, err = dialerClient.Peerings().Establish(context.Background(), api.PeeringEstablishRequest{PeerName: tt.peeringName, PeeringToken: encodedPeeringToken}, nil) - require.NoError(r, err) - }) - + _, _, err := dialerClient.Peerings().Establish(context.Background(), api.PeeringEstablishRequest{PeerName: tt.peeringName, PeeringToken: encodedPeeringToken}, nil) + require.NoError(t, err) k8sObjects = append(k8sObjects, createSecret("dialer-token-old", "default", "token", "old-token")) // Create a new token to be used by Reconcile(). The original token has already been // used once to simulate establishing an existing peering. @@ -316,11 +318,10 @@ func TestReconcile_CreateUpdatePeeringDialer(t *testing.T) { // Create the peering dialer controller controller := &PeeringDialerController{ - Client: fakeClient, - Log: logrtest.TestLogger{T: t}, - ConsulClientConfig: testClient.Cfg, - ConsulServerConnMgr: testClient.Watcher, - Scheme: s, + Client: fakeClient, + Log: logrtest.TestLogger{T: t}, + ConsulClient: dialerClient, + Scheme: s, } namespacedName := types.NamespacedName{ Name: "peering", @@ -365,6 +366,8 @@ func TestReconcile_CreateUpdatePeeringDialer(t *testing.T) { func TestReconcile_VersionAnnotationPeeringDialer(t *testing.T) { t.Parallel() + nodeName := "test-node" + node2Name := "test-node2" cases := map[string]struct { annotations map[string]string expErr string @@ -427,12 +430,7 @@ func TestReconcile_VersionAnnotationPeeringDialer(t *testing.T) { // Create test consul server. acceptorPeerServer, err := testutil.NewTestServerConfigT(t, func(c *testutil.TestServerConfig) { - // We set different cluster id for the connect CA because the server name, - // typically formatted as server.dc1.peering.<cluster_id>.consul - // must be unique on the acceptor and dialer peers. - c.Connect["ca_config"] = map[string]interface{}{ - "cluster_id": "00000000-2222-3333-4444-555555555555", - } + c.NodeName = nodeName }) require.NoError(t, err) defer acceptorPeerServer.Stop() @@ -482,34 +480,22 @@ func TestReconcile_VersionAnnotationPeeringDialer(t *testing.T) { require.NoError(t, err) // Create test consul server.
- var testServerCfg *testutil.TestServerConfig dialerPeerServer, err := testutil.NewTestServerConfigT(t, func(c *testutil.TestServerConfig) { - testServerCfg = c + c.NodeName = node2Name }) require.NoError(t, err) defer dialerPeerServer.Stop() dialerPeerServer.WaitForServiceIntentions(t) - consulConfig := &consul.Config{ - APIClientConfig: &api.Config{Address: dialerPeerServer.HTTPAddr}, - HTTPPort: testServerCfg.Ports.HTTP, + cfg = &api.Config{ + Address: dialerPeerServer.HTTPAddr, } - dialerClient, err := api.NewClient(consulConfig.APIClientConfig) + dialerClient, err := api.NewClient(cfg) require.NoError(t, err) - ctx, cancelFunc := context.WithCancel(context.Background()) - t.Cleanup(cancelFunc) - watcher, err := discovery.NewWatcher(ctx, discovery.Config{Addresses: "127.0.0.1", GRPCPort: testServerCfg.Ports.GRPC}, hclog.NewNullLogger()) - require.NoError(t, err) - t.Cleanup(watcher.Stop) - go watcher.Run() - // Establish a peering with the generated token. - retry.Run(t, func(r *retry.R) { - _, _, err = dialerClient.Peerings().Establish(context.Background(), api.PeeringEstablishRequest{PeerName: "peering", PeeringToken: generatedToken.PeeringToken}, nil) - require.NoError(r, err) - }) - + _, _, err = dialerClient.Peerings().Establish(context.Background(), api.PeeringEstablishRequest{PeerName: "peering", PeeringToken: generatedToken.PeeringToken}, nil) + require.NoError(t, err) k8sObjects = append(k8sObjects, createSecret("dialer-token-old", "default", "token", "old-token")) // Create a new token to be potentially used by Reconcile(). The original token has already been @@ -526,11 +512,10 @@ func TestReconcile_VersionAnnotationPeeringDialer(t *testing.T) { // Create the peering dialer controller controller := &PeeringDialerController{ - Client: fakeClient, - Log: logrtest.TestLogger{T: t}, - ConsulClientConfig: consulConfig, - ConsulServerConnMgr: watcher, - Scheme: s, + Client: fakeClient, + Log: logrtest.TestLogger{T: t}, + ConsulClient: dialerClient, + Scheme: s, } namespacedName := types.NamespacedName{ Name: "peering", @@ -741,20 +726,29 @@ func TestReconcileDeletePeeringDialer(t *testing.T) { fakeClient := fake.NewClientBuilder().WithScheme(s).WithRuntimeObjects(k8sObjects...).Build() // Create test consul server. - testClient := test.TestServerWithMockConnMgrWatcher(t, nil) - consulClient := testClient.APIClient + consul, err := testutil.NewTestServerConfigT(t, func(c *testutil.TestServerConfig) { + c.NodeName = "test-node" + }) + require.NoError(t, err) + defer consul.Stop() + consul.WaitForServiceIntentions(t) + + cfg := &api.Config{ + Address: consul.HTTPAddr, + } + consulClient, err := api.NewClient(cfg) + require.NoError(t, err) // Add the initial peerings into Consul by calling the Generate token endpoint. - _, _, err := consulClient.Peerings().GenerateToken(context.Background(), api.PeeringGenerateTokenRequest{PeerName: "dialer-deleted"}, nil) + _, _, err = consulClient.Peerings().GenerateToken(context.Background(), api.PeeringGenerateTokenRequest{PeerName: "dialer-deleted"}, nil) require.NoError(t, err) // Create the peering dialer controller. 
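// Under the direct-client model the wiring is minimal: the fake Kubernetes
// client, a logger, the Consul API client pointed at the test server, and the
// scheme.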
pdc := &PeeringDialerController{ - Client: fakeClient, - Log: logrtest.TestLogger{T: t}, - ConsulClientConfig: testClient.Cfg, - ConsulServerConnMgr: testClient.Watcher, - Scheme: s, + Client: fakeClient, + Log: logrtest.TestLogger{T: t}, + ConsulClient: consulClient, + Scheme: s, } namespacedName := types.NamespacedName{ Name: "dialer-deleted", diff --git a/control-plane/connect-inject/redirect_traffic.go b/control-plane/connect-inject/redirect_traffic.go index 0b896eea99..895b5befbe 100644 --- a/control-plane/connect-inject/redirect_traffic.go +++ b/control-plane/connect-inject/redirect_traffic.go @@ -3,26 +3,26 @@ package connectinject import ( "encoding/json" "fmt" + "os" "strconv" "github.com/hashicorp/consul/sdk/iptables" corev1 "k8s.io/api/core/v1" ) -// addRedirectTrafficConfigAnnotation creates an iptables.Config in JSON format based on proxy configuration. +// addRedirectTrafficConfigAnnotation creates an iptables.Config based on proxy configuration. // iptables.Config: -// -// ConsulDNSIP: an environment variable named RESOURCE_PREFIX_DNS_SERVICE_HOST where RESOURCE_PREFIX is the consul.fullname in helm. -// ProxyUserID: a constant set in Annotations -// ProxyInboundPort: the service port or bind port -// ProxyOutboundPort: default transparent proxy outbound port or transparent proxy outbound listener port -// ExcludeInboundPorts: prometheus, envoy stats, expose paths, checks and excluded pod annotations -// ExcludeOutboundPorts: pod annotations -// ExcludeOutboundCIDRs: pod annotations -// ExcludeUIDs: pod annotations -func (w *MeshWebhook) iptablesConfigJSON(pod corev1.Pod, ns corev1.Namespace) (string, error) { +// ConsulDNSIP: an environment variable named RESOURCE_PREFIX_DNS_SERVICE_HOST where RESOURCE_PREFIX is the consul.fullname in helm. +// ProxyUserID: a constant set in Annotations +// ProxyInboundPort: the service port or bind port +// ProxyOutboundPort: default transparent proxy outbound port or transparent proxy outbound listener port +// ExcludeInboundPorts: prometheus, envoy stats, expose paths, checks and excluded pod annotations +// ExcludeOutboundPorts: pod annotations +// ExcludeOutboundCIDRs: pod annotations +// ExcludeUIDs: pod annotations +func (w *MeshWebhook) addRedirectTrafficConfigAnnotation(pod *corev1.Pod, ns corev1.Namespace) error { cfg := iptables.Config{ - ProxyUserID: strconv.Itoa(sidecarUserAndGroupID), + ProxyUserID: strconv.Itoa(envoyUserAndGroupID), } // Set the proxy's inbound port. @@ -32,28 +32,28 @@ func (w *MeshWebhook) iptablesConfigJSON(pod corev1.Pod, ns corev1.Namespace) (s cfg.ProxyOutboundPort = iptables.DefaultTProxyOutboundPort // If metrics are enabled, get the prometheusScrapePort and exclude it from the inbound ports - enableMetrics, err := w.MetricsConfig.enableMetrics(pod) + enableMetrics, err := w.MetricsConfig.enableMetrics(*pod) if err != nil { - return "", err + return err } if enableMetrics { - prometheusScrapePort, err := w.MetricsConfig.prometheusScrapePort(pod) + prometheusScrapePort, err := w.MetricsConfig.prometheusScrapePort(*pod) if err != nil { - return "", err + return err } cfg.ExcludeInboundPorts = append(cfg.ExcludeInboundPorts, prometheusScrapePort) } // Exclude any overwritten liveness/readiness/startup ports from redirection. 
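// Each overwritten probe port is appended to cfg.ExcludeInboundPorts below so
// that kubelet health checks reach the application directly instead of being
// redirected through the proxy.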
- overwriteProbes, err := shouldOverwriteProbes(pod, w.TProxyOverwriteProbes) + overwriteProbes, err := shouldOverwriteProbes(*pod, w.TProxyOverwriteProbes) if err != nil { - return "", err + return err } if overwriteProbes { for i, container := range pod.Spec.Containers { // skip the "envoy-sidecar" container from having its probes overridden - if container.Name == sidecarContainer { + if container.Name == envoySidecarContainer { continue } if container.LivenessProbe != nil && container.LivenessProbe.HTTPGet != nil { @@ -69,53 +69,47 @@ func (w *MeshWebhook) iptablesConfigJSON(pod corev1.Pod, ns corev1.Namespace) (s } // Inbound ports - excludeInboundPorts := splitCommaSeparatedItemsFromAnnotation(annotationTProxyExcludeInboundPorts, pod) + excludeInboundPorts := splitCommaSeparatedItemsFromAnnotation(annotationTProxyExcludeInboundPorts, *pod) cfg.ExcludeInboundPorts = append(cfg.ExcludeInboundPorts, excludeInboundPorts...) // Outbound ports - excludeOutboundPorts := splitCommaSeparatedItemsFromAnnotation(annotationTProxyExcludeOutboundPorts, pod) + excludeOutboundPorts := splitCommaSeparatedItemsFromAnnotation(annotationTProxyExcludeOutboundPorts, *pod) cfg.ExcludeOutboundPorts = append(cfg.ExcludeOutboundPorts, excludeOutboundPorts...) // Outbound CIDRs - excludeOutboundCIDRs := splitCommaSeparatedItemsFromAnnotation(annotationTProxyExcludeOutboundCIDRs, pod) + excludeOutboundCIDRs := splitCommaSeparatedItemsFromAnnotation(annotationTProxyExcludeOutboundCIDRs, *pod) cfg.ExcludeOutboundCIDRs = append(cfg.ExcludeOutboundCIDRs, excludeOutboundCIDRs...) // UIDs - excludeUIDs := splitCommaSeparatedItemsFromAnnotation(annotationTProxyExcludeUIDs, pod) + excludeUIDs := splitCommaSeparatedItemsFromAnnotation(annotationTProxyExcludeUIDs, *pod) cfg.ExcludeUIDs = append(cfg.ExcludeUIDs, excludeUIDs...) // Add init container user ID to exclude from traffic redirection. cfg.ExcludeUIDs = append(cfg.ExcludeUIDs, strconv.Itoa(initContainersUserAndGroupID)) - dnsEnabled, err := consulDNSEnabled(ns, pod, w.EnableConsulDNS) + dnsEnabled, err := consulDNSEnabled(ns, *pod, w.EnableConsulDNS) if err != nil { - return "", err + return err } + var consulDNSClusterIP string if dnsEnabled { // If Consul DNS is enabled, we find the environment variable that has the value // of the ClusterIP of the Consul DNS Service. constructDNSServiceHostName returns // the name of the env variable whose value is the ClusterIP of the Consul DNS Service. - cfg.ConsulDNSIP = ConsulDataplaneDNSBindHost - cfg.ConsulDNSPort = ConsulDataplaneDNSBindPort + consulDNSClusterIP = os.Getenv(w.constructDNSServiceHostName()) + if consulDNSClusterIP == "" { + return fmt.Errorf("environment variable %s not found", w.constructDNSServiceHostName()) + } + cfg.ConsulDNSIP = consulDNSClusterIP } iptablesConfigJson, err := json.Marshal(&cfg) if err != nil { - return "", fmt.Errorf("could not marshal iptables config: %w", err) - } - - return string(iptablesConfigJson), nil -} - -// addRedirectTrafficConfigAnnotation add the created iptables JSON config as an annotation on the provided pod. 
-func (w *MeshWebhook) addRedirectTrafficConfigAnnotation(pod *corev1.Pod, ns corev1.Namespace) error { - iptablesConfig, err := w.iptablesConfigJSON(*pod, ns) - if err != nil { - return err + return fmt.Errorf("could not marshal iptables config: %w", err) } - pod.Annotations[annotationRedirectTraffic] = iptablesConfig + pod.Annotations[annotationRedirectTraffic] = string(iptablesConfigJson) return nil } diff --git a/control-plane/connect-inject/redirect_traffic_test.go b/control-plane/connect-inject/redirect_traffic_test.go index 970301c13b..838f1c0c25 100644 --- a/control-plane/connect-inject/redirect_traffic_test.go +++ b/control-plane/connect-inject/redirect_traffic_test.go @@ -3,12 +3,12 @@ package connectinject import ( "encoding/json" "fmt" + "os" "strconv" "testing" mapset "github.com/deckarep/golang-set" logrtest "github.com/go-logr/logr/testing" - "github.com/hashicorp/consul-k8s/control-plane/consul" "github.com/hashicorp/consul/sdk/iptables" "github.com/stretchr/testify/require" corev1 "k8s.io/api/core/v1" @@ -22,6 +22,9 @@ import ( const ( defaultPodName = "fakePod" defaultNamespace = "default" + resourcePrefix = "CONSUL" + dnsEnvVariable = "CONSUL_DNS_SERVICE_HOST" + dnsIP = "127.0.0.1" ) func TestAddRedirectTrafficConfig(t *testing.T) { @@ -65,7 +68,7 @@ func TestAddRedirectTrafficConfig(t *testing.T) { }, expCfg: iptables.Config{ ConsulDNSIP: "", - ProxyUserID: strconv.Itoa(sidecarUserAndGroupID), + ProxyUserID: strconv.Itoa(envoyUserAndGroupID), ProxyInboundPort: proxyDefaultInboundPort, ProxyOutboundPort: iptables.DefaultTProxyOutboundPort, ExcludeUIDs: []string{"5996"}, @@ -98,7 +101,7 @@ func TestAddRedirectTrafficConfig(t *testing.T) { }, expCfg: iptables.Config{ ConsulDNSIP: "", - ProxyUserID: strconv.Itoa(sidecarUserAndGroupID), + ProxyUserID: strconv.Itoa(envoyUserAndGroupID), ProxyInboundPort: proxyDefaultInboundPort, ProxyOutboundPort: iptables.DefaultTProxyOutboundPort, ExcludeUIDs: []string{"5996"}, @@ -132,7 +135,7 @@ func TestAddRedirectTrafficConfig(t *testing.T) { }, expCfg: iptables.Config{ ConsulDNSIP: "", - ProxyUserID: strconv.Itoa(sidecarUserAndGroupID), + ProxyUserID: strconv.Itoa(envoyUserAndGroupID), ProxyInboundPort: proxyDefaultInboundPort, ProxyOutboundPort: iptables.DefaultTProxyOutboundPort, ExcludeUIDs: []string{"5996"}, @@ -174,7 +177,7 @@ func TestAddRedirectTrafficConfig(t *testing.T) { }, expCfg: iptables.Config{ ConsulDNSIP: "", - ProxyUserID: strconv.Itoa(sidecarUserAndGroupID), + ProxyUserID: strconv.Itoa(envoyUserAndGroupID), ProxyInboundPort: proxyDefaultInboundPort, ProxyOutboundPort: iptables.DefaultTProxyOutboundPort, ExcludeUIDs: []string{"5996"}, @@ -207,7 +210,7 @@ func TestAddRedirectTrafficConfig(t *testing.T) { }, expCfg: iptables.Config{ ConsulDNSIP: "", - ProxyUserID: strconv.Itoa(sidecarUserAndGroupID), + ProxyUserID: strconv.Itoa(envoyUserAndGroupID), ProxyInboundPort: proxyDefaultInboundPort, ProxyOutboundPort: iptables.DefaultTProxyOutboundPort, ExcludeUIDs: []string{"5996"}, @@ -240,7 +243,7 @@ func TestAddRedirectTrafficConfig(t *testing.T) { }, expCfg: iptables.Config{ ConsulDNSIP: "", - ProxyUserID: strconv.Itoa(sidecarUserAndGroupID), + ProxyUserID: strconv.Itoa(envoyUserAndGroupID), ProxyInboundPort: proxyDefaultInboundPort, ProxyOutboundPort: iptables.DefaultTProxyOutboundPort, ExcludeUIDs: []string{"5996"}, @@ -273,7 +276,7 @@ func TestAddRedirectTrafficConfig(t *testing.T) { }, expCfg: iptables.Config{ ConsulDNSIP: "", - ProxyUserID: strconv.Itoa(sidecarUserAndGroupID), + ProxyUserID: 
strconv.Itoa(envoyUserAndGroupID), ProxyInboundPort: proxyDefaultInboundPort, ProxyOutboundPort: iptables.DefaultTProxyOutboundPort, ExcludeUIDs: []string{strconv.Itoa(initContainersUserAndGroupID)}, @@ -306,7 +309,7 @@ func TestAddRedirectTrafficConfig(t *testing.T) { }, expCfg: iptables.Config{ ConsulDNSIP: "", - ProxyUserID: strconv.Itoa(sidecarUserAndGroupID), + ProxyUserID: strconv.Itoa(envoyUserAndGroupID), ProxyInboundPort: proxyDefaultInboundPort, ProxyOutboundPort: iptables.DefaultTProxyOutboundPort, ExcludeUIDs: []string{"4444", "44444", strconv.Itoa(initContainersUserAndGroupID)}, @@ -340,7 +343,8 @@ func TestAddRedirectTrafficConfig(t *testing.T) { }, }, expCfg: iptables.Config{ - ProxyUserID: strconv.Itoa(sidecarUserAndGroupID), + ConsulDNSIP: "", + ProxyUserID: strconv.Itoa(envoyUserAndGroupID), ProxyInboundPort: proxyDefaultInboundPort, ProxyOutboundPort: iptables.DefaultTProxyOutboundPort, ExcludeInboundPorts: []string{"1111", "11111"}, @@ -349,94 +353,94 @@ func TestAddRedirectTrafficConfig(t *testing.T) { ExcludeUIDs: []string{"4444", "44444", strconv.Itoa(initContainersUserAndGroupID)}, }, }, + { + name: "dns enabled", + dnsEnabled: true, + webhook: MeshWebhook{ + Log: logrtest.TestLogger{T: t}, + AllowK8sNamespacesSet: mapset.NewSetWith("*"), + DenyK8sNamespacesSet: mapset.NewSet(), + decoder: decoder, + ResourcePrefix: resourcePrefix, + }, + pod: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: defaultNamespace, + Name: defaultPodName, + Annotations: map[string]string{ + keyConsulDNS: "true", + }, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "test", + }, + }, + }, + }, + expCfg: iptables.Config{ + ConsulDNSIP: dnsIP, + ProxyUserID: strconv.Itoa(envoyUserAndGroupID), + ProxyInboundPort: proxyDefaultInboundPort, + ProxyOutboundPort: iptables.DefaultTProxyOutboundPort, + ExcludeUIDs: []string{strconv.Itoa(initContainersUserAndGroupID)}, + }, + }, + { + name: "dns annotation set but environment variable missing", + dnsEnabled: false, + webhook: MeshWebhook{ + Log: logrtest.TestLogger{T: t}, + AllowK8sNamespacesSet: mapset.NewSetWith("*"), + DenyK8sNamespacesSet: mapset.NewSet(), + decoder: decoder, + ResourcePrefix: resourcePrefix, + }, + pod: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: defaultNamespace, + Name: defaultPodName, + Annotations: map[string]string{ + keyConsulDNS: "true", + }, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "test", + }, + }, + }, + }, + expCfg: iptables.Config{ + ConsulDNSIP: dnsIP, + ProxyUserID: strconv.Itoa(envoyUserAndGroupID), + ProxyInboundPort: proxyDefaultInboundPort, + ProxyOutboundPort: iptables.DefaultTProxyOutboundPort, + ExcludeUIDs: []string{strconv.Itoa(initContainersUserAndGroupID)}, + }, + expErr: fmt.Errorf("environment variable %s not found", dnsEnvVariable), + }, } for _, c := range cases { t.Run(c.name, func(t *testing.T) { - err = c.webhook.addRedirectTrafficConfigAnnotation(c.pod, c.namespace) + if c.dnsEnabled { + os.Setenv(dnsEnvVariable, dnsIP) + } else { + os.Setenv(dnsEnvVariable, "") + } + err := c.webhook.addRedirectTrafficConfigAnnotation(c.pod, c.namespace) + require.Equal(t, c.expErr, err) // Only compare annotation and iptables config on successful runs if c.expErr == nil { - require.NoError(t, err) anno, ok := c.pod.Annotations[annotationRedirectTraffic] require.Equal(t, ok, true) actualConfig := iptables.Config{} - err = json.Unmarshal([]byte(anno), &actualConfig) - require.NoError(t, err) + json.Unmarshal([]byte(anno), 
&actualConfig) require.Equal(t, c.expCfg, actualConfig) - } else { - require.EqualError(t, err, c.expErr.Error()) - } - }) - } -} - -func TestRedirectTraffic_consulDNS(t *testing.T) { - cases := map[string]struct { - globalEnabled bool - annotations map[string]string - namespaceLabel map[string]string - expectConsulDNSConfig bool - }{ - "enabled globally, ns not set, annotation not provided": { - globalEnabled: true, - expectConsulDNSConfig: true, - }, - "enabled globally, ns not set, annotation is false": { - globalEnabled: true, - annotations: map[string]string{keyConsulDNS: "false"}, - expectConsulDNSConfig: false, - }, - "enabled globally, ns not set, annotation is true": { - globalEnabled: true, - annotations: map[string]string{keyConsulDNS: "true"}, - expectConsulDNSConfig: true, - }, - "disabled globally, ns not set, annotation not provided": { - expectConsulDNSConfig: false, - }, - "disabled globally, ns not set, annotation is false": { - annotations: map[string]string{keyConsulDNS: "false"}, - expectConsulDNSConfig: false, - }, - "disabled globally, ns not set, annotation is true": { - annotations: map[string]string{keyConsulDNS: "true"}, - expectConsulDNSConfig: true, - }, - "disabled globally, ns enabled, annotation not set": { - namespaceLabel: map[string]string{keyConsulDNS: "true"}, - expectConsulDNSConfig: true, - }, - "enabled globally, ns disabled, annotation not set": { - globalEnabled: true, - namespaceLabel: map[string]string{keyConsulDNS: "false"}, - expectConsulDNSConfig: false, - }, - } - for name, c := range cases { - t.Run(name, func(t *testing.T) { - w := MeshWebhook{ - EnableConsulDNS: c.globalEnabled, - EnableTransparentProxy: true, - ConsulConfig: &consul.Config{HTTPPort: 8500}, - } - - pod := minimal() - pod.Annotations = c.annotations - - ns := testNS - ns.Labels = c.namespaceLabel - iptablesConfig, err := w.iptablesConfigJSON(*pod, ns) - require.NoError(t, err) - - actualConfig := iptables.Config{} - err = json.Unmarshal([]byte(iptablesConfig), &actualConfig) - require.NoError(t, err) - if c.expectConsulDNSConfig { - require.Equal(t, "127.0.0.1", actualConfig.ConsulDNSIP) - require.Equal(t, 8600, actualConfig.ConsulDNSPort) - } else { - require.Empty(t, actualConfig.ConsulDNSIP) } }) } diff --git a/control-plane/consul/consul.go b/control-plane/consul/consul.go index bb46308ff8..ad4feec785 100644 --- a/control-plane/consul/consul.go +++ b/control-plane/consul/consul.go @@ -6,17 +6,9 @@ import ( "time" "github.com/hashicorp/consul-k8s/control-plane/version" - "github.com/hashicorp/consul-server-connection-manager/discovery" capi "github.com/hashicorp/consul/api" ) -//go:generate mockery --name ServerConnectionManager --inpkg -type ServerConnectionManager interface { - State() (discovery.State, error) - Run() - Stop() -} - // NewClient returns a Consul API client. It adds a required User-Agent // header that describes the version of consul-k8s making the call. func NewClient(config *capi.Config, consulAPITimeout time.Duration) (*capi.Client, error) { @@ -57,36 +49,3 @@ func NewClient(config *capi.Config, consulAPITimeout time.Duration) (*capi.Clien client.AddHeader("User-Agent", fmt.Sprintf("consul-k8s/%s", version.GetHumanVersion())) return client, nil } - -type Config struct { - APIClientConfig *capi.Config - HTTPPort int - GRPCPort int - APITimeout time.Duration -} - -// todo (ishustava): replace all usages of this one. -// NewClientFromConnMgrState creates a new API client with an IP address from the state -// of the consul-server-connection-manager. 
-func NewClientFromConnMgrState(config *Config, state discovery.State) (*capi.Client, error) { - ipAddress := state.Address.IP - config.APIClientConfig.Address = fmt.Sprintf("%s:%d", ipAddress.String(), config.HTTPPort) - if state.Token != "" { - config.APIClientConfig.Token = state.Token - } - return NewClient(config.APIClientConfig, config.APITimeout) -} - -// NewClientFromConnMgr creates a new API client by first getting the state of the passed watcher. -func NewClientFromConnMgr(config *Config, watcher ServerConnectionManager) (*capi.Client, error) { - // Create a new consul client. - serverState, err := watcher.State() - if err != nil { - return nil, err - } - consulClient, err := NewClientFromConnMgrState(config, serverState) - if err != nil { - return nil, err - } - return consulClient, nil -} diff --git a/control-plane/consul/mock_ServerConnectionManager.go b/control-plane/consul/mock_ServerConnectionManager.go deleted file mode 100644 index d0189c5380..0000000000 --- a/control-plane/consul/mock_ServerConnectionManager.go +++ /dev/null @@ -1,59 +0,0 @@ -// Code generated by mockery v2.14.0. DO NOT EDIT. - -package consul - -import ( - discovery "github.com/hashicorp/consul-server-connection-manager/discovery" - mock "github.com/stretchr/testify/mock" -) - -// MockServerConnectionManager is an autogenerated mock type for the ServerConnectionManager type -type MockServerConnectionManager struct { - mock.Mock -} - -// Run provides a mock function with given fields: -func (_m *MockServerConnectionManager) Run() { - _m.Called() -} - -// State provides a mock function with given fields: -func (_m *MockServerConnectionManager) State() (discovery.State, error) { - ret := _m.Called() - - var r0 discovery.State - if rf, ok := ret.Get(0).(func() discovery.State); ok { - r0 = rf() - } else { - r0 = ret.Get(0).(discovery.State) - } - - var r1 error - if rf, ok := ret.Get(1).(func() error); ok { - r1 = rf() - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// Stop provides a mock function with given fields: -func (_m *MockServerConnectionManager) Stop() { - _m.Called() -} - -type mockConstructorTestingTNewMockServerConnectionManager interface { - mock.TestingT - Cleanup(func()) -} - -// NewMockServerConnectionManager creates a new instance of MockServerConnectionManager. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewMockServerConnectionManager(t mockConstructorTestingTNewMockServerConnectionManager) *MockServerConnectionManager { - mock := &MockServerConnectionManager{} - mock.Mock.Test(t) - - t.Cleanup(func() { mock.AssertExpectations(t) }) - - return mock -} diff --git a/control-plane/controller/configentry_controller.go b/control-plane/controller/configentry_controller.go index 8ae90a56a6..94206c8f4d 100644 --- a/control-plane/controller/configentry_controller.go +++ b/control-plane/controller/configentry_controller.go @@ -9,7 +9,6 @@ import ( "github.com/go-logr/logr" "github.com/hashicorp/consul-k8s/control-plane/api/common" - "github.com/hashicorp/consul-k8s/control-plane/consul" "github.com/hashicorp/consul-k8s/control-plane/namespaces" capi "github.com/hashicorp/consul/api" "golang.org/x/time/rate" @@ -51,11 +50,7 @@ type Controller interface { // all config entry types, e.g. ServiceDefaults, ServiceResolver, etc, since // they share the same reconcile behaviour. type ConfigEntryController struct { - // ConsulClientConfig is the config for the Consul API client. 
- ConsulClientConfig *consul.Config - - // ConsulServerConnMgr is the watcher for the Consul server addresses. - ConsulServerConnMgr consul.ServerConnectionManager + ConsulClient *capi.Client // DatacenterName indicates the Consul Datacenter name the controller is // operating in. Adds this value as metadata on managed resources. @@ -102,18 +97,6 @@ func (r *ConfigEntryController) ReconcileEntry(ctx context.Context, crdCtrl Cont return ctrl.Result{}, err } - // Create Consul client for this reconcile. - serverState, err := r.ConsulServerConnMgr.State() - if err != nil { - logger.Error(err, "failed to get Consul server state", "name", req.Name, "ns", req.Namespace) - return ctrl.Result{}, err - } - consulClient, err := consul.NewClientFromConnMgrState(r.ConsulClientConfig, serverState) - if err != nil { - logger.Error(err, "failed to create Consul API client", "name", req.Name, "ns", req.Namespace) - return ctrl.Result{}, err - } - consulEntry := configEntry.ToConsul(r.DatacenterName) if configEntry.GetDeletionTimestamp().IsZero() { @@ -131,7 +114,7 @@ func (r *ConfigEntryController) ReconcileEntry(ctx context.Context, crdCtrl Cont if containsString(configEntry.GetFinalizers(), FinalizerName) { logger.Info("deletion event") // Check to see if consul has config entry with the same name - entry, _, err := consulClient.ConfigEntries().Get(configEntry.ConsulKind(), configEntry.ConsulName(), &capi.QueryOptions{ + entry, _, err := r.ConsulClient.ConfigEntries().Get(configEntry.ConsulKind(), configEntry.ConsulName(), &capi.QueryOptions{ Namespace: r.consulNamespace(consulEntry, configEntry.ConsulMirroringNS(), configEntry.ConsulGlobalResource()), }) @@ -142,7 +125,7 @@ func (r *ConfigEntryController) ReconcileEntry(ctx context.Context, crdCtrl Cont } else if err == nil { // Only delete the resource from Consul if it is owned by our datacenter. if entry.GetMeta()[common.DatacenterKey] == r.DatacenterName { - _, err := consulClient.ConfigEntries().Delete(configEntry.ConsulKind(), configEntry.ConsulName(), &capi.WriteOptions{ + _, err := r.ConsulClient.ConfigEntries().Delete(configEntry.ConsulKind(), configEntry.ConsulName(), &capi.WriteOptions{ Namespace: r.consulNamespace(consulEntry, configEntry.ConsulMirroringNS(), configEntry.ConsulGlobalResource()), }) if err != nil { @@ -167,7 +150,7 @@ func (r *ConfigEntryController) ReconcileEntry(ctx context.Context, crdCtrl Cont } // Check to see if consul has config entry with the same name - entry, _, err := consulClient.ConfigEntries().Get(configEntry.ConsulKind(), configEntry.ConsulName(), &capi.QueryOptions{ + entry, _, err := r.ConsulClient.ConfigEntries().Get(configEntry.ConsulKind(), configEntry.ConsulName(), &capi.QueryOptions{ Namespace: r.consulNamespace(consulEntry, configEntry.ConsulMirroringNS(), configEntry.ConsulGlobalResource()), }) // If a config entry with this name does not exist @@ -178,7 +161,7 @@ func (r *ConfigEntryController) ReconcileEntry(ctx context.Context, crdCtrl Cont // destination consul namespace first. 
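// namespaces.EnsureExists reports whether it had to create the namespace; when
// the namespace already exists it is effectively a no-op, which keeps repeated
// reconciles of the same config entry safe.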
if r.EnableConsulNamespaces { consulNS := r.consulNamespace(consulEntry, configEntry.ConsulMirroringNS(), configEntry.ConsulGlobalResource()) - created, err := namespaces.EnsureExists(consulClient, consulNS, r.CrossNSACLPolicy) + created, err := namespaces.EnsureExists(r.ConsulClient, consulNS, r.CrossNSACLPolicy) if err != nil { return r.syncFailed(ctx, logger, crdCtrl, configEntry, ConsulAgentError, fmt.Errorf("creating consul namespace %q: %w", consulNS, err)) @@ -189,7 +172,7 @@ func (r *ConfigEntryController) ReconcileEntry(ctx context.Context, crdCtrl Cont } // Create the config entry - _, writeMeta, err := consulClient.ConfigEntries().Set(consulEntry, &capi.WriteOptions{ + _, writeMeta, err := r.ConsulClient.ConfigEntries().Set(consulEntry, &capi.WriteOptions{ Namespace: r.consulNamespace(consulEntry, configEntry.ConsulMirroringNS(), configEntry.ConsulGlobalResource()), }) if err != nil { @@ -237,7 +220,7 @@ func (r *ConfigEntryController) ReconcileEntry(ctx context.Context, crdCtrl Cont } logger.Info("config entry does not match consul", "modify-index", entry.GetModifyIndex()) - _, writeMeta, err := consulClient.ConfigEntries().Set(consulEntry, &capi.WriteOptions{ + _, writeMeta, err := r.ConsulClient.ConfigEntries().Set(consulEntry, &capi.WriteOptions{ Namespace: r.consulNamespace(consulEntry, configEntry.ConsulMirroringNS(), configEntry.ConsulGlobalResource()), }) if err != nil { @@ -251,7 +234,7 @@ func (r *ConfigEntryController) ReconcileEntry(ctx context.Context, crdCtrl Cont // matches the entry in Kubernetes. We just need to update the metadata // of the entry in Consul to say that it's now managed by Kubernetes. logger.Info("migrating config entry to be managed by Kubernetes") - _, writeMeta, err := consulClient.ConfigEntries().Set(consulEntry, &capi.WriteOptions{ + _, writeMeta, err := r.ConsulClient.ConfigEntries().Set(consulEntry, &capi.WriteOptions{ Namespace: r.consulNamespace(consulEntry, configEntry.ConsulMirroringNS(), configEntry.ConsulGlobalResource()), }) if err != nil { diff --git a/control-plane/controller/configentry_controller_ent_test.go b/control-plane/controller/configentry_controller_ent_test.go index 61a6aef947..7b40947df5 100644 --- a/control-plane/controller/configentry_controller_ent_test.go +++ b/control-plane/controller/configentry_controller_ent_test.go @@ -13,8 +13,8 @@ import ( "github.com/hashicorp/consul-k8s/control-plane/api/common" "github.com/hashicorp/consul-k8s/control-plane/api/v1alpha1" "github.com/hashicorp/consul-k8s/control-plane/controller" - "github.com/hashicorp/consul-k8s/control-plane/helper/test" capi "github.com/hashicorp/consul/api" + "github.com/hashicorp/consul/sdk/testutil" "github.com/stretchr/testify/require" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -193,9 +193,14 @@ func TestConfigEntryController_createsConfigEntry_consulNamespaces(tt *testing.T s.AddKnownTypes(v1alpha1.GroupVersion, in.KubeResource) ctx := context.Background() - testClient := test.TestServerWithMockConnMgrWatcher(t, nil) - testClient.TestServer.WaitForServiceIntentions(t) - consulClient := testClient.APIClient + consul, err := testutil.NewTestServerConfigT(t, nil) + req.NoError(err) + defer consul.Stop() + consul.WaitForServiceIntentions(t) + consulClient, err := capi.NewClient(&capi.Config{ + Address: consul.HTTPAddr, + }) + req.NoError(err) fakeClient := fake.NewClientBuilder().WithScheme(s).WithRuntimeObjects(in.KubeResource).Build() @@ -204,8 +209,7 @@ func 
TestConfigEntryController_createsConfigEntry_consulNamespaces(tt *testing.T logrtest.TestLogger{T: t}, s, &controller.ConfigEntryController{ - ConsulClientConfig: testClient.Cfg, - ConsulServerConnMgr: testClient.Watcher, + ConsulClient: consulClient, EnableConsulNamespaces: true, EnableNSMirroring: c.Mirror, NSMirroringPrefix: c.MirrorPrefix, @@ -455,9 +459,14 @@ func TestConfigEntryController_updatesConfigEntry_consulNamespaces(tt *testing.T s.AddKnownTypes(v1alpha1.GroupVersion, in.KubeResource) ctx := context.Background() - testClient := test.TestServerWithMockConnMgrWatcher(t, nil) - testClient.TestServer.WaitForServiceIntentions(t) - consulClient := testClient.APIClient + consul, err := testutil.NewTestServerConfigT(t, nil) + req.NoError(err) + defer consul.Stop() + consul.WaitForServiceIntentions(t) + consulClient, err := capi.NewClient(&capi.Config{ + Address: consul.HTTPAddr, + }) + req.NoError(err) fakeClient := fake.NewClientBuilder().WithScheme(s).WithRuntimeObjects(in.KubeResource).Build() @@ -466,8 +475,7 @@ func TestConfigEntryController_updatesConfigEntry_consulNamespaces(tt *testing.T logrtest.TestLogger{T: t}, s, &controller.ConfigEntryController{ - ConsulClientConfig: testClient.Cfg, - ConsulServerConnMgr: testClient.Watcher, + ConsulClient: consulClient, EnableConsulNamespaces: true, EnableNSMirroring: c.Mirror, NSMirroringPrefix: c.MirrorPrefix, @@ -491,14 +499,14 @@ func TestConfigEntryController_updatesConfigEntry_consulNamespaces(tt *testing.T // Now update it. { // First get it so we have the latest revision number. - err := fakeClient.Get(ctx, types.NamespacedName{ + err = fakeClient.Get(ctx, types.NamespacedName{ Namespace: c.SourceKubeNS, Name: in.KubeResource.KubernetesName(), }, in.KubeResource) req.NoError(err) // Update the resource. 
- err = in.UpdateResourceFunc(fakeClient, ctx, in.KubeResource) + err := in.UpdateResourceFunc(fakeClient, ctx, in.KubeResource) req.NoError(err) resp, err := r.Reconcile(ctx, ctrl.Request{ @@ -704,9 +712,14 @@ func TestConfigEntryController_deletesConfigEntry_consulNamespaces(tt *testing.T s := runtime.NewScheme() s.AddKnownTypes(v1alpha1.GroupVersion, in.KubeResource) - testClient := test.TestServerWithMockConnMgrWatcher(t, nil) - testClient.TestServer.WaitForServiceIntentions(t) - consulClient := testClient.APIClient + consul, err := testutil.NewTestServerConfigT(t, nil) + req.NoError(err) + defer consul.Stop() + consul.WaitForServiceIntentions(t) + consulClient, err := capi.NewClient(&capi.Config{ + Address: consul.HTTPAddr, + }) + req.NoError(err) fakeClient := fake.NewClientBuilder().WithScheme(s).WithRuntimeObjects(in.KubeResource).Build() @@ -715,8 +728,7 @@ func TestConfigEntryController_deletesConfigEntry_consulNamespaces(tt *testing.T logrtest.TestLogger{T: t}, s, &controller.ConfigEntryController{ - ConsulClientConfig: testClient.Cfg, - ConsulServerConnMgr: testClient.Watcher, + ConsulClient: consulClient, EnableConsulNamespaces: true, EnableNSMirroring: c.Mirror, NSMirroringPrefix: c.MirrorPrefix, diff --git a/control-plane/controller/configentry_controller_test.go b/control-plane/controller/configentry_controller_test.go index 83b9e3eecf..5a26d9abd6 100644 --- a/control-plane/controller/configentry_controller_test.go +++ b/control-plane/controller/configentry_controller_test.go @@ -12,9 +12,8 @@ import ( "github.com/google/go-cmp/cmp/cmpopts" "github.com/hashicorp/consul-k8s/control-plane/api/common" "github.com/hashicorp/consul-k8s/control-plane/api/v1alpha1" - "github.com/hashicorp/consul-k8s/control-plane/consul" - "github.com/hashicorp/consul-k8s/control-plane/helper/test" capi "github.com/hashicorp/consul/api" + "github.com/hashicorp/consul/sdk/testutil" "github.com/stretchr/testify/require" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -40,7 +39,7 @@ func TestConfigEntryControllers_createsConfigEntry(t *testing.T) { consulKind string consulPrereqs []capi.ConfigEntry configEntryResource common.ConfigEntryResource - reconciler func(client.Client, *consul.Config, consul.ServerConnectionManager, logr.Logger) testReconciler + reconciler func(client.Client, *capi.Client, logr.Logger) testReconciler compare func(t *testing.T, consul capi.ConfigEntry) }{ { @@ -54,18 +53,15 @@ func TestConfigEntryControllers_createsConfigEntry(t *testing.T) { Spec: v1alpha1.ServiceDefaultsSpec{ Protocol: "http", MaxInboundConnections: 100, - LocalConnectTimeoutMs: 5000, - LocalRequestTimeoutMs: 15000, }, }, - reconciler: func(client client.Client, cfg *consul.Config, watcher consul.ServerConnectionManager, logger logr.Logger) testReconciler { + reconciler: func(client client.Client, consulClient *capi.Client, logger logr.Logger) testReconciler { return &ServiceDefaultsController{ Client: client, Log: logger, ConfigEntryController: &ConfigEntryController{ - ConsulClientConfig: cfg, - ConsulServerConnMgr: watcher, - DatacenterName: datacenterName, + ConsulClient: consulClient, + DatacenterName: datacenterName, }, } }, @@ -74,8 +70,6 @@ func TestConfigEntryControllers_createsConfigEntry(t *testing.T) { require.True(t, ok, "cast error") require.Equal(t, "http", svcDefault.Protocol) require.Equal(t, 100, svcDefault.MaxInboundConnections) - require.Equal(t, 5000, svcDefault.LocalConnectTimeoutMs) - require.Equal(t, 15000, svcDefault.LocalRequestTimeoutMs) }, }, { @@ 
-92,14 +86,13 @@ func TestConfigEntryControllers_createsConfigEntry(t *testing.T) { }, }, }, - reconciler: func(client client.Client, cfg *consul.Config, watcher consul.ServerConnectionManager, logger logr.Logger) testReconciler { + reconciler: func(client client.Client, consulClient *capi.Client, logger logr.Logger) testReconciler { return &ServiceResolverController{ Client: client, Log: logger, ConfigEntryController: &ConfigEntryController{ - ConsulClientConfig: cfg, - ConsulServerConnMgr: watcher, - DatacenterName: datacenterName, + ConsulClient: consulClient, + DatacenterName: datacenterName, }, } }, @@ -123,14 +116,13 @@ func TestConfigEntryControllers_createsConfigEntry(t *testing.T) { }, }, }, - reconciler: func(client client.Client, cfg *consul.Config, watcher consul.ServerConnectionManager, logger logr.Logger) testReconciler { + reconciler: func(client client.Client, consulClient *capi.Client, logger logr.Logger) testReconciler { return &ProxyDefaultsController{ Client: client, Log: logger, ConfigEntryController: &ConfigEntryController{ - ConsulClientConfig: cfg, - ConsulServerConnMgr: watcher, - DatacenterName: datacenterName, + ConsulClient: consulClient, + DatacenterName: datacenterName, }, } }, @@ -154,14 +146,13 @@ func TestConfigEntryControllers_createsConfigEntry(t *testing.T) { }, }, }, - reconciler: func(client client.Client, cfg *consul.Config, watcher consul.ServerConnectionManager, logger logr.Logger) testReconciler { + reconciler: func(client client.Client, consulClient *capi.Client, logger logr.Logger) testReconciler { return &MeshController{ Client: client, Log: logger, ConfigEntryController: &ConfigEntryController{ - ConsulClientConfig: cfg, - ConsulServerConnMgr: watcher, - DatacenterName: datacenterName, + ConsulClient: consulClient, + DatacenterName: datacenterName, }, } }, @@ -198,14 +189,13 @@ func TestConfigEntryControllers_createsConfigEntry(t *testing.T) { }, }, }, - reconciler: func(client client.Client, cfg *consul.Config, watcher consul.ServerConnectionManager, logger logr.Logger) testReconciler { + reconciler: func(client client.Client, consulClient *capi.Client, logger logr.Logger) testReconciler { return &ServiceRouterController{ Client: client, Log: logger, ConfigEntryController: &ConfigEntryController{ - ConsulClientConfig: cfg, - ConsulServerConnMgr: watcher, - DatacenterName: datacenterName, + ConsulClient: consulClient, + DatacenterName: datacenterName, }, } }, @@ -238,14 +228,13 @@ func TestConfigEntryControllers_createsConfigEntry(t *testing.T) { }, }, }, - reconciler: func(client client.Client, cfg *consul.Config, watcher consul.ServerConnectionManager, logger logr.Logger) testReconciler { + reconciler: func(client client.Client, consulClient *capi.Client, logger logr.Logger) testReconciler { return &ServiceSplitterController{ Client: client, Log: logger, ConfigEntryController: &ConfigEntryController{ - ConsulClientConfig: cfg, - ConsulServerConnMgr: watcher, - DatacenterName: datacenterName, + ConsulClient: consulClient, + DatacenterName: datacenterName, }, } }, @@ -317,14 +306,13 @@ func TestConfigEntryControllers_createsConfigEntry(t *testing.T) { }, }, }, - reconciler: func(client client.Client, cfg *consul.Config, watcher consul.ServerConnectionManager, logger logr.Logger) testReconciler { + reconciler: func(client client.Client, consulClient *capi.Client, logger logr.Logger) testReconciler { return &ServiceIntentionsController{ Client: client, Log: logger, ConfigEntryController: &ConfigEntryController{ - ConsulClientConfig: cfg, - 
ConsulServerConnMgr: watcher, - DatacenterName: datacenterName, + ConsulClient: consulClient, + DatacenterName: datacenterName, }, } }, @@ -366,14 +354,13 @@ func TestConfigEntryControllers_createsConfigEntry(t *testing.T) { }, }, }, - reconciler: func(client client.Client, cfg *consul.Config, watcher consul.ServerConnectionManager, logger logr.Logger) testReconciler { + reconciler: func(client client.Client, consulClient *capi.Client, logger logr.Logger) testReconciler { return &IngressGatewayController{ Client: client, Log: logger, ConfigEntryController: &ConfigEntryController{ - ConsulClientConfig: cfg, - ConsulServerConnMgr: watcher, - DatacenterName: datacenterName, + ConsulClient: consulClient, + DatacenterName: datacenterName, }, } }, @@ -406,14 +393,13 @@ func TestConfigEntryControllers_createsConfigEntry(t *testing.T) { }, }, }, - reconciler: func(client client.Client, cfg *consul.Config, watcher consul.ServerConnectionManager, logger logr.Logger) testReconciler { + reconciler: func(client client.Client, consulClient *capi.Client, logger logr.Logger) testReconciler { return &TerminatingGatewayController{ Client: client, Log: logger, ConfigEntryController: &ConfigEntryController{ - ConsulClientConfig: cfg, - ConsulServerConnMgr: watcher, - DatacenterName: datacenterName, + ConsulClient: consulClient, + DatacenterName: datacenterName, }, } }, @@ -439,17 +425,22 @@ func TestConfigEntryControllers_createsConfigEntry(t *testing.T) { s.AddKnownTypes(v1alpha1.GroupVersion, c.configEntryResource) fakeClient := fake.NewClientBuilder().WithScheme(s).WithRuntimeObjects(c.configEntryResource).Build() - testClient := test.TestServerWithMockConnMgrWatcher(t, nil) - testClient.TestServer.WaitForServiceIntentions(t) - consulClient := testClient.APIClient + consul, err := testutil.NewTestServerConfigT(t, nil) + req.NoError(err) + defer consul.Stop() + consul.WaitForServiceIntentions(t) + consulClient, err := capi.NewClient(&capi.Config{ + Address: consul.HTTPAddr, + }) + req.NoError(err) for _, configEntry := range c.consulPrereqs { written, _, err := consulClient.ConfigEntries().Set(configEntry, nil) req.NoError(err) req.True(written) } - r := c.reconciler(fakeClient, testClient.Cfg, testClient.Watcher, logrtest.TestLogger{T: t}) + r := c.reconciler(fakeClient, consulClient, logrtest.TestLogger{T: t}) namespacedName := types.NamespacedName{ Namespace: kubeNS, Name: c.configEntryResource.KubernetesName(), @@ -485,7 +476,7 @@ func TestConfigEntryControllers_updatesConfigEntry(t *testing.T) { consulKind string consulPrereqs []capi.ConfigEntry configEntryResource common.ConfigEntryResource - reconciler func(client.Client, *consul.Config, consul.ServerConnectionManager, logr.Logger) testReconciler + reconciler func(client.Client, *capi.Client, logr.Logger) testReconciler updateF func(common.ConfigEntryResource) compare func(t *testing.T, consul capi.ConfigEntry) }{ @@ -501,14 +492,13 @@ func TestConfigEntryControllers_updatesConfigEntry(t *testing.T) { Protocol: "http", }, }, - reconciler: func(client client.Client, cfg *consul.Config, watcher consul.ServerConnectionManager, logger logr.Logger) testReconciler { + reconciler: func(client client.Client, consulClient *capi.Client, logger logr.Logger) testReconciler { return &ServiceDefaultsController{ Client: client, Log: logger, ConfigEntryController: &ConfigEntryController{ - ConsulClientConfig: cfg, - ConsulServerConnMgr: watcher, - DatacenterName: datacenterName, + ConsulClient: consulClient, + DatacenterName: datacenterName, }, } }, @@ -536,14 
+526,13 @@ func TestConfigEntryControllers_updatesConfigEntry(t *testing.T) { }, }, }, - reconciler: func(client client.Client, cfg *consul.Config, watcher consul.ServerConnectionManager, logger logr.Logger) testReconciler { + reconciler: func(client client.Client, consulClient *capi.Client, logger logr.Logger) testReconciler { return &ServiceResolverController{ Client: client, Log: logger, ConfigEntryController: &ConfigEntryController{ - ConsulClientConfig: cfg, - ConsulServerConnMgr: watcher, - DatacenterName: datacenterName, + ConsulClient: consulClient, + DatacenterName: datacenterName, }, } }, @@ -571,14 +560,13 @@ func TestConfigEntryControllers_updatesConfigEntry(t *testing.T) { }, }, }, - reconciler: func(client client.Client, cfg *consul.Config, watcher consul.ServerConnectionManager, logger logr.Logger) testReconciler { + reconciler: func(client client.Client, consulClient *capi.Client, logger logr.Logger) testReconciler { return &ProxyDefaultsController{ Client: client, Log: logger, ConfigEntryController: &ConfigEntryController{ - ConsulClientConfig: cfg, - ConsulServerConnMgr: watcher, - DatacenterName: datacenterName, + ConsulClient: consulClient, + DatacenterName: datacenterName, }, } }, @@ -606,14 +594,13 @@ func TestConfigEntryControllers_updatesConfigEntry(t *testing.T) { }, }, }, - reconciler: func(client client.Client, cfg *consul.Config, watcher consul.ServerConnectionManager, logger logr.Logger) testReconciler { + reconciler: func(client client.Client, consulClient *capi.Client, logger logr.Logger) testReconciler { return &MeshController{ Client: client, Log: logger, ConfigEntryController: &ConfigEntryController{ - ConsulClientConfig: cfg, - ConsulServerConnMgr: watcher, - DatacenterName: datacenterName, + ConsulClient: consulClient, + DatacenterName: datacenterName, }, } }, @@ -655,14 +642,13 @@ func TestConfigEntryControllers_updatesConfigEntry(t *testing.T) { }, }, }, - reconciler: func(client client.Client, cfg *consul.Config, watcher consul.ServerConnectionManager, logger logr.Logger) testReconciler { + reconciler: func(client client.Client, consulClient *capi.Client, logger logr.Logger) testReconciler { return &ServiceSplitterController{ Client: client, Log: logger, ConfigEntryController: &ConfigEntryController{ - ConsulClientConfig: cfg, - ConsulServerConnMgr: watcher, - DatacenterName: datacenterName, + ConsulClient: consulClient, + DatacenterName: datacenterName, }, } }, @@ -713,14 +699,13 @@ func TestConfigEntryControllers_updatesConfigEntry(t *testing.T) { }, }, }, - reconciler: func(client client.Client, cfg *consul.Config, watcher consul.ServerConnectionManager, logger logr.Logger) testReconciler { + reconciler: func(client client.Client, consulClient *capi.Client, logger logr.Logger) testReconciler { return &ServiceRouterController{ Client: client, Log: logger, ConfigEntryController: &ConfigEntryController{ - ConsulClientConfig: cfg, - ConsulServerConnMgr: watcher, - DatacenterName: datacenterName, + ConsulClient: consulClient, + DatacenterName: datacenterName, }, } }, @@ -787,14 +772,13 @@ func TestConfigEntryControllers_updatesConfigEntry(t *testing.T) { }, }, }, - reconciler: func(client client.Client, cfg *consul.Config, watcher consul.ServerConnectionManager, logger logr.Logger) testReconciler { + reconciler: func(client client.Client, consulClient *capi.Client, logger logr.Logger) testReconciler { return &ServiceIntentionsController{ Client: client, Log: logger, ConfigEntryController: &ConfigEntryController{ - ConsulClientConfig: cfg, - 
ConsulServerConnMgr: watcher, - DatacenterName: datacenterName, + ConsulClient: consulClient, + DatacenterName: datacenterName, }, } }, @@ -835,14 +819,13 @@ func TestConfigEntryControllers_updatesConfigEntry(t *testing.T) { }, }, }, - reconciler: func(client client.Client, cfg *consul.Config, watcher consul.ServerConnectionManager, logger logr.Logger) testReconciler { + reconciler: func(client client.Client, consulClient *capi.Client, logger logr.Logger) testReconciler { return &IngressGatewayController{ Client: client, Log: logger, ConfigEntryController: &ConfigEntryController{ - ConsulClientConfig: cfg, - ConsulServerConnMgr: watcher, - DatacenterName: datacenterName, + ConsulClient: consulClient, + DatacenterName: datacenterName, }, } }, @@ -879,14 +862,13 @@ func TestConfigEntryControllers_updatesConfigEntry(t *testing.T) { }, }, }, - reconciler: func(client client.Client, cfg *consul.Config, watcher consul.ServerConnectionManager, logger logr.Logger) testReconciler { + reconciler: func(client client.Client, consulClient *capi.Client, logger logr.Logger) testReconciler { return &TerminatingGatewayController{ Client: client, Log: logger, ConfigEntryController: &ConfigEntryController{ - ConsulClientConfig: cfg, - ConsulServerConnMgr: watcher, - DatacenterName: datacenterName, + ConsulClient: consulClient, + DatacenterName: datacenterName, }, } }, @@ -916,9 +898,15 @@ func TestConfigEntryControllers_updatesConfigEntry(t *testing.T) { s.AddKnownTypes(v1alpha1.GroupVersion, c.configEntryResource) fakeClient := fake.NewClientBuilder().WithScheme(s).WithRuntimeObjects(c.configEntryResource).Build() - testClient := test.TestServerWithMockConnMgrWatcher(t, nil) - testClient.TestServer.WaitForServiceIntentions(t) - consulClient := testClient.APIClient + consul, err := testutil.NewTestServerConfigT(t, nil) + req.NoError(err) + defer consul.Stop() + + consul.WaitForServiceIntentions(t) + consulClient, err := capi.NewClient(&capi.Config{ + Address: consul.HTTPAddr, + }) + req.NoError(err) // Create any prereqs. for _, configEntry := range c.consulPrereqs { @@ -942,14 +930,14 @@ func TestConfigEntryControllers_updatesConfigEntry(t *testing.T) { Name: c.configEntryResource.KubernetesName(), } // First get it so we have the latest revision number. - err := fakeClient.Get(ctx, namespacedName, c.configEntryResource) + err = fakeClient.Get(ctx, namespacedName, c.configEntryResource) req.NoError(err) // Update the entry in Kube and run reconcile. 
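// The Get above refreshed the object's resourceVersion; updating a stale copy
// could otherwise be rejected with a conflict.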
c.updateF(c.configEntryResource) - err = fakeClient.Update(ctx, c.configEntryResource) + err := fakeClient.Update(ctx, c.configEntryResource) req.NoError(err) - r := c.reconciler(fakeClient, testClient.Cfg, testClient.Watcher, logrtest.TestLogger{T: t}) + r := c.reconciler(fakeClient, consulClient, logrtest.TestLogger{T: t}) resp, err := r.Reconcile(ctx, ctrl.Request{ NamespacedName: namespacedName, }) @@ -975,7 +963,7 @@ func TestConfigEntryControllers_deletesConfigEntry(t *testing.T) { consulKind string consulPrereq []capi.ConfigEntry configEntryResourceWithDeletion common.ConfigEntryResource - reconciler func(client.Client, *consul.Config, consul.ServerConnectionManager, logr.Logger) testReconciler + reconciler func(client.Client, *capi.Client, logr.Logger) testReconciler }{ { kubeKind: "ServiceDefaults", @@ -991,14 +979,13 @@ func TestConfigEntryControllers_deletesConfigEntry(t *testing.T) { Protocol: "http", }, }, - reconciler: func(client client.Client, cfg *consul.Config, watcher consul.ServerConnectionManager, logger logr.Logger) testReconciler { + reconciler: func(client client.Client, consulClient *capi.Client, logger logr.Logger) testReconciler { return &ServiceDefaultsController{ Client: client, Log: logger, ConfigEntryController: &ConfigEntryController{ - ConsulClientConfig: cfg, - ConsulServerConnMgr: watcher, - DatacenterName: datacenterName, + ConsulClient: consulClient, + DatacenterName: datacenterName, }, } }, @@ -1019,14 +1006,13 @@ func TestConfigEntryControllers_deletesConfigEntry(t *testing.T) { }, }, }, - reconciler: func(client client.Client, cfg *consul.Config, watcher consul.ServerConnectionManager, logger logr.Logger) testReconciler { + reconciler: func(client client.Client, consulClient *capi.Client, logger logr.Logger) testReconciler { return &ServiceResolverController{ Client: client, Log: logger, ConfigEntryController: &ConfigEntryController{ - ConsulClientConfig: cfg, - ConsulServerConnMgr: watcher, - DatacenterName: datacenterName, + ConsulClient: consulClient, + DatacenterName: datacenterName, }, } }, @@ -1047,14 +1033,13 @@ func TestConfigEntryControllers_deletesConfigEntry(t *testing.T) { }, }, }, - reconciler: func(client client.Client, cfg *consul.Config, watcher consul.ServerConnectionManager, logger logr.Logger) testReconciler { + reconciler: func(client client.Client, consulClient *capi.Client, logger logr.Logger) testReconciler { return &ProxyDefaultsController{ Client: client, Log: logger, ConfigEntryController: &ConfigEntryController{ - ConsulClientConfig: cfg, - ConsulServerConnMgr: watcher, - DatacenterName: datacenterName, + ConsulClient: consulClient, + DatacenterName: datacenterName, }, } }, @@ -1075,14 +1060,13 @@ func TestConfigEntryControllers_deletesConfigEntry(t *testing.T) { }, }, }, - reconciler: func(client client.Client, cfg *consul.Config, watcher consul.ServerConnectionManager, logger logr.Logger) testReconciler { + reconciler: func(client client.Client, consulClient *capi.Client, logger logr.Logger) testReconciler { return &MeshController{ Client: client, Log: logger, ConfigEntryController: &ConfigEntryController{ - ConsulClientConfig: cfg, - ConsulServerConnMgr: watcher, - DatacenterName: datacenterName, + ConsulClient: consulClient, + DatacenterName: datacenterName, }, } }, @@ -1117,14 +1101,13 @@ func TestConfigEntryControllers_deletesConfigEntry(t *testing.T) { }, }, - reconciler: func(client client.Client, cfg *consul.Config, watcher consul.ServerConnectionManager, logger logr.Logger) testReconciler { + reconciler: 
func(client client.Client, consulClient *capi.Client, logger logr.Logger) testReconciler { return &ServiceRouterController{ Client: client, Log: logger, ConfigEntryController: &ConfigEntryController{ - ConsulClientConfig: cfg, - ConsulServerConnMgr: watcher, - DatacenterName: datacenterName, + ConsulClient: consulClient, + DatacenterName: datacenterName, }, } }, @@ -1154,14 +1137,13 @@ func TestConfigEntryControllers_deletesConfigEntry(t *testing.T) { }, }, }, - reconciler: func(client client.Client, cfg *consul.Config, watcher consul.ServerConnectionManager, logger logr.Logger) testReconciler { + reconciler: func(client client.Client, consulClient *capi.Client, logger logr.Logger) testReconciler { return &ServiceSplitterController{ Client: client, Log: logger, ConfigEntryController: &ConfigEntryController{ - ConsulClientConfig: cfg, - ConsulServerConnMgr: watcher, - DatacenterName: datacenterName, + ConsulClient: consulClient, + DatacenterName: datacenterName, }, } }, @@ -1221,14 +1203,13 @@ func TestConfigEntryControllers_deletesConfigEntry(t *testing.T) { }, }, }, - reconciler: func(client client.Client, cfg *consul.Config, watcher consul.ServerConnectionManager, logger logr.Logger) testReconciler { + reconciler: func(client client.Client, consulClient *capi.Client, logger logr.Logger) testReconciler { return &ServiceIntentionsController{ Client: client, Log: logger, ConfigEntryController: &ConfigEntryController{ - ConsulClientConfig: cfg, - ConsulServerConnMgr: watcher, - DatacenterName: datacenterName, + ConsulClient: consulClient, + DatacenterName: datacenterName, }, } }, @@ -1260,14 +1241,13 @@ func TestConfigEntryControllers_deletesConfigEntry(t *testing.T) { }, }, }, - reconciler: func(client client.Client, cfg *consul.Config, watcher consul.ServerConnectionManager, logger logr.Logger) testReconciler { + reconciler: func(client client.Client, consulClient *capi.Client, logger logr.Logger) testReconciler { return &IngressGatewayController{ Client: client, Log: logger, ConfigEntryController: &ConfigEntryController{ - ConsulClientConfig: cfg, - ConsulServerConnMgr: watcher, - DatacenterName: datacenterName, + ConsulClient: consulClient, + DatacenterName: datacenterName, }, } }, @@ -1294,14 +1274,13 @@ func TestConfigEntryControllers_deletesConfigEntry(t *testing.T) { }, }, }, - reconciler: func(client client.Client, cfg *consul.Config, watcher consul.ServerConnectionManager, logger logr.Logger) testReconciler { + reconciler: func(client client.Client, consulClient *capi.Client, logger logr.Logger) testReconciler { return &TerminatingGatewayController{ Client: client, Log: logger, ConfigEntryController: &ConfigEntryController{ - ConsulClientConfig: cfg, - ConsulServerConnMgr: watcher, - DatacenterName: datacenterName, + ConsulClient: consulClient, + DatacenterName: datacenterName, }, } }, @@ -1316,9 +1295,15 @@ func TestConfigEntryControllers_deletesConfigEntry(t *testing.T) { s.AddKnownTypes(v1alpha1.GroupVersion, c.configEntryResourceWithDeletion) fakeClient := fake.NewClientBuilder().WithScheme(s).WithRuntimeObjects(c.configEntryResourceWithDeletion).Build() - testClient := test.TestServerWithMockConnMgrWatcher(t, nil) - testClient.TestServer.WaitForServiceIntentions(t) - consulClient := testClient.APIClient + consul, err := testutil.NewTestServerConfigT(t, nil) + req.NoError(err) + defer consul.Stop() + + consul.WaitForServiceIntentions(t) + consulClient, err := capi.NewClient(&capi.Config{ + Address: consul.HTTPAddr, + }) + req.NoError(err) // Create any prereqs. 
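// Each prereq is a config entry the case under test depends on (for example, a
// ServiceDefaults with Protocol "http" for a splitter or router that references
// the service); it is written with ConfigEntries().Set before reconciling so
// that the entry under test passes Consul's validation.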
for _, configEntry := range c.consulPrereq { @@ -1341,7 +1326,7 @@ func TestConfigEntryControllers_deletesConfigEntry(t *testing.T) { Namespace: kubeNS, Name: c.configEntryResourceWithDeletion.KubernetesName(), } - r := c.reconciler(fakeClient, testClient.Cfg, testClient.Watcher, logrtest.TestLogger{T: t}) + r := c.reconciler(fakeClient, consulClient, logrtest.TestLogger{T: t}) resp, err := r.Reconcile(context.Background(), ctrl.Request{ NamespacedName: namespacedName, }) @@ -1377,22 +1362,18 @@ func TestConfigEntryControllers_errorUpdatesSyncStatus(t *testing.T) { s.AddKnownTypes(v1alpha1.GroupVersion, svcDefaults) fakeClient := fake.NewClientBuilder().WithScheme(s).WithRuntimeObjects(svcDefaults).Build() - testClient := test.TestServerWithMockConnMgrWatcher(t, nil) - testClient.TestServer.WaitForServiceIntentions(t) - - // Get watcher state to make sure we can get a healthy address. - _, err := testClient.Watcher.State() - require.NoError(t, err) - // Stop the server before calling reconcile imitating a server that's not running. - _ = testClient.TestServer.Stop() - + // Construct a Consul client that will error by giving it + // an unresolvable address. + consulClient, err := capi.NewClient(&capi.Config{ + Address: "incorrect-address", + }) + req.NoError(err) reconciler := &ServiceDefaultsController{ Client: fakeClient, Log: logrtest.TestLogger{T: t}, ConfigEntryController: &ConfigEntryController{ - ConsulClientConfig: testClient.Cfg, - ConsulServerConnMgr: testClient.Watcher, - DatacenterName: datacenterName, + ConsulClient: consulClient, + DatacenterName: datacenterName, }, } @@ -1406,8 +1387,7 @@ func TestConfigEntryControllers_errorUpdatesSyncStatus(t *testing.T) { }) req.Error(err) - expErr := fmt.Sprintf("Get \"http://127.0.0.1:%d/v1/config/%s/%s\": dial tcp 127.0.0.1:%d: connect: connection refused", - testClient.Cfg.HTTPPort, capi.ServiceDefaults, svcDefaults.ConsulName(), testClient.Cfg.HTTPPort) + expErr := fmt.Sprintf("Get \"http://incorrect-address/v1/config/%s/%s\": dial tcp: lookup incorrect-address", capi.ServiceDefaults, svcDefaults.ConsulName()) req.Contains(err.Error(), expErr) req.False(resp.Requeue) @@ -1450,22 +1430,27 @@ func TestConfigEntryControllers_setsSyncedToTrue(t *testing.T) { // The config entry exists in kube but its status will be nil. fakeClient := fake.NewClientBuilder().WithScheme(s).WithRuntimeObjects(svcDefaults).Build() - testClient := test.TestServerWithMockConnMgrWatcher(t, nil) - testClient.TestServer.WaitForServiceIntentions(t) - consulClient := testClient.APIClient + consul, err := testutil.NewTestServerConfigT(t, nil) + req.NoError(err) + defer consul.Stop() + + consul.WaitForServiceIntentions(t) + consulClient, err := capi.NewClient(&capi.Config{ + Address: consul.HTTPAddr, + }) + req.NoError(err) reconciler := &ServiceDefaultsController{ Client: fakeClient, Log: logrtest.TestLogger{T: t}, ConfigEntryController: &ConfigEntryController{ - ConsulClientConfig: testClient.Cfg, - ConsulServerConnMgr: testClient.Watcher, - DatacenterName: datacenterName, + ConsulClient: consulClient, + DatacenterName: datacenterName, }, } // Create the resource in Consul to mimic that it was created // successfully (but its status hasn't been updated). 
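// ConfigEntries().Set is an upsert, so seeding the entry here means the
// reconcile below finds it already present in Consul and only needs to flip
// the Kubernetes resource's Synced condition to true.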
- _, _, err := consulClient.ConfigEntries().Set(svcDefaults.ToConsul(datacenterName), nil) + _, _, err = consulClient.ConfigEntries().Set(svcDefaults.ToConsul(datacenterName), nil) require.NoError(t, err) namespacedName := types.NamespacedName{ @@ -1522,9 +1507,15 @@ func TestConfigEntryControllers_doesNotCreateUnownedConfigEntry(t *testing.T) { s.AddKnownTypes(v1alpha1.GroupVersion, svcDefaults) fakeClient := fake.NewClientBuilder().WithScheme(s).WithRuntimeObjects(svcDefaults).Build() - testClient := test.TestServerWithMockConnMgrWatcher(t, nil) - testClient.TestServer.WaitForServiceIntentions(t) - consulClient := testClient.APIClient + consul, err := testutil.NewTestServerConfigT(t, nil) + req.NoError(err) + defer consul.Stop() + + consul.WaitForServiceIntentions(t) + consulClient, err := capi.NewClient(&capi.Config{ + Address: consul.HTTPAddr, + }) + req.NoError(err) // We haven't run reconcile yet. We must create the config entry // in Consul ourselves in a different datacenter. @@ -1541,7 +1532,7 @@ func TestConfigEntryControllers_doesNotCreateUnownedConfigEntry(t *testing.T) { Name: svcDefaults.KubernetesName(), } // First get it so we have the latest revision number. - err := fakeClient.Get(ctx, namespacedName, svcDefaults) + err = fakeClient.Get(ctx, namespacedName, svcDefaults) req.NoError(err) // Attempt to create the entry in Kube and run reconcile. @@ -1549,9 +1540,8 @@ func TestConfigEntryControllers_doesNotCreateUnownedConfigEntry(t *testing.T) { Client: fakeClient, Log: logrtest.TestLogger{T: t}, ConfigEntryController: &ConfigEntryController{ - ConsulClientConfig: testClient.Cfg, - ConsulServerConnMgr: testClient.Watcher, - DatacenterName: datacenterName, + ConsulClient: consulClient, + DatacenterName: datacenterName, }, } resp, err := reconciler.Reconcile(ctx, ctrl.Request{ @@ -1606,16 +1596,21 @@ func TestConfigEntryControllers_doesNotDeleteUnownedConfig(t *testing.T) { s.AddKnownTypes(v1alpha1.GroupVersion, svcDefaultsWithDeletion) fakeClient := fake.NewClientBuilder().WithScheme(s).WithRuntimeObjects(svcDefaultsWithDeletion).Build() - testClient := test.TestServerWithMockConnMgrWatcher(t, nil) - testClient.TestServer.WaitForServiceIntentions(t) - consulClient := testClient.APIClient + consul, err := testutil.NewTestServerConfigT(t, nil) + req.NoError(err) + defer consul.Stop() + + consul.WaitForServiceIntentions(t) + consulClient, err := capi.NewClient(&capi.Config{ + Address: consul.HTTPAddr, + }) + req.NoError(err) reconciler := &ServiceDefaultsController{ Client: fakeClient, Log: logrtest.TestLogger{T: t}, ConfigEntryController: &ConfigEntryController{ - ConsulClientConfig: testClient.Cfg, - ConsulServerConnMgr: testClient.Watcher, - DatacenterName: datacenterName, + ConsulClient: consulClient, + DatacenterName: datacenterName, }, } @@ -1688,8 +1683,15 @@ func TestConfigEntryControllers_updatesStatusWhenDeleteFails(t *testing.T) { fakeClient := fake.NewClientBuilder().WithScheme(s).WithRuntimeObjects(defaults, splitter).Build() - testClient := test.TestServerWithMockConnMgrWatcher(t, nil) - testClient.TestServer.WaitForServiceIntentions(t) + consul, err := testutil.NewTestServerConfigT(t, nil) + require.NoError(t, err) + defer consul.Stop() + + consul.WaitForServiceIntentions(t) + consulClient, err := capi.NewClient(&capi.Config{ + Address: consul.HTTPAddr, + }) + require.NoError(t, err) logger := logrtest.TestLogger{T: t} @@ -1697,18 +1699,16 @@ func TestConfigEntryControllers_updatesStatusWhenDeleteFails(t *testing.T) { Client: fakeClient, Log: logger, 
ConfigEntryController: &ConfigEntryController{ - ConsulClientConfig: testClient.Cfg, - ConsulServerConnMgr: testClient.Watcher, - DatacenterName: datacenterName, + ConsulClient: consulClient, + DatacenterName: datacenterName, }, } svcSplitterReconciler := ServiceSplitterController{ Client: fakeClient, Log: logger, ConfigEntryController: &ConfigEntryController{ - ConsulClientConfig: testClient.Cfg, - ConsulServerConnMgr: testClient.Watcher, - DatacenterName: datacenterName, + ConsulClient: consulClient, + DatacenterName: datacenterName, }, } @@ -1819,9 +1819,15 @@ func TestConfigEntryController_Migration(t *testing.T) { s.AddKnownTypes(v1alpha1.GroupVersion, &v1alpha1.ServiceDefaults{}) fakeClient := fake.NewClientBuilder().WithScheme(s).WithRuntimeObjects(&c.KubeResource).Build() - testClient := test.TestServerWithMockConnMgrWatcher(t, nil) - testClient.TestServer.WaitForServiceIntentions(t) - consulClient := testClient.APIClient + consul, err := testutil.NewTestServerConfigT(t, nil) + require.NoError(t, err) + defer consul.Stop() + + consul.WaitForServiceIntentions(t) + consulClient, err := capi.NewClient(&capi.Config{ + Address: consul.HTTPAddr, + }) + require.NoError(t, err) // Create the service-defaults in Consul. success, _, err := consulClient.ConfigEntries().Set(&c.ConsulResource, nil) @@ -1834,9 +1840,8 @@ func TestConfigEntryController_Migration(t *testing.T) { Client: fakeClient, Log: logger, ConfigEntryController: &ConfigEntryController{ - ConsulClientConfig: testClient.Cfg, - ConsulServerConnMgr: testClient.Watcher, - DatacenterName: datacenterName, + ConsulClient: consulClient, + DatacenterName: datacenterName, }, } diff --git a/control-plane/controller/exportedservices_controller_ent_test.go b/control-plane/controller/exportedservices_controller_ent_test.go index dd91c49b57..ec8f771586 100644 --- a/control-plane/controller/exportedservices_controller_ent_test.go +++ b/control-plane/controller/exportedservices_controller_ent_test.go @@ -12,8 +12,8 @@ import ( "github.com/hashicorp/consul-k8s/control-plane/api/common" "github.com/hashicorp/consul-k8s/control-plane/api/v1alpha1" "github.com/hashicorp/consul-k8s/control-plane/controller" - "github.com/hashicorp/consul-k8s/control-plane/helper/test" capi "github.com/hashicorp/consul/api" + "github.com/hashicorp/consul/sdk/testutil" "github.com/stretchr/testify/require" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -94,9 +94,14 @@ func TestExportedServicesController_createsExportedServices(tt *testing.T) { s.AddKnownTypes(v1alpha1.GroupVersion, exportedServices) ctx := context.Background() - testClient := test.TestServerWithMockConnMgrWatcher(t, nil) - testClient.TestServer.WaitForServiceIntentions(t) - consulClient := testClient.APIClient + consul, err := testutil.NewTestServerConfigT(t, nil) + req.NoError(err) + defer consul.Stop() + consul.WaitForServiceIntentions(t) + consulClient, err := capi.NewClient(&capi.Config{ + Address: consul.HTTPAddr, + }) + req.NoError(err) fakeClient := fake.NewClientBuilder().WithScheme(s).WithRuntimeObjects(exportedServices).Build() @@ -105,8 +110,7 @@ func TestExportedServicesController_createsExportedServices(tt *testing.T) { Log: logrtest.TestLogger{T: t}, Scheme: s, ConfigEntryController: &controller.ConfigEntryController{ - ConsulClientConfig: testClient.Cfg, - ConsulServerConnMgr: testClient.Watcher, + ConsulClient: consulClient, EnableConsulNamespaces: true, EnableNSMirroring: c.Mirror, NSMirroringPrefix: c.MirrorPrefix, @@ -210,9 +214,15 @@ func 
TestExportedServicesController_updatesExportedServices(tt *testing.T) { s.AddKnownTypes(v1alpha1.GroupVersion, exportedServices) ctx := context.Background() - testClient := test.TestServerWithMockConnMgrWatcher(t, nil) - testClient.TestServer.WaitForServiceIntentions(t) - consulClient := testClient.APIClient + consul, err := testutil.NewTestServerConfigT(t, nil) + req.NoError(err) + defer consul.Stop() + consul.WaitForServiceIntentions(t) + consulClient, err := capi.NewClient(&capi.Config{ + Address: consul.HTTPAddr, + }) + req.NoError(err) + fakeClient := fake.NewClientBuilder().WithScheme(s).WithRuntimeObjects(exportedServices).Build() controller := &controller.ExportedServicesController{ @@ -220,8 +230,7 @@ func TestExportedServicesController_updatesExportedServices(tt *testing.T) { Log: logrtest.TestLogger{T: t}, Scheme: s, ConfigEntryController: &controller.ConfigEntryController{ - ConsulClientConfig: testClient.Cfg, - ConsulServerConnMgr: testClient.Watcher, + ConsulClient: consulClient, EnableConsulNamespaces: true, EnableNSMirroring: c.Mirror, NSMirroringPrefix: c.MirrorPrefix, @@ -250,7 +259,7 @@ func TestExportedServicesController_updatesExportedServices(tt *testing.T) { // Now update it. { // First get it so we have the latest revision number. - err := fakeClient.Get(ctx, types.NamespacedName{ + err = fakeClient.Get(ctx, types.NamespacedName{ Namespace: c.SourceKubeNS, Name: exportedServices.KubernetesName(), }, exportedServices) @@ -258,7 +267,7 @@ func TestExportedServicesController_updatesExportedServices(tt *testing.T) { // Update the resource. exportedServices.Spec.Services[0].Name = "backend" - err = fakeClient.Update(ctx, exportedServices) + err := fakeClient.Update(ctx, exportedServices) req.NoError(err) resp, err := controller.Reconcile(ctx, ctrl.Request{ @@ -347,9 +356,14 @@ func TestExportedServicesController_deletesExportedServices(tt *testing.T) { } s.AddKnownTypes(v1alpha1.GroupVersion, exportedServices) - testClient := test.TestServerWithMockConnMgrWatcher(t, nil) - testClient.TestServer.WaitForServiceIntentions(t) - consulClient := testClient.APIClient + consul, err := testutil.NewTestServerConfigT(t, nil) + req.NoError(err) + defer consul.Stop() + consul.WaitForServiceIntentions(t) + consulClient, err := capi.NewClient(&capi.Config{ + Address: consul.HTTPAddr, + }) + req.NoError(err) fakeClient := fake.NewClientBuilder().WithScheme(s).WithRuntimeObjects(exportedServices).Build() @@ -358,8 +372,7 @@ func TestExportedServicesController_deletesExportedServices(tt *testing.T) { Log: logrtest.TestLogger{T: t}, Scheme: s, ConfigEntryController: &controller.ConfigEntryController{ - ConsulClientConfig: testClient.Cfg, - ConsulServerConnMgr: testClient.Watcher, + ConsulClient: consulClient, EnableConsulNamespaces: true, EnableNSMirroring: c.Mirror, NSMirroringPrefix: c.MirrorPrefix, diff --git a/control-plane/go.mod b/control-plane/go.mod index 3aef2d1424..8d0f894729 100644 --- a/control-plane/go.mod +++ b/control-plane/go.mod @@ -9,15 +9,12 @@ require ( github.com/google/go-cmp v0.5.7 github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 github.com/hashicorp/consul-k8s/control-plane/cni v0.0.0-20220831174802-b8af65262de8 - github.com/hashicorp/consul-server-connection-manager v0.0.0-20220922180412-01c5be1c636f - github.com/hashicorp/consul/api v1.10.1-0.20221005170644-13da2c5fad69 + github.com/hashicorp/consul/api v1.10.1-0.20220913205944-e743eefbd104 github.com/hashicorp/consul/sdk v0.11.0 github.com/hashicorp/go-discover v0.0.0-20200812215701-c4b85f6ed31f 
github.com/hashicorp/go-hclog v1.2.2 github.com/hashicorp/go-multierror v1.1.1 - github.com/hashicorp/go-netaddrs v0.0.0-20220509001840-90ed9d26ec46 - github.com/hashicorp/go-rootcerts v1.0.2 - github.com/hashicorp/serf v0.10.1 + github.com/hashicorp/serf v0.9.7 github.com/kr/text v0.2.0 github.com/miekg/dns v1.1.41 github.com/mitchellh/cli v1.1.0 @@ -49,12 +46,11 @@ require ( github.com/Azure/go-autorest/autorest/validation v0.3.0 // indirect github.com/Azure/go-autorest/logger v0.2.1 // indirect github.com/Azure/go-autorest/tracing v0.6.0 // indirect - github.com/armon/go-metrics v0.4.1 // indirect + github.com/armon/go-metrics v0.3.10 // indirect github.com/armon/go-radix v1.0.0 // indirect github.com/aws/aws-sdk-go v1.25.41 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/bgentry/speakeasy v0.1.0 // indirect - github.com/cenkalti/backoff/v4 v4.1.3 // indirect github.com/cespare/xxhash/v2 v2.1.1 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/denverdino/aliyungo v0.0.0-20170926055100-d3308649c661 // indirect @@ -73,10 +69,10 @@ require ( github.com/googleapis/gax-go/v2 v2.0.5 // indirect github.com/googleapis/gnostic v0.5.5 // indirect github.com/gophercloud/gophercloud v0.1.0 // indirect - github.com/hashicorp/consul/proto-public v0.1.0 // indirect github.com/hashicorp/errwrap v1.0.0 // indirect github.com/hashicorp/go-cleanhttp v0.5.2 // indirect github.com/hashicorp/go-immutable-radix v1.3.0 // indirect + github.com/hashicorp/go-rootcerts v1.0.2 // indirect github.com/hashicorp/go-uuid v1.0.2 // indirect github.com/hashicorp/golang-lru v0.5.4 // indirect github.com/hashicorp/mdns v1.0.4 // indirect @@ -114,7 +110,7 @@ require ( golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd // indirect golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8 // indirect golang.org/x/sync v0.0.0-20210220032951-036812b2e83c // indirect - golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10 // indirect + golang.org/x/sys v0.0.0-20220708085239-5a0f0661e09d // indirect golang.org/x/term v0.0.0-20220526004731-065cf7ba2467 // indirect golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect google.golang.org/api v0.43.0 // indirect @@ -133,6 +129,4 @@ require ( sigs.k8s.io/yaml v1.2.0 // indirect ) -replace github.com/hashicorp/consul/sdk => github.com/hashicorp/consul/sdk v0.4.1-0.20221021205723-cc843c4be892 - -go 1.19 +go 1.18 diff --git a/control-plane/go.sum b/control-plane/go.sum index 41ed7e8566..00ee1f4970 100644 --- a/control-plane/go.sum +++ b/control-plane/go.sum @@ -96,9 +96,8 @@ github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kd github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= +github.com/armon/go-metrics v0.3.10 h1:FR+drcQStOe+32sYyJYyZ7FIdgoGGBnwLl+flodp8Uo= github.com/armon/go-metrics v0.3.10/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc= -github.com/armon/go-metrics v0.4.1 h1:hR91U9KYmb6bLBYLQjyM+3j+rcd/UhE+G78SFnF8gJA= -github.com/armon/go-metrics v0.4.1/go.mod h1:E6amYzXo6aW1tqzoZGT755KkbgrJsSdpwZ+3JqfkOG4= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/armon/go-radix v1.0.0 h1:F4z6KzEeeQIMeLFa97iZU6vupzoecKdU5TX24SNppXI= 
github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= @@ -118,8 +117,6 @@ github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJm github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEet03Pg2g16Swow4= github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= -github.com/cenkalti/backoff/v4 v4.1.3 h1:cFAlzYUlVYDysBEH2T5hyJZMh3+5+WCBvSnK6Q8UtC4= -github.com/cenkalti/backoff/v4 v4.1.3/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/certifi/gocertifi v0.0.0-20191021191039-0944d244cd40/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA= github.com/certifi/gocertifi v0.0.0-20200922220541-2c3bb06c6054/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA= @@ -344,15 +341,13 @@ github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/hashicorp/consul-k8s/control-plane/cni v0.0.0-20220831174802-b8af65262de8 h1:TQY0oKtLV15UNYWeSkTxi4McBIyLecsEtbc/VfxvbYA= github.com/hashicorp/consul-k8s/control-plane/cni v0.0.0-20220831174802-b8af65262de8/go.mod h1:aw35GB76URgbtxaSSMxbOetbG7YEHHPkIX3/SkTBaWc= -github.com/hashicorp/consul-server-connection-manager v0.0.0-20220922180412-01c5be1c636f h1:niyK8S2Vb48YumFkxsqzSl+72tDXgvpAEO6KrL3WwAw= -github.com/hashicorp/consul-server-connection-manager v0.0.0-20220922180412-01c5be1c636f/go.mod h1:I56VZ1V7WN8/oPHswKDywfepvD7rB1RrTE4fRrNz3Wc= github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q= -github.com/hashicorp/consul/api v1.10.1-0.20221005170644-13da2c5fad69 h1:IALuDSO0f6x0txq/tjUDF3sShyDMT8dmjn9af6Ik8BA= -github.com/hashicorp/consul/api v1.10.1-0.20221005170644-13da2c5fad69/go.mod h1:T09kWtKqm8j1S9yTd1r0hVhfOyPrvLb0zb6dPKpNXxQ= -github.com/hashicorp/consul/proto-public v0.1.0 h1:O0LSmCqydZi363hsqc6n2v5sMz3usQMXZF6ziK3SzXU= -github.com/hashicorp/consul/proto-public v0.1.0/go.mod h1:vs2KkuWwtjkIgA5ezp4YKPzQp4GitV+q/+PvksrA92k= -github.com/hashicorp/consul/sdk v0.4.1-0.20221021205723-cc843c4be892 h1:jw0NwPmNPr5CxAU04hACdj61JSaJBKZ0FdBo+kwfNp4= -github.com/hashicorp/consul/sdk v0.4.1-0.20221021205723-cc843c4be892/go.mod h1:yPkX5Q6CsxTFMjQQDJwzeNmUUF5NUGGbrDsv9wTb8cw= +github.com/hashicorp/consul/api v1.10.1-0.20220913205944-e743eefbd104 h1:NW0jZq0suX2gfHVFmKuJ5DGLXSP7qN9FmjQOU764fFQ= +github.com/hashicorp/consul/api v1.10.1-0.20220913205944-e743eefbd104/go.mod h1:bcaw5CSZ7NE9qfOfKCI1xb7ZKjzu/MyvQkCLTfqLqxQ= +github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= +github.com/hashicorp/consul/sdk v0.10.0/go.mod h1:yPkX5Q6CsxTFMjQQDJwzeNmUUF5NUGGbrDsv9wTb8cw= +github.com/hashicorp/consul/sdk v0.11.0 h1:HRzj8YSCln2yGgCumN5CL8lYlD3gBurnervJRJAZyC4= +github.com/hashicorp/consul/sdk v0.11.0/go.mod h1:yPkX5Q6CsxTFMjQQDJwzeNmUUF5NUGGbrDsv9wTb8cw= github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= @@ -375,8 +370,6 @@ github.com/hashicorp/go-multierror v1.0.0/go.mod 
h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHh github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA= github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= -github.com/hashicorp/go-netaddrs v0.0.0-20220509001840-90ed9d26ec46 h1:BysEAd6g+0HNJ0v99u7KbSObjzxC7rfVQ6yVx6HxrvU= -github.com/hashicorp/go-netaddrs v0.0.0-20220509001840-90ed9d26ec46/go.mod h1:TjKbv4FhIra0YJ82mws5+4QXOhzv09eAWs4jtOBI4IU= github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc= @@ -401,11 +394,12 @@ github.com/hashicorp/mdns v1.0.1/go.mod h1:4gW7WsVCke5TE7EPeYliwHlRUyBtfCwuFwuMg github.com/hashicorp/mdns v1.0.4 h1:sY0CMhFmjIPDMlTB+HfymFHCaYLhgifZ0QhjaYKD/UQ= github.com/hashicorp/mdns v1.0.4/go.mod h1:mtBihi+LeNXGtG8L9dX59gAEa12BDtBQSp4v/YAJqrc= github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= -github.com/hashicorp/memberlist v0.5.0 h1:EtYPN8DpAURiapus508I4n9CzHs2W+8NZGbmmR/prTM= -github.com/hashicorp/memberlist v0.5.0/go.mod h1:yvyXLpo0QaGE59Y7hDTsTzDD25JYBZ4mHgHUZ8lrOI0= +github.com/hashicorp/memberlist v0.3.0/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE= +github.com/hashicorp/memberlist v0.3.1 h1:MXgUXLqva1QvpVEDQW1IQLG0wivQAtmFlHRQ+1vWZfM= +github.com/hashicorp/memberlist v0.3.1/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE= github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= -github.com/hashicorp/serf v0.10.1 h1:Z1H2J60yRKvfDYAOZLd2MU0ND4AH/WDz7xYHDWQsIPY= -github.com/hashicorp/serf v0.10.1/go.mod h1:yL2t6BqATOLGc5HF7qbFkTfXoPIY0WZdWHfEvMqbG+4= +github.com/hashicorp/serf v0.9.7 h1:hkdgbqizGQHuU5IPqYM1JdSMV8nKfpuOnZYXssk9muY= +github.com/hashicorp/serf v0.9.7/go.mod h1:TXZNMjZQijwlDvp+r0b63xZ45H7JmCmgg4gpTwn9UV4= github.com/hashicorp/vic v1.5.1-0.20190403131502-bbfe86ec9443 h1:O/pT5C1Q3mVXMyuqg7yuAWUg/jMZR1/0QTzTRdNR6Uw= github.com/hashicorp/vic v1.5.1-0.20190403131502-bbfe86ec9443/go.mod h1:bEpDU35nTu0ey1EXjwNwPjI9xErAsoOCmcMb9GKvyxo= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= @@ -492,6 +486,7 @@ github.com/mitchellh/cli v1.1.0/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXx github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg= github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= @@ -905,8 +900,8 @@ golang.org/x/sys v0.0.0-20210817190340-bfb29a6856f2/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10 h1:WIoqL4EROvwiPdUtaip4VcDdpZ4kha7wBWZrbVKCIZg= -golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220708085239-5a0f0661e09d h1:/m5NbqQelATgoSPVC2Z23sR4kVNokFwDDyWh/3rGY+I= +golang.org/x/sys v0.0.0-20220708085239-5a0f0661e09d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= diff --git a/control-plane/helper/test/test_util.go b/control-plane/helper/test/test_util.go index 0ad4601fde..08b28b00fd 100644 --- a/control-plane/helper/test/test_util.go +++ b/control-plane/helper/test/test_util.go @@ -2,7 +2,6 @@ package test import ( "fmt" - "net" "net/http" "net/http/httptest" "os" @@ -10,11 +9,8 @@ import ( "testing" "time" - "github.com/hashicorp/consul-k8s/control-plane/consul" "github.com/hashicorp/consul-k8s/control-plane/helper/cert" - "github.com/hashicorp/consul-server-connection-manager/discovery" "github.com/hashicorp/consul/api" - "github.com/hashicorp/consul/sdk/testutil" "github.com/stretchr/testify/require" ) @@ -22,99 +18,44 @@ const ( componentAuthMethod = "consul-k8s-component-auth-method" ) -type TestServerClient struct { - TestServer *testutil.TestServer - APIClient *api.Client - Cfg *consul.Config - Watcher consul.ServerConnectionManager -} - -func TestServerWithMockConnMgrWatcher(t *testing.T, callback testutil.ServerConfigCallback) *TestServerClient { - t.Helper() - - var cfg *testutil.TestServerConfig - consulServer, err := testutil.NewTestServerConfigT(t, func(c *testutil.TestServerConfig) { - if callback != nil { - callback(c) - } - cfg = c - }) - require.NoError(t, err) - t.Cleanup(func() { - _ = consulServer.Stop() - }) - consulServer.WaitForSerfCheck(t) - - consulConfig := &consul.Config{ - APIClientConfig: &api.Config{Address: consulServer.HTTPAddr}, - HTTPPort: cfg.Ports.HTTP, - } - if cfg.ACL.Tokens.InitialManagement != "" { - consulConfig.APIClientConfig.Token = cfg.ACL.Tokens.InitialManagement - } - client, err := api.NewClient(consulConfig.APIClientConfig) - require.NoError(t, err) - - return &TestServerClient{ - TestServer: consulServer, - APIClient: client, - Cfg: consulConfig, - Watcher: MockConnMgrForIPAndPort("127.0.0.1", cfg.Ports.GRPC), - } -} - -func MockConnMgrForIPAndPort(ip string, port int) *consul.MockServerConnectionManager { - parsedIP := net.ParseIP(ip) - connMgr := &consul.MockServerConnectionManager{} - mockState := discovery.State{ - Address: discovery.Addr{ - TCPAddr: net.TCPAddr{ - IP: parsedIP, - Port: port, - }, - }} - connMgr.On("State").Return(mockState, nil) - connMgr.On("Run").Return(nil) - connMgr.On("Stop").Return(nil) - return connMgr -} - // GenerateServerCerts generates Consul CA // and a server certificate and saves them to temp files. // It returns file names in this order: // CA certificate, server certificate, and server key. 
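// A typical call site looks roughly like this (a sketch: the TLSConfig wiring
// is an assumption based on the hashicorp/consul/api package, not something
// this file shows):
//
//	caFile, certFile, keyFile := test.GenerateServerCerts(t)
//	cfg := api.DefaultConfig()
//	cfg.TLSConfig = api.TLSConfig{CAFile: caFile, CertFile: certFile, KeyFile: keyFile}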
func GenerateServerCerts(t *testing.T) (string, string, string) { + require := require.New(t) + caFile, err := os.CreateTemp("", "ca") - require.NoError(t, err) + require.NoError(err) certFile, err := os.CreateTemp("", "cert") - require.NoError(t, err) + require.NoError(err) certKeyFile, err := os.CreateTemp("", "key") - require.NoError(t, err) + require.NoError(err) // Generate CA signer, _, caCertPem, caCertTemplate, err := cert.GenerateCA("Consul Agent CA - Test") - require.NoError(t, err) + require.NoError(err) // Generate Server Cert name := "server.dc1.consul" hosts := []string{name, "localhost", "127.0.0.1"} certPem, keyPem, err := cert.GenerateCert(name, 1*time.Hour, caCertTemplate, signer, hosts) - require.NoError(t, err) + require.NoError(err) // Write certs and key to files _, err = caFile.WriteString(caCertPem) - require.NoError(t, err) + require.NoError(err) _, err = certFile.WriteString(certPem) - require.NoError(t, err) + require.NoError(err) _, err = certKeyFile.WriteString(keyPem) - require.NoError(t, err) + require.NoError(err) t.Cleanup(func() { - _ = os.RemoveAll(caFile.Name()) - _ = os.RemoveAll(certFile.Name()) - _ = os.RemoveAll(certKeyFile.Name()) + os.Remove(caFile.Name()) + os.Remove(certFile.Name()) + os.Remove(certKeyFile.Name()) }) return caFile.Name(), certFile.Name(), certKeyFile.Name() } diff --git a/control-plane/subcommand/acl-init/command_test.go b/control-plane/subcommand/acl-init/command_test.go index 7dd77095d0..f069b5ec98 100644 --- a/control-plane/subcommand/acl-init/command_test.go +++ b/control-plane/subcommand/acl-init/command_test.go @@ -250,7 +250,7 @@ func TestRun_WithAclAuthMethodDefined_WritesConfigJson_WithTokenMatchingSinkFile tmpDir, err := os.MkdirTemp("", "") require.NoError(t, err) t.Cleanup(func() { - os.RemoveAll(tokenFile) + os.Remove(tokenFile) os.RemoveAll(tmpDir) }) diff --git a/control-plane/subcommand/common/common.go b/control-plane/subcommand/common/common.go index e3a569ddf6..5b8479c9ab 100644 --- a/control-plane/subcommand/common/common.go +++ b/control-plane/subcommand/common/common.go @@ -42,11 +42,6 @@ const ( // Logger returns an hclog instance with log level set and JSON logging enabled/disabled, or an error if level is invalid. func Logger(level string, jsonLogging bool) (hclog.Logger, error) { - return NamedLogger(level, jsonLogging, "") -} - -// NamedLogger Logger returns a named hclog instance with log level set and JSON logging enabled/disabled, or an error if level is invalid. -func NamedLogger(level string, jsonLogging bool, name string) (hclog.Logger, error) { parsedLevel := hclog.LevelFromString(level) if parsedLevel == hclog.NoLevel { return nil, fmt.Errorf("unknown log level: %s", level) @@ -55,7 +50,7 @@ func NamedLogger(level string, jsonLogging bool, name string) (hclog.Logger, err JSONFormat: jsonLogging, Level: parsedLevel, Output: os.Stderr, - }).Named(name), nil + }), nil } // ZapLogger returns a logr.Logger instance with log level set and JSON logging enabled/disabled, or an error if the level is invalid. @@ -104,8 +99,8 @@ type LoginParams struct { // Meta is the metadata to set on the token. Meta map[string]string - // NumRetries is the number of times to try to log in. - NumRetries uint64 + // numRetries is only used in tests to make them run faster. + numRetries uint64 } // ConsulLogin issues an ACL().Login to Consul and writes out the token to tokenSinkFile. 
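// A minimal caller looks like this (a sketch: the auth method name is
// illustrative, the two paths are the defaults used by connect-init, and a
// zero-value retry count falls back to numLoginRetries):
//
//	params := common.LoginParams{
//		AuthMethod:      "consul-k8s-auth-method", // hypothetical method name
//		BearerTokenFile: "/var/run/secrets/kubernetes.io/serviceaccount/token",
//		TokenSinkFile:   "/consul/connect-inject/acl-token",
//	}
//	token, err := common.ConsulLogin(consulClient, params, logger)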
@@ -121,8 +116,8 @@ func ConsulLogin(client *api.Client, params LoginParams, log hclog.Logger) (stri return "", fmt.Errorf("no bearer token found in %q", params.BearerTokenFile) } - if params.NumRetries == 0 { - params.NumRetries = numLoginRetries + if params.numRetries == 0 { + params.numRetries = numLoginRetries } var token *api.ACLToken err = backoff.Retry(func() error { @@ -149,7 +144,7 @@ func ConsulLogin(client *api.Client, params LoginParams, log hclog.Logger) (stri } } return err - }, backoff.WithMaxRetries(backoff.NewConstantBackOff(1*time.Second), params.NumRetries)) + }, backoff.WithMaxRetries(backoff.NewConstantBackOff(1*time.Second), params.numRetries)) if err != nil { log.Error("Hit maximum retries for consul login", "error", err) return "", err @@ -207,7 +202,7 @@ func WriteFileWithPerms(outputFile, payload string, mode os.FileMode) error { // os.WriteFile truncates existing files and overwrites them, but only if they are writable. // If the file exists it will already likely be read-only. Remove it first. if _, err := os.Stat(outputFile); err == nil { - if err = os.RemoveAll(outputFile); err != nil { + if err = os.Remove(outputFile); err != nil { return fmt.Errorf("unable to delete existing file: %s", err) } } diff --git a/control-plane/subcommand/common/common_test.go b/control-plane/subcommand/common/common_test.go index 521831473a..9bab362560 100644 --- a/control-plane/subcommand/common/common_test.go +++ b/control-plane/subcommand/common/common_test.go @@ -198,7 +198,7 @@ func TestConsulLogin_TokenFileUnwritable(t *testing.T) { AuthMethod: testAuthMethod, BearerTokenFile: bearerTokenFile, TokenSinkFile: randFileName, - NumRetries: 2, + numRetries: 2, } _, err = ConsulLogin(client, params, log) require.Error(err) @@ -210,7 +210,7 @@ func TestWriteFileWithPerms_InvalidOutputFile(t *testing.T) { rand.Seed(time.Now().UnixNano()) randFileName := fmt.Sprintf("/tmp/tmp/tmp/%d", rand.Int()) t.Cleanup(func() { - os.RemoveAll(randFileName) + os.Remove(randFileName) }) err := WriteFileWithPerms(randFileName, "", os.FileMode(0444)) require.Errorf(t, err, "unable to create file: %s", randFileName) @@ -223,7 +223,7 @@ func TestWriteFileWithPerms_OutputFileExists(t *testing.T) { err := os.WriteFile(randFileName, []byte("foo"), os.FileMode(0444)) require.NoError(t, err) t.Cleanup(func() { - os.RemoveAll(randFileName) + os.Remove(randFileName) }) payload := "abcd" err = WriteFileWithPerms(randFileName, payload, os.FileMode(0444)) @@ -239,7 +239,7 @@ func TestWriteFileWithPerms(t *testing.T) { rand.Seed(time.Now().UnixNano()) randFileName := fmt.Sprintf("/tmp/%d", rand.Int()) t.Cleanup(func() { - os.RemoveAll(randFileName) + os.Remove(randFileName) }) // Issue the write. 
mode := os.FileMode(0444) diff --git a/control-plane/subcommand/common/test_util.go b/control-plane/subcommand/common/test_util.go index 13d9017fe4..ff73e62ae3 100644 --- a/control-plane/subcommand/common/test_util.go +++ b/control-plane/subcommand/common/test_util.go @@ -17,7 +17,7 @@ func WriteTempFile(t *testing.T, contents string) string { require.NoError(t, err) t.Cleanup(func() { - os.RemoveAll(file.Name()) + os.Remove(file.Name()) }) return file.Name() } diff --git a/control-plane/subcommand/connect-init/command.go b/control-plane/subcommand/connect-init/command.go index 651be98a38..77d79221d2 100644 --- a/control-plane/subcommand/connect-init/command.go +++ b/control-plane/subcommand/connect-init/command.go @@ -1,34 +1,27 @@ package connectinit import ( - "context" - "encoding/json" "errors" "flag" "fmt" - "net" "os" - "os/signal" "sync" - "syscall" "time" "github.com/cenkalti/backoff" connectinject "github.com/hashicorp/consul-k8s/control-plane/connect-inject" "github.com/hashicorp/consul-k8s/control-plane/consul" - "github.com/hashicorp/consul-k8s/control-plane/namespaces" "github.com/hashicorp/consul-k8s/control-plane/subcommand/common" "github.com/hashicorp/consul-k8s/control-plane/subcommand/flags" - "github.com/hashicorp/consul-server-connection-manager/discovery" "github.com/hashicorp/consul/api" - "github.com/hashicorp/consul/sdk/iptables" "github.com/hashicorp/go-hclog" "github.com/mitchellh/cli" - "github.com/mitchellh/mapstructure" ) const ( - defaultProxyIDFile = "/consul/connect-inject/proxyid" + defaultBearerTokenFile = "/var/run/secrets/kubernetes.io/serviceaccount/token" + defaultTokenSinkFile = "/consul/connect-inject/acl-token" + defaultProxyIDFile = "/consul/connect-inject/proxyid" // The number of times to attempt to read this service (120s). defaultServicePollingRetries = 120 @@ -37,48 +30,43 @@ const ( type Command struct { UI cli.Ui - flagConsulNodeName string - flagPodName string // Pod name. - flagPodNamespace string // Pod namespace. - flagServiceAccountName string // Service account name. - flagServiceName string // Service name. - flagGatewayKind string - flagRedirectTrafficConfig string - flagLogLevel string - flagLogJSON bool - - flagProxyIDFile string // Location to write the output proxyID. Default is defaultProxyIDFile. - flagMultiPort bool - + flagACLAuthMethod string // Auth Method to use for ACLs, if enabled. + flagPodName string // Pod name. + flagPodNamespace string // Pod namespace. + flagAuthMethodNamespace string // Consul namespace the auth-method is defined in. + flagConsulServiceNamespace string // Consul destination namespace for the service. + flagServiceAccountName string // Service account name. + flagServiceName string // Service name. + flagLogLevel string + flagLogJSON bool + + flagBearerTokenFile string // Location of the bearer token. Default is /var/run/secrets/kubernetes.io/serviceaccount/token. + flagACLTokenSink string // Location to write the output token. Default is defaultTokenSinkFile. + flagProxyIDFile string // Location to write the output proxyID. Default is defaultProxyIDFile. + flagMultiPort bool serviceRegistrationPollingAttempts uint64 // Number of times to poll for this service to be registered. flagSet *flag.FlagSet - consul *flags.ConsulFlags + http *flags.HTTPFlags once sync.Once help string logger hclog.Logger - - watcher *discovery.Watcher - - nonRetryableError error - - // Only used in tests. 
- iptablesProvider iptables.Provider - iptablesConfig iptables.Config } func (c *Command) init() { c.flagSet = flag.NewFlagSet("", flag.ContinueOnError) + c.flagSet.StringVar(&c.flagACLAuthMethod, "acl-auth-method", "", "Name of the auth method to login to.") c.flagSet.StringVar(&c.flagPodName, "pod-name", "", "Name of the pod.") - c.flagSet.StringVar(&c.flagConsulNodeName, "consul-node-name", "", "Name of the Consul node where services are registered.") c.flagSet.StringVar(&c.flagPodNamespace, "pod-namespace", "", "Name of the pod namespace.") + c.flagSet.StringVar(&c.flagAuthMethodNamespace, "auth-method-namespace", "", "Consul namespace the auth-method is defined in") + c.flagSet.StringVar(&c.flagConsulServiceNamespace, "consul-service-namespace", "", "Consul destination namespace of the service.") c.flagSet.StringVar(&c.flagServiceAccountName, "service-account-name", "", "Service account name on the pod.") c.flagSet.StringVar(&c.flagServiceName, "service-name", "", "Service name as specified via the pod annotation.") + c.flagSet.StringVar(&c.flagBearerTokenFile, "bearer-token-file", defaultBearerTokenFile, "Path to service account token file.") + c.flagSet.StringVar(&c.flagACLTokenSink, "acl-token-sink", defaultTokenSinkFile, "File name where where ACL token should be saved.") c.flagSet.StringVar(&c.flagProxyIDFile, "proxy-id-file", defaultProxyIDFile, "File name where proxy's Consul service ID should be saved.") c.flagSet.BoolVar(&c.flagMultiPort, "multiport", false, "If the pod is a multi port pod.") - c.flagSet.StringVar(&c.flagGatewayKind, "gateway-kind", "", "Kind of gateway that is being registered: ingress-gateway, terminating-gateway, or mesh-gateway.") - c.flagSet.StringVar(&c.flagRedirectTrafficConfig, "redirect-traffic-config", os.Getenv("CONSUL_REDIRECT_TRAFFIC_CONFIG"), "Config (in JSON format) to configure iptables for this pod.") c.flagSet.StringVar(&c.flagLogLevel, "log-level", "info", "Log verbosity level. Supported values (in order of detail) are \"trace\", "+ "\"debug\", \"info\", \"warn\", and \"error\".") @@ -89,8 +77,8 @@ func (c *Command) init() { c.serviceRegistrationPollingAttempts = defaultServicePollingRetries } - c.consul = &flags.ConsulFlags{} - flags.Merge(c.flagSet, c.consul.Flags()) + c.http = &flags.HTTPFlags{} + flags.Merge(c.flagSet, c.http.Flags()) c.help = flags.Usage(help, c.flagSet) } @@ -116,96 +104,53 @@ func (c *Command) Run(args []string) int { return 1 } } - - // Create Consul API config object. - consulConfig := c.consul.ConsulClientConfig() - - // Create a context to be used by the processes started in this command. - ctx, cancelFunc := signal.NotifyContext(context.Background(), syscall.SIGINT, syscall.SIGTERM) - defer cancelFunc() - - // Start Consul server Connection manager. - serverConnMgrCfg, err := c.consul.ConsulServerConnMgrConfig() - // Disable server watch because we only need to get server IPs once. 
- serverConnMgrCfg.ServerWatchDisabled = true - if err != nil { - c.UI.Error(fmt.Sprintf("unable to create config for consul-server-connection-manager: %s", err)) - return 1 - } - if c.watcher == nil { - c.watcher, err = discovery.NewWatcher(ctx, serverConnMgrCfg, c.logger.Named("consul-server-connection-manager")) - if err != nil { - c.UI.Error(fmt.Sprintf("unable to create Consul server watcher: %s", err)) - return 1 - } - go c.watcher.Run() - defer c.watcher.Stop() - } - - state, err := c.watcher.State() + cfg := api.DefaultConfig() + cfg.Namespace = c.flagConsulServiceNamespace + c.http.MergeOntoConfig(cfg) + consulClient, err := consul.NewClient(cfg, c.http.ConsulAPITimeout()) if err != nil { - c.logger.Error("Unable to get state from consul-server-connection-manager", "error", err) - return 1 - } - - consulClient, err := consul.NewClientFromConnMgrState(consulConfig, state) - if err != nil { - if c.flagServiceAccountName == "default" { - c.logger.Warn("The service account name for this Pod is \"default\"." + - " In default installations this is not a supported service account name." + - " The service account name must match the name of the Kubernetes Service" + - " or the consul.hashicorp.com/connect-service annotation.") - } c.logger.Error("Unable to get client connection", "error", err) return 1 } - proxyService := &api.AgentService{} - if c.flagGatewayKind != "" { - err = backoff.Retry(c.getGatewayRegistration(consulClient), backoff.WithMaxRetries(backoff.NewConstantBackOff(1*time.Second), c.serviceRegistrationPollingAttempts)) - if err != nil { - c.logger.Error("Timed out waiting for gateway registration", "error", err) - return 1 - } - if c.nonRetryableError != nil { - c.logger.Error("Error processing gateway registration", "error", c.nonRetryableError) - return 1 - } - } else { - var err = backoff.Retry(c.getConnectServiceRegistrations(consulClient, proxyService), backoff.WithMaxRetries(backoff.NewConstantBackOff(1*time.Second), c.serviceRegistrationPollingAttempts)) - if err != nil { - c.logger.Error("Timed out waiting for service registration", "error", err) - return 1 - } - if c.nonRetryableError != nil { - c.logger.Error("Error processing service registration", "error", c.nonRetryableError) - return 1 - } - } - // todo (agentless): this should eventually be passed to consul-dataplane as a string so we don't need to write it to file. - if c.consul.UseTLS && c.consul.CACertPEM != "" { - if err = common.WriteFileWithPerms(connectinject.ConsulCAFile, c.consul.CACertPEM, 0444); err != nil { - c.logger.Error("error writing CA cert file", "error", err) - return 1 + // First do the ACL Login, if necessary. + if c.flagACLAuthMethod != "" { + // loginMeta is the default metadata that we pass to the consul login API. + loginMeta := map[string]string{"pod": fmt.Sprintf("%s/%s", c.flagPodNamespace, c.flagPodName)} + loginParams := common.LoginParams{ + AuthMethod: c.flagACLAuthMethod, + Namespace: c.flagAuthMethodNamespace, + BearerTokenFile: c.flagBearerTokenFile, + TokenSinkFile: c.flagACLTokenSink, + Meta: loginMeta, } - } - - if c.flagRedirectTrafficConfig != "" { - err = c.applyTrafficRedirectionRules(proxyService) + token, err := common.ConsulLogin(consulClient, loginParams, c.logger) if err != nil { - c.logger.Error("error applying traffic redirection rules", "err", err) + if c.flagServiceAccountName == "default" { + c.logger.Warn("The service account name for this Pod is \"default\"." + + " In default installations this is not a supported service account name." 
+ + " The service account name must match the name of the Kubernetes Service" + + " or the consul.hashicorp.com/connect-service annotation.") + } + c.logger.Error("unable to complete login", "error", err) return 1 } + cfg.Token = token } - c.logger.Info("Connect initialization completed") - return 0 -} - -func (c *Command) getConnectServiceRegistrations(consulClient *api.Client, proxyService *api.AgentService) backoff.Operation { + // Now wait for the service to be registered. Do this by querying the Agent for a service + // which maps to this pod+namespace. var proxyID string registrationRetryCount := 0 - return func() error { + var errServiceNameMismatch error + // We need a new client so that we can use the ACL token that was fetched during login to do the next bit, + // otherwise `consulClient` will still be using the bearerToken that was passed in. + consulClient, err = consul.NewClient(cfg, c.http.ConsulAPITimeout()) + if err != nil { + c.logger.Error("Unable to update client connection", "error", err) + return 1 + } + err = backoff.Retry(func() error { registrationRetryCount++ filter := fmt.Sprintf("Meta[%q] == %q and Meta[%q] == %q ", connectinject.MetaKeyPodName, c.flagPodName, connectinject.MetaKeyKubeNS, c.flagPodNamespace) @@ -214,14 +159,13 @@ func (c *Command) getConnectServiceRegistrations(consulClient *api.Client, proxy // this one Pod. If so, we want to ensure the service and proxy matching our expected name is registered. filter += fmt.Sprintf(` and (Service == %q or Service == "%s-sidecar-proxy")`, c.flagServiceName, c.flagServiceName) } - serviceList, _, err := consulClient.Catalog().NodeServiceList(c.flagConsulNodeName, - &api.QueryOptions{Filter: filter, MergeCentralConfig: true}) + serviceList, err := consulClient.Agent().ServicesWithFilter(filter) if err != nil { - c.logger.Error("Unable to get services", "error", err) + c.logger.Error("Unable to get Agent services", "error", err) return err } // Wait for the service and the connect-proxy service to be registered. - if len(serviceList.Services) != 2 { + if len(serviceList) != 2 { c.logger.Info("Unable to find registered services; retrying") // Once every 10 times we're going to print this informational message to the pod logs so that // it is not "lost" to the user at the end of the retries when the pod enters a CrashLoop. @@ -229,33 +173,32 @@ func (c *Command) getConnectServiceRegistrations(consulClient *api.Client, proxy c.logger.Info("Check to ensure a Kubernetes service has been created for this application." + " If your pod is not starting also check the connect-inject deployment logs.") } - if len(serviceList.Services) > 2 { + if len(serviceList) > 2 { c.logger.Error("There are multiple Consul services registered for this pod when there must only be one." 
+ " Check if there are multiple Kubernetes services selecting this pod and add the label" + " `consul.hashicorp.com/service-ignore: \"true\"` to all services except the one used by Consul for handling requests.") } - return fmt.Errorf("did not find correct number of services, found: %d, services: %+v", len(serviceList.Services), serviceList) + return fmt.Errorf("did not find correct number of services, found: %d, services: %+v", len(serviceList), serviceList) } - for _, svc := range serviceList.Services { + for _, svc := range serviceList { c.logger.Info("Registered service has been detected", "service", svc.Service) - if c.consul.ConsulLogin.AuthMethod != "" { + if c.flagACLAuthMethod != "" { if c.flagServiceName != "" && c.flagServiceAccountName != c.flagServiceName { - // Save an error but return nil so that we don't retry this step. - c.nonRetryableError = fmt.Errorf("service account name %s doesn't match annotation service name %s", c.flagServiceAccountName, c.flagServiceName) + // Set the error but return nil so we don't retry. + errServiceNameMismatch = fmt.Errorf("service account name %s doesn't match annotation service name %s", c.flagServiceAccountName, c.flagServiceName) return nil } if c.flagServiceName == "" && svc.Kind != api.ServiceKindConnectProxy && c.flagServiceAccountName != svc.Service { - // Save an error but return nil so that we don't retry this step. - c.nonRetryableError = fmt.Errorf("service account name %s doesn't match Consul service name %s", c.flagServiceAccountName, svc.Service) + // Set the error but return nil so we don't retry. + errServiceNameMismatch = fmt.Errorf("service account name %s doesn't match Consul service name %s", c.flagServiceAccountName, svc.Service) return nil } } if svc.Kind == api.ServiceKindConnectProxy { // This is the proxy service ID. proxyID = svc.ID - *proxyService = *svc } } @@ -263,78 +206,26 @@ func (c *Command) getConnectServiceRegistrations(consulClient *api.Client, proxy // In theory we can't reach this point unless we have 2 services registered against // this pod and neither are the connect-proxy. We don't support this case anyway, but it // is necessary to return from the function. - c.logger.Error("Unable to write proxy ID to file", "error", err) return fmt.Errorf("unable to find registered connect-proxy service") } - - // Write the proxy ID to the shared volume so `consul connect envoy` can use it for bootstrapping. - if err = common.WriteFileWithPerms(c.flagProxyIDFile, proxyID, os.FileMode(0444)); err != nil { - // Save an error but return nil so that we don't retry this step. 
- c.nonRetryableError = err - return nil - } - return nil + }, backoff.WithMaxRetries(backoff.NewConstantBackOff(1*time.Second), c.serviceRegistrationPollingAttempts)) + if err != nil { + c.logger.Error("Timed out waiting for service registration", "error", err) + return 1 } -} - -func (c *Command) getGatewayRegistration(client *api.Client) backoff.Operation { - var proxyID string - registrationRetryCount := 0 - return func() error { - registrationRetryCount++ - var gatewayList *api.CatalogNodeServiceList - var err error - filter := fmt.Sprintf("Meta[%q] == %q and Meta[%q] == %q ", - connectinject.MetaKeyPodName, c.flagPodName, connectinject.MetaKeyKubeNS, c.flagPodNamespace) - if c.consul.Namespace != "" { - gatewayList, _, err = client.Catalog().NodeServiceList(c.flagConsulNodeName, &api.QueryOptions{Filter: filter, Namespace: namespaces.WildcardNamespace}) - } else { - gatewayList, _, err = client.Catalog().NodeServiceList(c.flagConsulNodeName, &api.QueryOptions{Filter: filter}) - } - if err != nil { - c.logger.Error("Unable to get gateway", "error", err) - return err - } - // Wait for the service and the connect-proxy service to be registered. - if len(gatewayList.Services) != 1 { - c.logger.Info("Unable to find registered gateway; retrying") - // Once every 10 times we're going to print this informational message to the pod logs so that - // it is not "lost" to the user at the end of the retries when the pod enters a CrashLoop. - if registrationRetryCount%10 == 0 { - c.logger.Info("Check to ensure a Kubernetes service has been created for this application." + - " If your pod is not starting also check the connect-inject deployment logs.") - } - if len(gatewayList.Services) > 1 { - c.logger.Error("There are multiple Consul gateway services registered for this pod when there must only be one." + - " Check if there are multiple Kubernetes services selecting this gateway pod and add the label" + - " `consul.hashicorp.com/service-ignore: \"true\"` to all services except the one used by Consul for handling requests.") - } - return fmt.Errorf("did not find correct number of gateways, found: %d, services: %+v", len(gatewayList.Services), gatewayList) - } - for _, gateway := range gatewayList.Services { - switch gateway.Kind { - case api.ServiceKindMeshGateway, api.ServiceKindIngressGateway, api.ServiceKindTerminatingGateway: - proxyID = gateway.ID - } - } - if proxyID == "" { - // In theory we can't reach this point unless we have a service registered against - // this pod but it isnt a Connect Gateway. We don't support this case, but it - // is necessary to return from the function. - c.nonRetryableError = fmt.Errorf("unable to find registered connect-proxy service") - return nil - } - - // Write the proxy ID to the shared volume so the consul-dataplane can use it for bootstrapping. - if err := common.WriteFileWithPerms(c.flagProxyIDFile, proxyID, os.FileMode(0444)); err != nil { - // Save an error but return nil so that we don't retry this step. - c.nonRetryableError = err - return nil - } - - return nil + if errServiceNameMismatch != nil { + c.logger.Error(errServiceNameMismatch.Error()) + return 1 + } + // Write the proxy ID to the shared volume so `consul connect envoy` can use it for bootstrapping. 
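// The sidecar entrypoint can then bootstrap Envoy against the registered proxy,
// along the lines of (illustrative shell, using the default proxy-id file path):
//
//	consul connect envoy -proxy-id="$(cat /consul/connect-inject/proxyid)"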
+ err = common.WriteFileWithPerms(c.flagProxyIDFile, proxyID, os.FileMode(0444)) + if err != nil { + c.logger.Error("Unable to write proxy ID to file", "error", err) + return 1 } + c.logger.Info("Connect initialization completed") + return 0 } func (c *Command) validateFlags() error { @@ -344,13 +235,13 @@ func (c *Command) validateFlags() error { if c.flagPodNamespace == "" { return errors.New("-pod-namespace must be set") } - if c.consul.ConsulLogin.AuthMethod != "" && c.flagServiceAccountName == "" && c.flagGatewayKind == "" { + if c.flagACLAuthMethod != "" && c.flagServiceAccountName == "" { return errors.New("-service-account-name must be set when ACLs are enabled") } - if c.flagConsulNodeName == "" { - return errors.New("-consul-node-name must be set") - } + if c.http.ConsulAPITimeout() <= 0 { + return errors.New("-consul-api-timeout must be set to a value greater than 0") + } return nil } @@ -360,58 +251,6 @@ func (c *Command) Help() string { return c.help } -// This below implementation is loosely based on -// https://github.com/hashicorp/consul/blob/fe2d41ddad9ba2b8ff86cbdebbd8f05855b1523c/command/connect/redirecttraffic/redirect_traffic.go#L136. - -// trafficRedirectProxyConfig is a snippet of xds/config.go -// with only the configuration values that we need to parse from Proxy.Config -// to apply traffic redirection rules. -type trafficRedirectProxyConfig struct { - BindPort int `mapstructure:"bind_port"` - StatsBindAddr string `mapstructure:"envoy_stats_bind_addr"` -} - -func (c *Command) applyTrafficRedirectionRules(svc *api.AgentService) error { - err := json.Unmarshal([]byte(c.flagRedirectTrafficConfig), &c.iptablesConfig) - if err != nil { - return err - } - if c.iptablesProvider != nil { - c.iptablesConfig.IptablesProvider = c.iptablesProvider - } - - if svc.Proxy.TransparentProxy != nil && svc.Proxy.TransparentProxy.OutboundListenerPort != 0 { - c.iptablesConfig.ProxyOutboundPort = svc.Proxy.TransparentProxy.OutboundListenerPort - } - - // Decode proxy's opaque config so that we can use it later to configure - // traffic redirection with iptables. - var trCfg trafficRedirectProxyConfig - if err = mapstructure.WeakDecode(svc.Proxy.Config, &trCfg); err != nil { - return fmt.Errorf("failed parsing Proxy.Config: %s", err) - } - if trCfg.BindPort != 0 { - c.iptablesConfig.ProxyInboundPort = trCfg.BindPort - } - - if trCfg.StatsBindAddr != "" { - _, port, err := net.SplitHostPort(trCfg.StatsBindAddr) - if err != nil { - return fmt.Errorf("failed parsing host and port from envoy_stats_bind_addr: %s", err) - } - - c.iptablesConfig.ExcludeInboundPorts = append(c.iptablesConfig.ExcludeInboundPorts, port) - } - - // Configure any relevant information from the proxy service - err = iptables.Setup(c.iptablesConfig) - if err != nil { - return err - } - c.logger.Info("Successfully applied traffic redirection rules") - return nil -} - const synopsis = "Inject connect init command." 
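// The polling loop above follows the github.com/cenkalti/backoff contract: the
// operation returns a non-nil error to be retried and nil to stop, which is why
// non-retryable failures such as the service-name mismatch are saved aside and
// nil is returned. A minimal, self-contained sketch of the same pattern (the
// pollForService helper and the retry budget are illustrative assumptions, not
// part of this change):
//
//	package main
//
//	import (
//		"errors"
//		"fmt"
//		"time"
//
//		"github.com/cenkalti/backoff"
//	)
//
//	func main() {
//		attempts := 0
//		pollForService := func() error {
//			attempts++
//			if attempts < 3 {
//				return errors.New("service not registered yet") // non-nil: retried
//			}
//			return nil // nil stops the retries
//		}
//		// Constant one-second interval, at most 5 retries after the first attempt.
//		err := backoff.Retry(pollForService, backoff.WithMaxRetries(backoff.NewConstantBackOff(1*time.Second), 5))
//		fmt.Println("attempts:", attempts, "err:", err)
//	}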
const help = ` Usage: consul-k8s-control-plane connect-init [options] diff --git a/control-plane/subcommand/connect-init/command_ent_test.go b/control-plane/subcommand/connect-init/command_ent_test.go index e9d65b05e8..4e23fb2244 100644 --- a/control-plane/subcommand/connect-init/command_ent_test.go +++ b/control-plane/subcommand/connect-init/command_ent_test.go @@ -6,56 +6,162 @@ import ( "fmt" "math/rand" "os" - "strconv" "testing" - connectinject "github.com/hashicorp/consul-k8s/control-plane/connect-inject" + "github.com/hashicorp/consul-k8s/control-plane/helper/test" "github.com/hashicorp/consul-k8s/control-plane/namespaces" + "github.com/hashicorp/consul-k8s/control-plane/subcommand/common" "github.com/hashicorp/consul/api" "github.com/hashicorp/consul/sdk/testutil" "github.com/mitchellh/cli" "github.com/stretchr/testify/require" ) -func TestRun_WithNamespaces(t *testing.T) { +func TestRun_ServicePollingWithACLsAndTLSWithNamespaces(t *testing.T) { t.Parallel() cases := []struct { name string + tls bool consulServiceNamespace string + acls bool + authMethodNamespace string + adminPartition string }{ { - name: "serviceNS=default", + name: "ACLs enabled, no tls, serviceNS=default, authMethodNS=default, partition=default", + tls: false, consulServiceNamespace: "default", + authMethodNamespace: "default", + acls: true, + adminPartition: "default", }, { - name: "serviceNS=default-ns", + name: "ACLs enabled, tls, serviceNS=default, authMethodNS=default, partition=default", + tls: true, + consulServiceNamespace: "default", + authMethodNamespace: "default", + acls: true, + adminPartition: "default", + }, + { + name: "ACLs enabled, no tls, serviceNS=default-ns, authMethodNS=default, partition=default", + tls: false, + consulServiceNamespace: "default-ns", + authMethodNamespace: "default", + acls: true, + adminPartition: "default", + }, + { + name: "ACLs enabled, tls, serviceNS=default-ns, authMethodNS=default, partition=default", + tls: true, consulServiceNamespace: "default-ns", + authMethodNamespace: "default", + acls: true, + adminPartition: "default", }, { - name: "serviceNS=other", + name: "ACLs enabled, no tls, serviceNS=other, authMethodNS=other, partition=default", + tls: false, consulServiceNamespace: "other", + authMethodNamespace: "other", + acls: true, + adminPartition: "default", + }, + { + name: "ACLs enabled, tls, serviceNS=other, authMethodNS=other, partition=default", + tls: true, + consulServiceNamespace: "other", + authMethodNamespace: "other", + acls: true, + adminPartition: "default", + }, + { + name: "ACLs disabled, no tls, serviceNS=default, authMethodNS=default, partition=default", + tls: false, + consulServiceNamespace: "default", + authMethodNamespace: "default", + adminPartition: "default", + }, + { + name: "ACLs disabled, tls, serviceNS=default, authMethodNS=default, partition=default", + tls: true, + consulServiceNamespace: "default", + authMethodNamespace: "default", + adminPartition: "default", + }, + { + name: "ACLs disabled, no tls, serviceNS=default-ns, authMethodNS=default, partition=default", + tls: false, + consulServiceNamespace: "default-ns", + authMethodNamespace: "default", + adminPartition: "default", + }, + { + name: "ACLs disabled, tls, serviceNS=default-ns, authMethodNS=default, partition=default", + tls: true, + consulServiceNamespace: "default-ns", + authMethodNamespace: "default", + adminPartition: "default", + }, + { + name: "ACLs disabled, no tls, serviceNS=other, authMethodNS=other, partition=default", + tls: false, + consulServiceNamespace: 
"other", + authMethodNamespace: "other", + adminPartition: "default", + }, + { + name: "ACLs disabled, tls, serviceNS=other, authMethodNS=other, partition=default", + tls: true, + consulServiceNamespace: "other", + authMethodNamespace: "other", + adminPartition: "default", }, } for _, c := range cases { t.Run(c.name, func(t *testing.T) { + bearerFile := common.WriteTempFile(t, test.ServiceAccountJWTToken) tokenFile := fmt.Sprintf("/tmp/%d1", rand.Int()) proxyFile := fmt.Sprintf("/tmp/%d2", rand.Int()) t.Cleanup(func() { - _ = os.RemoveAll(proxyFile) - _ = os.RemoveAll(tokenFile) + os.Remove(proxyFile) + os.Remove(tokenFile) }) + var caFile, certFile, keyFile string // Start Consul server with ACLs enabled and default deny policy. - var serverCfg *testutil.TestServerConfig + masterToken := "b78d37c7-0ca7-5f4d-99ee-6d9975ce4586" server, err := testutil.NewTestServerConfigT(t, func(cfg *testutil.TestServerConfig) { - serverCfg = cfg + if c.acls { + cfg.ACL.Enabled = true + cfg.ACL.DefaultPolicy = "deny" + cfg.ACL.Tokens.InitialManagement = masterToken + } + if c.tls { + caFile, certFile, keyFile = test.GenerateServerCerts(t) + cfg.CAFile = caFile + cfg.CertFile = certFile + cfg.KeyFile = keyFile + } }) require.NoError(t, err) defer server.Stop() server.WaitForLeader(t) cfg := &api.Config{ + Scheme: "http", Address: server.HTTPAddr, Namespace: c.consulServiceNamespace, + Partition: c.adminPartition, + } + if c.acls { + cfg.Token = masterToken + } + if c.tls { + cfg.Address = server.HTTPSAddr + cfg.Scheme = "https" + cfg.TLSConfig = api.TLSConfig{ + CAFile: caFile, + } } consulClient, err := api.NewClient(cfg) @@ -64,16 +170,14 @@ func TestRun_WithNamespaces(t *testing.T) { _, err = namespaces.EnsureExists(consulClient, c.consulServiceNamespace, "") require.NoError(t, err) + if c.acls { + test.SetupK8sAuthMethodWithNamespaces(t, consulClient, testServiceAccountName, "default-ns", c.authMethodNamespace, c.authMethodNamespace != c.consulServiceNamespace, "") + } + // Register Consul services. - testConsulServices := []api.AgentService{consulCountingSvc, consulCountingSvcSidecar} + testConsulServices := []api.AgentServiceRegistration{consulCountingSvc, consulCountingSvcSidecar} for _, svc := range testConsulServices { - serviceRegistration := &api.CatalogRegistration{ - Node: connectinject.ConsulNodeName, - Address: "127.0.0.1", - Service: &svc, - } - _, err = consulClient.Catalog().Register(serviceRegistration, nil) - require.NoError(t, err) + require.NoError(t, consulClient.Agent().ServiceRegister(&svc)) } ui := cli.NewMockUi() @@ -81,22 +185,43 @@ func TestRun_WithNamespaces(t *testing.T) { UI: ui, serviceRegistrationPollingAttempts: 5, } - // We build the consul-addr because normally it's defined by the init container setting + // We build the http-addr because normally it's defined by the init container setting // CONSUL_HTTP_ADDR when it processes the command template. 
flags := []string{"-pod-name", testPodName, "-pod-namespace", testPodNamespace, - "-addresses", "127.0.0.1", - "-http-port", strconv.Itoa(serverCfg.Ports.HTTP), - "-grpc-port", strconv.Itoa(serverCfg.Ports.GRPC), - "-namespace", c.consulServiceNamespace, + "-service-account-name", testServiceAccountName, + "-http-addr", fmt.Sprintf("%s://%s", cfg.Scheme, cfg.Address), + "-consul-service-namespace", c.consulServiceNamespace, + "-acl-token-sink", tokenFile, + "-bearer-token-file", bearerFile, "-proxy-id-file", proxyFile, - "-consul-node-name", connectinject.ConsulNodeName, + "-consul-api-timeout", "5s", + } + if c.acls { + flags = append(flags, "-acl-auth-method", test.AuthMethod, "-auth-method-namespace", c.authMethodNamespace) + } + // Add the CA File if necessary since we're not setting CONSUL_CACERT in test ENV. + if c.tls { + flags = append(flags, "-ca-file", caFile) } - // Run the command. code := cmd.Run(flags) require.Equal(t, 0, code, ui.ErrorWriter.String()) + if c.acls { + // Validate the ACL token was written. + tokenData, err := os.ReadFile(tokenFile) + require.NoError(t, err) + require.NotEmpty(t, tokenData) + + // Check that the token has the metadata with pod name and pod namespace. + consulClient, err = api.NewClient(&api.Config{Address: server.HTTPAddr, Token: string(tokenData), Namespace: c.consulServiceNamespace}) + require.NoError(t, err) + token, _, err := consulClient.ACL().TokenReadSelf(&api.QueryOptions{Namespace: c.authMethodNamespace}) + require.NoError(t, err) + require.Equal(t, "token created via login: {\"pod\":\"default-ns/counting-pod\"}", token.Description) + } + // Validate contents of proxyFile. data, err := os.ReadFile(proxyFile) require.NoError(t, err) diff --git a/control-plane/subcommand/connect-init/command_test.go b/control-plane/subcommand/connect-init/command_test.go index 18d72a428b..58514d98d9 100644 --- a/control-plane/subcommand/connect-init/command_test.go +++ b/control-plane/subcommand/connect-init/command_test.go @@ -1,21 +1,19 @@ package connectinit import ( - "encoding/json" "fmt" "math/rand" + "net/http" + "net/http/httptest" + "net/url" "os" "strconv" - "strings" - "sync" "testing" "time" - connectinject "github.com/hashicorp/consul-k8s/control-plane/connect-inject" "github.com/hashicorp/consul-k8s/control-plane/helper/test" "github.com/hashicorp/consul-k8s/control-plane/subcommand/common" "github.com/hashicorp/consul/api" - "github.com/hashicorp/consul/sdk/iptables" "github.com/hashicorp/consul/sdk/testutil" "github.com/mitchellh/cli" "github.com/stretchr/testify/require" @@ -39,24 +37,25 @@ func TestRun_FlagValidation(t *testing.T) { flags: []string{ "-pod-name", testPodName, "-pod-namespace", testPodNamespace, - "-auth-method-name", test.AuthMethod}, + "-acl-auth-method", test.AuthMethod}, expErr: "-service-account-name must be set when ACLs are enabled", }, { flags: []string{ "-pod-name", testPodName, - "-pod-namespace", testPodNamespace}, - expErr: "-consul-node-name must be set", + "-pod-namespace", testPodNamespace, + "-acl-auth-method", test.AuthMethod, + "-service-account-name", "foo"}, + expErr: "-consul-api-timeout must be set to a value greater than 0", }, { flags: []string{ "-pod-name", testPodName, "-pod-namespace", testPodNamespace, - "-auth-method-name", test.AuthMethod, + "-acl-auth-method", test.AuthMethod, "-service-account-name", "foo", - "-log-level", "invalid", - "-consul-node-name", "bar", - }, + "-consul-api-timeout", "5s", + "-log-level", "invalid"}, expErr: "unknown log level: invalid", }, } @@ -73,15 +72,14 @@ 
func TestRun_FlagValidation(t *testing.T) { } } -// TestRun_ConnectServices tests that the command can log in to Consul (if ACLs are enabled) using a kubernetes -// auth method and using the obtained token find the services for the provided pod name -// and namespace provided and write the proxy ID of the proxy service to a file. -func TestRun_ConnectServices(t *testing.T) { +// TestRun_ServicePollingWithACLsAndTLS bootstraps and starts a consul server using a mock +// kubernetes server to provide responses for setting up the consul AuthMethod +// then validates that the command runs end to end successfully. Also tests with TLS on/off. +func TestRun_ServicePollingWithACLsAndTLS(t *testing.T) { t.Parallel() - cases := []struct { name string - aclsEnabled bool + tls bool serviceAccountName string serviceName string includeServiceAccountName bool @@ -90,25 +88,44 @@ func TestRun_ConnectServices(t *testing.T) { multiport bool }{ { - name: "service-name not provided", + name: "ACLs enabled, no tls", + tls: false, serviceAccountName: "counting", }, { - name: "multi-port service", + name: "ACLs enabled, tls", + tls: true, + serviceAccountName: "counting", + }, + { + name: "ACLs enabled, K8s service name matches service account name", + tls: false, + serviceAccountName: "counting", + serviceName: "", + }, + { + name: "ACLs enabled, service name annotation matches service account name", + tls: false, + serviceAccountName: "web", + serviceName: "web", + }, + { + name: "ACLs enabled, multiport service", + tls: false, serviceAccountName: "counting-admin", serviceName: "counting-admin", multiport: true, }, { - name: "acls enabled; service name annotation doesn't match service account name", - aclsEnabled: true, + name: "ACLs enabled, service name annotation doesn't match service account name", + tls: false, serviceAccountName: "not-a-match", serviceName: "web", expFail: true, }, { - name: "acls enabled; K8s service name doesn't match service account name", - aclsEnabled: true, + name: "ACLs enabled, K8s service name doesn't match service account name", + tls: false, serviceAccountName: "not-a-match", serviceName: "", expFail: true, @@ -116,17 +133,27 @@ func TestRun_ConnectServices(t *testing.T) { } for _, tt := range cases { t.Run(tt.name, func(t *testing.T) { + bearerFile := common.WriteTempFile(t, test.ServiceAccountJWTToken) tokenFile := fmt.Sprintf("/tmp/%d1", rand.Int()) proxyFile := fmt.Sprintf("/tmp/%d2", rand.Int()) t.Cleanup(func() { - _ = os.RemoveAll(proxyFile) - _ = os.RemoveAll(tokenFile) + os.Remove(proxyFile) + os.Remove(tokenFile) }) + var caFile, certFile, keyFile string // Start Consul server with ACLs enabled and default deny policy. 
- var serverCfg *testutil.TestServerConfig + masterToken := "b78d37c7-0ca7-5f4d-99ee-6d9975ce4586" server, err := testutil.NewTestServerConfigT(t, func(c *testutil.TestServerConfig) { - serverCfg = c + c.ACL.Enabled = true + c.ACL.DefaultPolicy = "deny" + c.ACL.Tokens.InitialManagement = masterToken + if tt.tls { + caFile, certFile, keyFile = test.GenerateServerCerts(t) + c.CAFile = caFile + c.CertFile = certFile + c.KeyFile = keyFile + } }) require.NoError(t, err) defer server.Stop() @@ -134,23 +161,27 @@ func TestRun_ConnectServices(t *testing.T) { cfg := &api.Config{ Scheme: "http", Address: server.HTTPAddr, + Token: masterToken, + } + if tt.tls { + cfg.Address = server.HTTPSAddr + cfg.Scheme = "https" + cfg.TLSConfig = api.TLSConfig{ + CAFile: caFile, + } } consulClient, err := api.NewClient(cfg) require.NoError(t, err) + test.SetupK8sAuthMethod(t, consulClient, testServiceAccountName, "default") + // Register Consul services. - testConsulServices := []api.AgentService{consulCountingSvc, consulCountingSvcSidecar} + testConsulServices := []api.AgentServiceRegistration{consulCountingSvc, consulCountingSvcSidecar} if tt.multiport { testConsulServices = append(testConsulServices, consulCountingSvcMultiport, consulCountingSvcSidecarMultiport) } for _, svc := range testConsulServices { - serviceRegistration := &api.CatalogRegistration{ - Node: connectinject.ConsulNodeName, - Address: "127.0.0.1", - Service: &svc, - } - _, err := consulClient.Catalog().Register(serviceRegistration, nil) - require.NoError(t, err) + require.NoError(t, consulClient.Agent().ServiceRegister(&svc)) } ui := cli.NewMockUi() @@ -159,24 +190,24 @@ func TestRun_ConnectServices(t *testing.T) { serviceRegistrationPollingAttempts: 3, } - // We build the consul-addr because normally it's defined by the init container setting + // We build the http-addr because normally it's defined by the init container setting // CONSUL_HTTP_ADDR when it processes the command template. flags := []string{"-pod-name", testPodName, "-pod-namespace", testPodNamespace, + "-acl-auth-method", test.AuthMethod, + "-service-account-name", tt.serviceAccountName, "-service-name", tt.serviceName, - "-addresses", "127.0.0.1", - "-http-port", strconv.Itoa(serverCfg.Ports.HTTP), - "-grpc-port", strconv.Itoa(serverCfg.Ports.GRPC), + "-http-addr", fmt.Sprintf("%s://%s", cfg.Scheme, cfg.Address), + "-bearer-token-file", bearerFile, + "-acl-token-sink", tokenFile, "-proxy-id-file", proxyFile, "-multiport=" + strconv.FormatBool(tt.multiport), - "-consul-node-name", connectinject.ConsulNodeName, + "-consul-api-timeout=5s", } - if tt.aclsEnabled { - flags = append(flags, "-auth-method-name", test.AuthMethod, - "-service-account-name", tt.serviceAccountName, - "-acl-token-sink", tokenFile) + // Add the CA File if necessary since we're not setting CONSUL_CACERT in tt ENV. + if tt.tls { + flags = append(flags, "-ca-file", caFile) } - // Run the command. code := cmd.Run(flags) if tt.expFail { @@ -185,19 +216,17 @@ func TestRun_ConnectServices(t *testing.T) { } require.Equal(t, 0, code, ui.ErrorWriter.String()) - if tt.aclsEnabled { - // Validate the ACL token was written. - tokenData, err := os.ReadFile(tokenFile) - require.NoError(t, err) - require.NotEmpty(t, tokenData) - - // Check that the token has the metadata with pod name and pod namespace. 
- consulClient, err = api.NewClient(&api.Config{Address: server.HTTPAddr, Token: string(tokenData)}) - require.NoError(t, err) - token, _, err := consulClient.ACL().TokenReadSelf(nil) - require.NoError(t, err) - require.Equal(t, "token created via login: {\"pod\":\"default-ns/counting-pod\"}", token.Description) - } + // Validate the ACL token was written. + tokenData, err := os.ReadFile(tokenFile) + require.NoError(t, err) + require.NotEmpty(t, tokenData) + + // Check that the token has the metadata with pod name and pod namespace. + consulClient, err = api.NewClient(&api.Config{Address: server.HTTPAddr, Token: string(tokenData)}) + require.NoError(t, err) + token, _, err := consulClient.ACL().TokenReadSelf(nil) + require.NoError(t, err) + require.Equal(t, "token created via login: {\"pod\":\"default-ns/counting-pod\"}", token.Description) // Validate contents of proxyFile. data, err := os.ReadFile(proxyFile) @@ -211,100 +240,73 @@ func TestRun_ConnectServices(t *testing.T) { } } -// TestRun_Gateways tests that the command can log in to Consul (if ACLs are enabled) using a kubernetes -// auth method and using the obtained token find the service for the provided gateway -// and namespace provided and write the proxy ID of the gateway service to a file. -func TestRun_Gateways(t *testing.T) { +// This test validates service polling works in a happy case scenario with and without TLS. +func TestRun_ServicePollingOnly(t *testing.T) { t.Parallel() - cases := []struct { - name string - gatewayKind string - agentService api.AgentService - serviceName string - expFail bool + name string + tls bool + serviceName string + multiport bool }{ { - name: "mesh-gateway", - gatewayKind: "mesh-gateway", - agentService: api.AgentService{ - ID: "mesh-gateway", - Service: "mesh-gateway", - Kind: api.ServiceKindMeshGateway, - Port: 4444, - Address: "127.0.0.1", - Meta: map[string]string{ - "component": "mesh-gateway", - metaKeyPodName: testGatewayName, - metaKeyKubeNS: "default-ns", - }, - }, + name: "ACLs disabled, no tls", + tls: false, }, { - name: "ingress-gateway", - gatewayKind: "ingress-gateway", - agentService: api.AgentService{ - ID: "ingress-gateway", - Service: "ingress-gateway", - Kind: api.ServiceKindMeshGateway, - Port: 4444, - Address: "127.0.0.1", - Meta: map[string]string{ - "component": "ingress-gateway", - metaKeyPodName: testGatewayName, - metaKeyKubeNS: "default-ns", - }, - }, + name: "ACLs disabled, tls", + tls: true, }, { - name: "terminating-gateway", - gatewayKind: "terminating-gateway", - agentService: api.AgentService{ - ID: "terminating-gateway", - Service: "terminating-gateway", - Kind: api.ServiceKindMeshGateway, - Port: 4444, - Address: "127.0.0.1", - Meta: map[string]string{ - "component": "terminating-gateway", - metaKeyPodName: testGatewayName, - metaKeyKubeNS: "default-ns", - }, - }, + name: "Multiport, ACLs disabled, no tls", + tls: false, + serviceName: "counting-admin", + multiport: true, }, } for _, tt := range cases { t.Run(tt.name, func(t *testing.T) { - proxyFile := fmt.Sprintf("/tmp/%d2", rand.Int()) + proxyFile := fmt.Sprintf("/tmp/%d", rand.Int()) t.Cleanup(func() { - _ = os.RemoveAll(proxyFile) + os.Remove(proxyFile) }) - // Start Consul server with ACLs enabled and default deny policy. - var serverCfg *testutil.TestServerConfig + var caFile, certFile, keyFile string + // Start Consul server with TLS enabled if required. 
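+ // The per-case certificates from test.GenerateServerCerts are wired into both
+ // sides: the server config here, and the client below via TLSConfig and -ca-file.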
server, err := testutil.NewTestServerConfigT(t, func(c *testutil.TestServerConfig) { - serverCfg = c + if tt.tls { + caFile, certFile, keyFile = test.GenerateServerCerts(t) + c.CAFile = caFile + c.CertFile = certFile + c.KeyFile = keyFile + } }) require.NoError(t, err) defer server.Stop() server.WaitForLeader(t) + + // Get the Consul Client. cfg := &api.Config{ Scheme: "http", Address: server.HTTPAddr, } + if tt.tls { + cfg.Address = server.HTTPSAddr + cfg.Scheme = "https" + cfg.TLSConfig = api.TLSConfig{ + CAFile: caFile, + } + } consulClient, err := api.NewClient(cfg) require.NoError(t, err) // Register Consul services. - testConsulServices := []api.AgentService{tt.agentService} + testConsulServices := []api.AgentServiceRegistration{consulCountingSvc, consulCountingSvcSidecar} + if tt.multiport { + testConsulServices = append(testConsulServices, consulCountingSvcMultiport, consulCountingSvcSidecarMultiport) + } for _, svc := range testConsulServices { - serviceRegistration := &api.CatalogRegistration{ - Node: connectinject.ConsulNodeName, - Address: "127.0.0.1", - Service: &svc, - } - _, err = consulClient.Catalog().Register(serviceRegistration, nil) - require.NoError(t, err) + require.NoError(t, consulClient.Agent().ServiceRegister(&svc)) } ui := cli.NewMockUi() @@ -312,38 +314,46 @@ func TestRun_Gateways(t *testing.T) { UI: ui, serviceRegistrationPollingAttempts: 3, } - // We build the http-addr because normally it's defined by the init container setting // CONSUL_HTTP_ADDR when it processes the command template. - flags := []string{"-pod-name", testGatewayName, + flags := []string{ + "-pod-name", testPodName, "-pod-namespace", testPodNamespace, - "-gateway-kind", tt.gatewayKind, - "-addresses", "127.0.0.1", - "-http-port", strconv.Itoa(serverCfg.Ports.HTTP), - "-grpc-port", strconv.Itoa(serverCfg.Ports.GRPC), "-proxy-id-file", proxyFile, - "-consul-node-name", connectinject.ConsulNodeName, + "-multiport=" + strconv.FormatBool(tt.multiport), + "-http-addr", fmt.Sprintf("%s://%s", cfg.Scheme, cfg.Address), + "-consul-api-timeout", "5s"} + + // In a multiport case, the service name will be passed in to the test. + if tt.serviceName != "" { + flags = append(flags, "-service-name", tt.serviceName) + } + + // Add the CA File if necessary since we're not setting CONSUL_CACERT in tt ENV. + if tt.tls { + flags = append(flags, "-ca-file", caFile) } // Run the command. code := cmd.Run(flags) - if tt.expFail { - require.Equal(t, 1, code) - return - } require.Equal(t, 0, code, ui.ErrorWriter.String()) // Validate contents of proxyFile. data, err := os.ReadFile(proxyFile) require.NoError(t, err) - require.Contains(t, string(data), tt.gatewayKind) + if tt.multiport { + require.Contains(t, string(data), "counting-admin-sidecar-proxy-id") + } else { + require.Contains(t, string(data), "counting-counting-sidecar-proxy") + } }) } + } -// TestRun_ConnectServices_Errors tests that when registered services could not be found, +// TestRun_ServicePollingErrors tests that when registered services could not be found, // we error out. -func TestRun_ConnectServices_Errors(t *testing.T) { +func TestRun_ServicePollingErrors(t *testing.T) { t.Parallel() cases := []struct { @@ -488,103 +498,7 @@ func TestRun_ConnectServices_Errors(t *testing.T) { t.Run(c.name, func(t *testing.T) { proxyFile := fmt.Sprintf("/tmp/%d", rand.Int()) t.Cleanup(func() { - os.RemoveAll(proxyFile) - }) - - // Start Consul server. 
- var serverCfg *testutil.TestServerConfig - server, err := testutil.NewTestServerConfigT(t, func(c *testutil.TestServerConfig) { - serverCfg = c - }) - require.NoError(t, err) - defer server.Stop() - server.WaitForLeader(t) - consulClient, err := api.NewClient(&api.Config{Address: server.HTTPAddr}) - require.NoError(t, err) - - // Register Consul services. - for _, svc := range c.services { - require.NoError(t, consulClient.Agent().ServiceRegister(&svc)) - } - - ui := cli.NewMockUi() - cmd := Command{ - UI: ui, - serviceRegistrationPollingAttempts: 1, - } - flags := []string{ - "-addresses", "127.0.0.1", - "-http-port", strconv.Itoa(serverCfg.Ports.HTTP), - "-grpc-port", strconv.Itoa(serverCfg.Ports.GRPC), - "-pod-name", testPodName, - "-pod-namespace", testPodNamespace, - "-proxy-id-file", proxyFile, - "-consul-node-name", connectinject.ConsulNodeName, - } - - code := cmd.Run(flags) - require.Equal(t, 1, code) - }) - } -} - -// TestRun_Gateways_Errors tests that when registered services could not be found, -// we error out. -func TestRun_Gateways_Errors(t *testing.T) { - t.Parallel() - - cases := []struct { - name string - services []api.AgentServiceRegistration - }{ - { - name: "gateway without pod-name or k8s-namespace meta", - services: []api.AgentServiceRegistration{ - { - ID: "mesh-gateway", - Name: "mesh-gateway", - Kind: "mesh-gateway", - Port: 9999, - Address: "127.0.0.1", - }, - }, - }, - { - name: "gateway with pod-name meta but without k8s-namespace meta", - services: []api.AgentServiceRegistration{ - { - ID: "mesh-gateway", - Name: "mesh-gateway", - Kind: "mesh-gateway", - Port: 9999, - Address: "127.0.0.1", - Meta: map[string]string{ - metaKeyPodName: "mesh-gateway", - }, - }, - }, - }, - { - name: "service and proxy with k8s-namespace meta but pod-name meta", - services: []api.AgentServiceRegistration{ - { - ID: "mesh-gateway", - Name: "mesh-gateway", - Kind: "mesh-gateway", - Port: 9999, - Address: "127.0.0.1", - Meta: map[string]string{ - metaKeyKubeNS: "default-ns", - }, - }, - }}, - } - - for _, c := range cases { - t.Run(c.name, func(t *testing.T) { - proxyFile := fmt.Sprintf("/tmp/%d", rand.Int()) - t.Cleanup(func() { - os.RemoveAll(proxyFile) + os.Remove(proxyFile) }) // Start Consul server. @@ -607,12 +521,10 @@ func TestRun_Gateways_Errors(t *testing.T) { } flags := []string{ "-http-addr", server.HTTPAddr, - "-gateway-kind", "mesh-gateway", "-pod-name", testPodName, "-pod-namespace", testPodNamespace, "-proxy-id-file", proxyFile, "-consul-api-timeout", "5s", - "-consul-node-name", connectinject.ConsulNodeName, } code := cmd.Run(flags) @@ -628,10 +540,7 @@ func TestRun_RetryServicePolling(t *testing.T) { proxyFile := common.WriteTempFile(t, "") // Start Consul server. - var serverCfg *testutil.TestServerConfig - server, err := testutil.NewTestServerConfigT(t, func(c *testutil.TestServerConfig) { - serverCfg = c - }) + server, err := testutil.NewTestServerConfigT(t, nil) require.NoError(t, err) defer server.Stop() server.WaitForLeader(t) @@ -640,24 +549,14 @@ func TestRun_RetryServicePolling(t *testing.T) { // Start the consul service registration in a go func and delay it so that it runs // after the cmd.Run() starts. - var wg sync.WaitGroup - wg.Add(1) go func() { - defer wg.Done() // Wait a moment, this ensures that we are already in the retry logic. time.Sleep(time.Second * 2) // Register counting service. 
- serviceRegistration := &api.CatalogRegistration{ - Node: connectinject.ConsulNodeName, - Address: "127.0.0.1", - Service: &consulCountingSvc, - } - _, err = consulClient.Catalog().Register(serviceRegistration, nil) - require.NoError(t, err) + require.NoError(t, consulClient.Agent().ServiceRegister(&consulCountingSvc)) + time.Sleep(time.Second * 2) // Register proxy sidecar service. - serviceRegistration.Service = &consulCountingSvcSidecar - _, err = consulClient.Catalog().Register(serviceRegistration, nil) - require.NoError(t, err) + require.NoError(t, consulClient.Agent().ServiceRegister(&consulCountingSvcSidecar)) }() ui := cli.NewMockUi() @@ -668,14 +567,11 @@ func TestRun_RetryServicePolling(t *testing.T) { flags := []string{ "-pod-name", testPodName, "-pod-namespace", testPodNamespace, - "-addresses", "127.0.0.1", - "-http-port", strconv.Itoa(serverCfg.Ports.HTTP), - "-grpc-port", strconv.Itoa(serverCfg.Ports.GRPC), + "-http-addr", server.HTTPAddr, "-proxy-id-file", proxyFile, - "-consul-node-name", connectinject.ConsulNodeName, + "-consul-api-timeout", "5s", } code := cmd.Run(flags) - wg.Wait() require.Equal(t, 0, code) // Validate contents of proxyFile. @@ -692,10 +588,7 @@ func TestRun_InvalidProxyFile(t *testing.T) { randFileName := fmt.Sprintf("/foo/%d/%d", rand.Int(), rand.Int()) // Start Consul server. - var serverCfg *testutil.TestServerConfig - server, err := testutil.NewTestServerConfigT(t, func(c *testutil.TestServerConfig) { - serverCfg = c - }) + server, err := testutil.NewTestServerConfigT(t, nil) require.NoError(t, err) defer server.Stop() server.WaitForLeader(t) @@ -703,15 +596,9 @@ func TestRun_InvalidProxyFile(t *testing.T) { require.NoError(t, err) // Register Consul services. - testConsulServices := []api.AgentService{consulCountingSvc, consulCountingSvcSidecar} + testConsulServices := []api.AgentServiceRegistration{consulCountingSvc, consulCountingSvcSidecar} for _, svc := range testConsulServices { - serviceRegistration := &api.CatalogRegistration{ - Node: connectinject.ConsulNodeName, - Address: "127.0.0.1", - Service: &svc, - } - _, err = consulClient.Catalog().Register(serviceRegistration, nil) - require.NoError(t, err) + require.NoError(t, consulClient.Agent().ServiceRegister(&svc)) } ui := cli.NewMockUi() cmd := Command{ @@ -721,9 +608,7 @@ func TestRun_InvalidProxyFile(t *testing.T) { flags := []string{ "-pod-name", testPodName, "-pod-namespace", testPodNamespace, - "-addresses", "127.0.0.1", - "-http-port", strconv.Itoa(serverCfg.Ports.HTTP), - "-grpc-port", strconv.Itoa(serverCfg.Ports.GRPC), + "-http-addr", server.HTTPAddr, "-proxy-id-file", randFileName, "-consul-api-timeout", "5s", } @@ -733,155 +618,234 @@ func TestRun_InvalidProxyFile(t *testing.T) { require.Error(t, err) } -func TestRun_TrafficRedirection(t *testing.T) { - cases := map[string]struct { - proxyConfig map[string]interface{} - tproxyConfig api.TransparentProxyConfig - registerProxyDefaults bool - expIptablesParamsFunc func(actual iptables.Config) (bool, string) +// TestRun_FailsWithBadServerResponses tests error handling with invalid server responses. 
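+// A stub HTTP server stands in for Consul so that /v1/acl/login and
+// /v1/agent/services can each return a malformed body on demand.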
+func TestRun_FailsWithBadServerResponses(t *testing.T) { + t.Parallel() + const servicesGetRetries int = 2 + cases := []struct { + name string + loginResponse string + getServicesListResponse string + expectedServiceGets int }{ - "no extra proxy config provided": {}, - "envoy bind port is provided in service proxy config": { - proxyConfig: map[string]interface{}{"bind_port": "21000"}, - expIptablesParamsFunc: func(actual iptables.Config) (bool, string) { - if actual.ProxyInboundPort == 21000 { - return true, "" - } else { - return false, fmt.Sprintf("ProxyInboundPort in iptables.Config was %d, but should be 21000", actual.ProxyInboundPort) - } - }, + { + name: "acls enabled, acl login response invalid", + loginResponse: "", + expectedServiceGets: 0, }, - // This test is to make sure that we use merge-central-config parameter when we query the service - // so that we get all config merged into the proxy configuration on the service. - "envoy bind port is provided in a config entry": { - proxyConfig: map[string]interface{}{"bind_port": "21000"}, - registerProxyDefaults: true, - expIptablesParamsFunc: func(actual iptables.Config) (bool, string) { - if actual.ProxyInboundPort == 21000 { - return true, "" - } else { - return false, fmt.Sprintf("ProxyInboundPort in iptables.Config was %d, but should be 21000", actual.ProxyInboundPort) - } - }, + { + name: "acls enabled, get service response invalid", + loginResponse: testLoginResponse, + getServicesListResponse: "", + expectedServiceGets: servicesGetRetries + 1, // Plus 1 because we RETRY after an initial attempt. }, - "tproxy outbound listener port is provided in service proxy config": { - tproxyConfig: api.TransparentProxyConfig{OutboundListenerPort: 16000}, - expIptablesParamsFunc: func(actual iptables.Config) (bool, string) { - if actual.ProxyOutboundPort == 16000 { - return true, "" - } else { - return false, fmt.Sprintf("ProxyOutboundPort in iptables.Config was %d, but should be 16000", actual.ProxyOutboundPort) + } + for _, c := range cases { + t.Run(c.name, func(t *testing.T) { + bearerFile := common.WriteTempFile(t, "bearerTokenFile") + tokenFile := common.WriteTempFile(t, "") + + servicesGetCounter := 0 + // Start the mock Consul server. + consulServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + // ACL login request. + if r != nil && r.URL.Path == "/v1/acl/login" && r.Method == "POST" { + w.Write([]byte(c.loginResponse)) } - }, - }, - "tproxy outbound listener port is provided in a config entry": { - tproxyConfig: api.TransparentProxyConfig{OutboundListenerPort: 16000}, - registerProxyDefaults: true, - expIptablesParamsFunc: func(actual iptables.Config) (bool, string) { - if actual.ProxyOutboundPort == 16000 { - return true, "" - } else { - return false, fmt.Sprintf("ProxyOutboundPort in iptables.Config was %d, but should be 16000", actual.ProxyOutboundPort) + // Token read request. + if r != nil && r.URL.Path == "/v1/acl/token/self" && r.Method == "GET" { + w.Write([]byte(testTokenReadSelfResponse)) } - }, - }, - "envoy stats addr is provided in service proxy config": { - proxyConfig: map[string]interface{}{"envoy_stats_bind_addr": "0.0.0.0:9090"}, - expIptablesParamsFunc: func(actual iptables.Config) (bool, string) { - if len(actual.ExcludeInboundPorts) == 1 && actual.ExcludeInboundPorts[0] == "9090" { - return true, "" - } else { - return false, fmt.Sprintf("ExcludeInboundPorts in iptables.Config was %v, but should be [9090]", actual.ExcludeInboundPorts) + // Agent Services get. 
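+ // Each hit on the services list is counted so the test can tell a failure
+ // at login (zero gets) from a failure during service polling.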
+ if r != nil && r.URL.Path == "/v1/agent/services" && r.Method == "GET" { + servicesGetCounter++ + w.Write([]byte(c.getServicesListResponse)) } - }, + })) + defer consulServer.Close() + + // Set up the Command. + ui := cli.NewMockUi() + cmd := Command{ + UI: ui, + flagBearerTokenFile: bearerFile, + flagACLTokenSink: tokenFile, + serviceRegistrationPollingAttempts: uint64(servicesGetRetries), + } + + serverURL, err := url.Parse(consulServer.URL) + require.NoError(t, err) + flags := []string{ + "-pod-name", testPodName, "-pod-namespace", testPodNamespace, + "-acl-auth-method", test.AuthMethod, + "-service-account-name", testServiceAccountName, + "-bearer-token-file", bearerFile, + "-acl-token-sink", tokenFile, + "-http-addr", serverURL.String(), + "-consul-api-timeout", "5s", + } + code := cmd.Run(flags) + require.Equal(t, 1, code) + // We use the counter to ensure we failed at ACL Login (when counter = 0) or proceeded to the service get portion of the command. + require.Equal(t, c.expectedServiceGets, servicesGetCounter) + }) + } +} + +// Tests ACL Login with Retries. +func TestRun_LoginWithRetries(t *testing.T) { + t.Parallel() + cases := []struct { + Description string + TestRetry bool + LoginAttemptsCount int + ExpCode int + }{ + { + Description: "Login succeeds without retries", + TestRetry: false, + LoginAttemptsCount: 1, // 1 because we dont actually retry. + ExpCode: 0, }, - "envoy stats addr is provided in a config entry": { - proxyConfig: map[string]interface{}{"envoy_stats_bind_addr": "0.0.0.0:9090"}, - registerProxyDefaults: true, - expIptablesParamsFunc: func(actual iptables.Config) (bool, string) { - if len(actual.ExcludeInboundPorts) == 1 && actual.ExcludeInboundPorts[0] == "9090" { - return true, "" - } else { - return false, fmt.Sprintf("ExcludeInboundPorts in iptables.Config was %v, but should be [9090]", actual.ExcludeInboundPorts) - } - }, + { + Description: "Login succeeds after 1 retry", + TestRetry: true, + LoginAttemptsCount: 2, + ExpCode: 0, }, } + for _, c := range cases { + t.Run(c.Description, func(t *testing.T) { + // Create a fake input bearer token file and an output file. + bearerFile := common.WriteTempFile(t, "bearerTokenFile") + tokenFile := common.WriteTempFile(t, "") + proxyFile := common.WriteTempFile(t, "") - for name, c := range cases { - t.Run(name, func(t *testing.T) { - proxyFile := fmt.Sprintf("/tmp/%d", rand.Int()) - t.Cleanup(func() { - _ = os.Remove(proxyFile) - }) + // Start the mock Consul server. + counter := 0 + consulServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + // ACL Login. + if r != nil && r.URL.Path == "/v1/acl/login" && r.Method == "POST" { + counter++ + if !c.TestRetry || (c.TestRetry && c.LoginAttemptsCount == counter) { + w.Write([]byte(testLoginResponse)) + } + } + // Token read request. + if r != nil && r.URL.Path == "/v1/acl/token/self" && r.Method == "GET" { + w.Write([]byte(testTokenReadSelfResponse)) + } + // Agent Services get. + if r != nil && r.URL.Path == "/v1/agent/services" && r.Method == "GET" { + w.Write([]byte(testServiceListResponse)) + } + })) + defer consulServer.Close() - // Start Consul server. 
- var serverCfg *testutil.TestServerConfig - server, err := testutil.NewTestServerConfigT(t, func(c *testutil.TestServerConfig) { - serverCfg = c - }) + serverURL, err := url.Parse(consulServer.URL) require.NoError(t, err) - t.Cleanup(func() { - _ = server.Stop() + + ui := cli.NewMockUi() + cmd := Command{ + UI: ui, + } + code := cmd.Run([]string{ + "-pod-name", testPodName, + "-pod-namespace", testPodNamespace, + "-acl-auth-method", test.AuthMethod, + "-service-account-name", testServiceAccountName, + "-acl-token-sink", tokenFile, + "-bearer-token-file", bearerFile, + "-proxy-id-file", proxyFile, + "-http-addr", serverURL.String(), + "-consul-api-timeout", "5s", }) - server.WaitForLeader(t) - consulClient, err := api.NewClient(&api.Config{Address: server.HTTPAddr}) + fmt.Println(ui.ErrorWriter.String()) + require.Equal(t, c.ExpCode, code) + // Cmd will return 1 after numACLLoginRetries, so bound LoginAttemptsCount if we exceeded it. + require.Equal(t, c.LoginAttemptsCount, counter) + // Validate that the token was written to disk if we succeeded. + tokenData, err := os.ReadFile(tokenFile) require.NoError(t, err) + require.Equal(t, "b78d37c7-0ca7-5f4d-99ee-6d9975ce4586", string(tokenData)) + // Validate contents of proxyFile. + proxydata, err := os.ReadFile(proxyFile) + require.NoError(t, err) + require.Equal(t, "counting-counting-sidecar-proxy", string(proxydata)) + }) + } +} - // Add additional proxy configuration either to a config entry or to the service itself. - if c.registerProxyDefaults { - _, _, err = consulClient.ConfigEntries().Set(&api.ProxyConfigEntry{ - Name: api.ProxyConfigGlobal, - Kind: api.ProxyDefaults, - TransparentProxy: &c.tproxyConfig, - Config: c.proxyConfig, - }, nil) - require.NoError(t, err) - } else { - consulCountingSvcSidecar.Proxy.TransparentProxy = &c.tproxyConfig - consulCountingSvcSidecar.Proxy.Config = c.proxyConfig - } - // Register Consul services. - testConsulServices := []api.AgentService{consulCountingSvc, consulCountingSvcSidecar} - for _, svc := range testConsulServices { - serviceRegistration := &api.CatalogRegistration{ - Node: connectinject.ConsulNodeName, - Address: "127.0.0.1", - Service: &svc, +// Test that we check token exists when reading it in the stale consistency mode. +func TestRun_EnsureTokenExists(t *testing.T) { + t.Parallel() + + cases := map[string]struct { + neverSucceed bool + }{ + "succeed after first retry": {neverSucceed: false}, + "never succeed": {neverSucceed: true}, + } + + for name, c := range cases { + t.Run(name, func(t *testing.T) { + // Create a fake input bearer token file and an output file. + bearerFile := common.WriteTempFile(t, "bearerTokenFile") + tokenFile := common.WriteTempFile(t, "") + proxyFile := common.WriteTempFile(t, "") + + // Start the mock Consul server. + counter := 0 + consulServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + // ACL Login. + if r != nil && r.URL.Path == "/v1/acl/login" && r.Method == "POST" { + w.Write([]byte(testLoginResponse)) } - _, err = consulClient.Catalog().Register(serviceRegistration, nil) - require.NoError(t, err) - } - ui := cli.NewMockUi() + // Token read request. + if r != nil && + r.URL.Path == "/v1/acl/token/self" && + r.Method == "GET" && + r.URL.Query().Has("stale") { - iptablesProvider := &fakeIptablesProvider{} - iptablesCfg := iptables.Config{ - ProxyUserID: "5995", - ProxyInboundPort: 20000, - } + // Fail the first request but succeed on the next. 
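+ // (In the never-succeed case, every read keeps returning the 403 below.)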
+ if counter == 0 || c.neverSucceed { + counter++ + w.WriteHeader(http.StatusForbidden) + w.Write([]byte("ACL not found")) + } else { + w.Write([]byte(testTokenReadSelfResponse)) + } + } + // Agent Services get. + if r != nil && r.URL.Path == "/v1/agent/services" && r.Method == "GET" { + w.Write([]byte(testServiceListResponse)) + } + })) + defer consulServer.Close() + + serverURL, err := url.Parse(consulServer.URL) + require.NoError(t, err) + + ui := cli.NewMockUi() cmd := Command{ - UI: ui, - serviceRegistrationPollingAttempts: 3, - iptablesProvider: iptablesProvider, + UI: ui, } - iptablesCfgJSON, err := json.Marshal(iptablesCfg) - require.NoError(t, err) - flags := []string{ + code := cmd.Run([]string{ "-pod-name", testPodName, "-pod-namespace", testPodNamespace, - "-consul-node-name", connectinject.ConsulNodeName, - "-addresses", "127.0.0.1", - "-http-port", strconv.Itoa(serverCfg.Ports.HTTP), - "-grpc-port", strconv.Itoa(serverCfg.Ports.GRPC), + "-acl-auth-method", test.AuthMethod, + "-service-account-name", testServiceAccountName, + "-acl-token-sink", tokenFile, + "-bearer-token-file", bearerFile, "-proxy-id-file", proxyFile, - "-redirect-traffic-config", string(iptablesCfgJSON), - } - code := cmd.Run(flags) - require.Equal(t, 0, code, ui.ErrorWriter.String()) - require.Truef(t, iptablesProvider.applyCalled, "redirect traffic rules were not applied") - if c.expIptablesParamsFunc != nil { - actualIptablesConfigParamsEqualExpected, errMsg := c.expIptablesParamsFunc(cmd.iptablesConfig) - require.Truef(t, actualIptablesConfigParamsEqualExpected, errMsg) + "-http-addr", serverURL.String(), + "-consul-api-timeout", "5s", + }) + if c.neverSucceed { + require.Equal(t, 1, code) + } else { + require.Equal(t, 0, code) + require.Equal(t, 1, counter) } }) } @@ -893,13 +857,129 @@ const ( metaKeyKubeServiceName = "k8s-service-name" testPodNamespace = "default-ns" testPodName = "counting-pod" - testGatewayName = "gateway-pod" + testServiceAccountName = "counting" + + // Sample response from https://consul.io/api-docs/acl#sample-response. + testLoginResponse = `{ + "AccessorID": "926e2bd2-b344-d91b-0c83-ae89f372cd9b", + "SecretID": "b78d37c7-0ca7-5f4d-99ee-6d9975ce4586", + "Description": "token created via login", + "Roles": [ + { + "ID": "3356c67c-5535-403a-ad79-c1d5f9df8fc7", + "Name": "demo" + } + ], + "ServiceIdentities": [ + { + "ServiceName": "example" + } + ], + "Local": true, + "AuthMethod": "minikube", + "CreateTime": "2019-04-29T10:08:08.404370762-05:00", + "Hash": "nLimyD+7l6miiHEBmN/tvCelAmE/SbIXxcnTzG3pbGY=", + "CreateIndex": 36, + "ModifyIndex": 36 +}` + + // Sample response from https://www.consul.io/api-docs/acl/tokens#read-self-token. 
+ testTokenReadSelfResponse = ` +{ + "AccessorID": "6a1253d2-1785-24fd-91c2-f8e78c745511", + "SecretID": "45a3bd52-07c7-47a4-52fd-0745e0cfe967", + "Description": "Agent token for 'node1'", + "Policies": [ + { + "ID": "165d4317-e379-f732-ce70-86278c4558f7", + "Name": "node1-write" + }, + { + "ID": "e359bd81-baca-903e-7e64-1ccd9fdc78f5", + "Name": "node-read" + } + ], + "Local": false, + "CreateTime": "2018-10-24T12:25:06.921933-04:00", + "Hash": "UuiRkOQPRCvoRZHRtUxxbrmwZ5crYrOdZ0Z1FTFbTbA=", + "CreateIndex": 59, + "ModifyIndex": 59 +} +` + + testServiceListResponse = `{ + "counting-counting": { + "ID": "counting-counting", + "Service": "counting", + "Tags": [], + "Meta": { + "k8s-namespace": "default", + "pod-name": "counting-pod", + "k8s-service-name": "counting" + }, + "Port": 9001, + "Address": "10.32.3.26", + "TaggedAddresses": { + "lan_ipv4": { + "Address": "10.32.3.26", + "Port": 9001 + }, + "wan_ipv4": { + "Address": "10.32.3.26", + "Port": 9001 + } + }, + "Weights": { + "Passing": 1, + "Warning": 1 + }, + "EnableTagOverride": false, + "Datacenter": "dc1" + }, + "counting-counting-sidecar-proxy": { + "Kind": "connect-proxy", + "ID": "counting-counting-sidecar-proxy", + "Service": "counting-sidecar-proxy", + "Tags": [], + "Meta": { + "k8s-namespace": "default", + "pod-name": "counting-pod", + "k8s-service-name": "counting" + }, + "Port": 20000, + "Address": "10.32.3.26", + "TaggedAddresses": { + "lan_ipv4": { + "Address": "10.32.3.26", + "Port": 20000 + }, + "wan_ipv4": { + "Address": "10.32.3.26", + "Port": 20000 + } + }, + "Weights": { + "Passing": 1, + "Warning": 1 + }, + "EnableTagOverride": false, + "Proxy": { + "DestinationServiceName": "counting", + "DestinationServiceID": "counting-counting", + "LocalServiceAddress": "127.0.0.1", + "LocalServicePort": 9001, + "MeshGateway": {}, + "Expose": {} + }, + "Datacenter": "dc1" + } +}` ) var ( - consulCountingSvc = api.AgentService{ + consulCountingSvc = api.AgentServiceRegistration{ ID: "counting-counting", - Service: "counting", + Name: "counting", Address: "127.0.0.1", Meta: map[string]string{ metaKeyPodName: "counting-pod", @@ -907,13 +987,15 @@ var ( metaKeyKubeServiceName: "counting", }, } - consulCountingSvcSidecar = api.AgentService{ - ID: "counting-counting-sidecar-proxy", - Service: "counting-sidecar-proxy", - Kind: "connect-proxy", + consulCountingSvcSidecar = api.AgentServiceRegistration{ + ID: "counting-counting-sidecar-proxy", + Name: "counting-sidecar-proxy", + Kind: "connect-proxy", Proxy: &api.AgentServiceConnectProxyConfig{ DestinationServiceName: "counting", DestinationServiceID: "counting-counting", + Config: nil, + Upstreams: nil, }, Port: 9999, Address: "127.0.0.1", @@ -923,9 +1005,9 @@ var ( metaKeyKubeServiceName: "counting", }, } - consulCountingSvcMultiport = api.AgentService{ + consulCountingSvcMultiport = api.AgentServiceRegistration{ ID: "counting-admin-id", - Service: "counting-admin", + Name: "counting-admin", Address: "127.0.0.1", Meta: map[string]string{ metaKeyPodName: "counting-pod", @@ -933,13 +1015,15 @@ var ( metaKeyKubeServiceName: "counting-admin", }, } - consulCountingSvcSidecarMultiport = api.AgentService{ - ID: "counting-admin-sidecar-proxy-id", - Service: "counting-admin-sidecar-proxy", - Kind: "connect-proxy", + consulCountingSvcSidecarMultiport = api.AgentServiceRegistration{ + ID: "counting-admin-sidecar-proxy-id", + Name: "counting-admin-sidecar-proxy", + Kind: "connect-proxy", Proxy: &api.AgentServiceConnectProxyConfig{ DestinationServiceName: "counting-admin", DestinationServiceID: 
"counting-admin-id", + Config: nil, + Upstreams: nil, }, Port: 9999, Address: "127.0.0.1", @@ -950,21 +1034,3 @@ var ( }, } ) - -type fakeIptablesProvider struct { - applyCalled bool - rules []string -} - -func (f *fakeIptablesProvider) AddRule(_ string, args ...string) { - f.rules = append(f.rules, strings.Join(args, " ")) -} - -func (f *fakeIptablesProvider) ApplyRules() error { - f.applyCalled = true - return nil -} - -func (f *fakeIptablesProvider) Rules() []string { - return f.rules -} diff --git a/control-plane/subcommand/consul-logout/command_test.go b/control-plane/subcommand/consul-logout/command_test.go index 22412ea752..877898056d 100644 --- a/control-plane/subcommand/consul-logout/command_test.go +++ b/control-plane/subcommand/consul-logout/command_test.go @@ -63,7 +63,7 @@ func TestRun_InvalidSinkFile(t *testing.T) { func Test_UnableToLogoutDueToInvalidToken(t *testing.T) { tokenFile := fmt.Sprintf("/tmp/%d1", rand.Int()) t.Cleanup(func() { - os.RemoveAll(tokenFile) + os.Remove(tokenFile) }) var caFile, certFile, keyFile string @@ -118,7 +118,7 @@ func Test_RunUsingLogin(t *testing.T) { // This is the test file that we will write the token to so consul-logout can read it. tokenFile := fmt.Sprintf("/tmp/%d1", rand.Int()) t.Cleanup(func() { - os.RemoveAll(tokenFile) + os.Remove(tokenFile) }) // Start Consul server with ACLs enabled and default deny policy. diff --git a/control-plane/subcommand/consul-sidecar/command.go b/control-plane/subcommand/consul-sidecar/command.go new file mode 100644 index 0000000000..6ea77f635c --- /dev/null +++ b/control-plane/subcommand/consul-sidecar/command.go @@ -0,0 +1,427 @@ +package consulsidecar + +import ( + "context" + "errors" + "flag" + "fmt" + "io" + "net/http" + "os" + "os/exec" + "os/signal" + "strings" + "sync" + "syscall" + "time" + + "github.com/hashicorp/consul-k8s/control-plane/subcommand/common" + "github.com/hashicorp/consul-k8s/control-plane/subcommand/flags" + "github.com/hashicorp/go-hclog" + "github.com/mitchellh/cli" +) + +const ( + metricsServerShutdownTimeout = 5 * time.Second + envoyMetricsAddr = "http://127.0.0.1:19000/stats/prometheus" + // prometheusServiceMetricsSuccessKey is the key of the prometheus metric used to + // indicate if service metrics were scraped successfully. + prometheusServiceMetricsSuccessKey = "consul_merged_service_metrics_success" +) + +type Command struct { + UI cli.Ui + + http *flags.HTTPFlags + flagEnableServiceRegistration bool + flagServiceConfig string + flagConsulBinary string + flagSyncPeriod time.Duration + flagSet *flag.FlagSet + flagLogLevel string + flagLogJSON bool + + // Flags to configure metrics merging + flagEnableMetricsMerging bool + flagMergedMetricsPort string + flagServiceMetricsPort string + flagServiceMetricsPath string + + envoyMetricsGetter metricsGetter + serviceMetricsGetter metricsGetter + + consulCommand []string + + logger hclog.Logger + once sync.Once + help string + sigCh chan os.Signal +} + +// metricsGetter abstracts the function of retrieving metrics. It is used to +// enable easier unit testing. +type metricsGetter interface { + Get(url string) (resp *http.Response, err error) +} + +func (c *Command) init() { + c.flagSet = flag.NewFlagSet("", flag.ContinueOnError) + c.flagSet.BoolVar(&c.flagEnableServiceRegistration, "enable-service-registration", true, "Enables consul sidecar to register the service with consul every sync period. 
Defaults to true.") + c.flagSet.StringVar(&c.flagServiceConfig, "service-config", "", "Path to the service config file") + c.flagSet.StringVar(&c.flagConsulBinary, "consul-binary", "consul", "Path to a consul binary") + c.flagSet.DurationVar(&c.flagSyncPeriod, "sync-period", 10*time.Second, "Time between syncing the service registration. Defaults to 10s.") + c.flagSet.StringVar(&c.flagLogLevel, "log-level", "info", + "Log verbosity level. Supported values (in order of detail) are \"trace\", "+ + "\"debug\", \"info\", \"warn\", and \"error\". Defaults to info.") + c.flagSet.BoolVar(&c.flagLogJSON, "log-json", false, + "Enable or disable JSON output format for logging.") + + c.flagSet.BoolVar(&c.flagEnableMetricsMerging, "enable-metrics-merging", false, "Enables consul sidecar to run a merged metrics endpoint. Defaults to false.") + // -merged-metrics-port, -service-metrics-port, and -service-metrics-path + // are only used if metrics merging is enabled. -merged-metrics-port and + // -service-metrics-path have defaults, and -service-metrics-port is + // expected to be set by the connect-inject handler to a valid value. The + // connect-inject handler will only enable metrics merging in the consul + // sidecar if it finds a service metrics port greater than 0. + c.flagSet.StringVar(&c.flagMergedMetricsPort, "merged-metrics-port", "20100", "Port to serve merged Envoy and application metrics. Defaults to 20100.") + c.flagSet.StringVar(&c.flagServiceMetricsPort, "service-metrics-port", "0", "Port where application metrics are being served. Defaults to 0.") + c.flagSet.StringVar(&c.flagServiceMetricsPath, "service-metrics-path", "/metrics", "Path where application metrics are being served. Defaults to /metrics.") + c.help = flags.Usage(help, c.flagSet) + c.http = &flags.HTTPFlags{} + flags.Merge(c.flagSet, c.http.Flags()) + c.help = flags.Usage(help, c.flagSet) + + // Wait on an interrupt or terminate to exit. This channel must be initialized before + // Run() is called so that there are no race conditions where the channel + // is not defined. + if c.sigCh == nil { + c.sigCh = make(chan os.Signal, 1) + signal.Notify(c.sigCh, syscall.SIGINT, syscall.SIGTERM) + } +} + +// Run continually re-registers the service with Consul. +// This is needed because if the Consul Client pod is restarted, it loses all +// its service registrations. +// This command expects to be run as a sidecar and to be injected by the +// mutating webhook. +func (c *Command) Run(args []string) int { + c.once.Do(c.init) + if err := c.flagSet.Parse(args); err != nil { + return 1 + } + + err := c.validateFlags() + if err != nil { + c.UI.Error("Error: " + err.Error()) + return 1 + } + + logger, err := common.Logger(c.flagLogLevel, c.flagLogJSON) + if err != nil { + c.UI.Error(err.Error()) + return 1 + } + c.logger = logger + + // Log initial configuration + c.logger.Info("Command configuration", "enable-service-registration", c.flagEnableServiceRegistration, + "service-config", c.flagServiceConfig, + "consul-binary", c.flagConsulBinary, + "sync-period", c.flagSyncPeriod, + "log-level", c.flagLogLevel, + "enable-metrics-merging", c.flagEnableMetricsMerging, + "merged-metrics-port", c.flagMergedMetricsPort, + "service-metrics-port", c.flagServiceMetricsPort, + "service-metrics-path", c.flagServiceMetricsPath, + ) + + // signalCtx that we pass in to the main work loop, signal handling is handled in another thread + // due to the length of time it can take for the cmd to complete causing synchronization issues + // on shutdown. 
Also passing a context in so that it can interrupt the cmd and exit cleanly. + signalCtx, cancelFunc := context.WithCancel(context.Background()) + go func() { + sig := <-c.sigCh + c.logger.Info(fmt.Sprintf("%s received, shutting down", sig)) + cancelFunc() + }() + + // If metrics merging is enabled, run a merged metrics server in a goroutine + // that serves Envoy sidecar metrics and Connect service metrics. The merged + // metrics server will be shut down when a signal is received by the main + // for loop using shutdownMetricsServer(). + var server *http.Server + srvExitCh := make(chan error) + if c.flagEnableMetricsMerging { + c.logger.Info("Metrics is enabled, creating merged metrics server.") + server = c.createMergedMetricsServer() + + // Run the merged metrics server. + c.logger.Info("Running merged metrics server.") + go func() { + if err = server.ListenAndServe(); err != nil && err != http.ErrServerClosed { + srvExitCh <- err + } + }() + } + + // The work loop for re-registering the service. We continually re-register + // our service every syncPeriod. Consul is smart enough to know when the + // service hasn't changed and so won't update any indices. This means we + // won't be causing a lot of traffic within the cluster. We tolerate Consul + // Clients going down and will simply re-register once it's back up. + if c.flagEnableServiceRegistration { + c.consulCommand = []string{"services", "register"} + c.consulCommand = append(c.consulCommand, c.parseConsulFlags()...) + c.consulCommand = append(c.consulCommand, c.flagServiceConfig) + + go func() { + for { + start := time.Now() + cmd := exec.CommandContext(signalCtx, c.flagConsulBinary, c.consulCommand...) + + // Run the command and record the stdout and stderr output. + output, err := cmd.CombinedOutput() + if err != nil { + c.logger.Error("failed to sync service", "output", strings.TrimSpace(string(output)), "err", err, "duration", time.Since(start)) + } else { + c.logger.Info("successfully synced service", "output", strings.TrimSpace(string(output)), "duration", time.Since(start)) + } + select { + // Re-loop after syncPeriod or exit if we receive interrupt or terminate signals. + case <-time.After(c.flagSyncPeriod): + continue + case <-signalCtx.Done(): + return + } + } + }() + } + + // Block and wait for a signal or for the metrics server to exit. + select { + case <-signalCtx.Done(): + // After the signal is received, wait for the merged metrics server + // to gracefully shutdown as well if it has been enabled. This can + // take up to metricsServerShutdownTimeout seconds. + if c.flagEnableMetricsMerging { + c.logger.Info("Attempting to shut down metrics server.") + c.shutdownMetricsServer(server) + } + return 0 + case err := <-srvExitCh: + c.logger.Error(fmt.Sprintf("Metrics server error: %v", err)) + return 1 + } + +} + +// shutdownMetricsServer handles gracefully shutting down the server. This will +// call server.Shutdown(), which will indefinitely wait for connections to turn +// idle. To avoid potentially waiting forever, we pass a context to +// server.Shutdown() that will timeout in metricsServerShutdownTimeout (5) seconds. +func (c *Command) shutdownMetricsServer(server *http.Server) { + // The shutdownCancelFunc will be unused since it is unnecessary to call it as we + // are already about to call shutdown with a timeout. We'd only need to + // shutdownCancelFunc if we needed to trigger something to happen when the + // shutdownCancelFunc is called, which we do not. 
+
+// shutdownMetricsServer gracefully shuts down the merged metrics server.
+// server.Shutdown() waits indefinitely for connections to become idle, so to
+// avoid potentially waiting forever we pass it a context that times out after
+// metricsServerShutdownTimeout (5 seconds).
+func (c *Command) shutdownMetricsServer(server *http.Server) {
+	// The shutdownCancelFunc is effectively unused: we are already calling
+	// Shutdown with a timeout, and we have nothing to trigger when it is
+	// called. We keep it (rather than discarding it with _) to satisfy the
+	// go vet check.
+	shutdownCtx, shutdownCancelFunc := context.WithTimeout(context.Background(), metricsServerShutdownTimeout)
+	defer shutdownCancelFunc()
+
+	c.logger.Info("Merged metrics server exists, attempting to gracefully shut down server")
+	if err := server.Shutdown(shutdownCtx); err != nil {
+		c.logger.Error(fmt.Sprintf("Server shutdown failed: %s", err))
+		return
+	}
+	c.logger.Info("Server has been shut down")
+}
+
+// createMergedMetricsServer sets up the merged metrics server.
+func (c *Command) createMergedMetricsServer() *http.Server {
+	mux := http.NewServeMux()
+	mux.HandleFunc("/stats/prometheus", c.mergedMetricsHandler)
+
+	mergedMetricsServerAddr := fmt.Sprintf("127.0.0.1:%s", c.flagMergedMetricsPort)
+	server := &http.Server{Addr: mergedMetricsServerAddr, Handler: mux}
+
+	// http.Client satisfies the metricsGetter interface.
+	// The default http.Client timeout is indefinite, so adding a timeout makes
+	// sure that requests don't hang.
+	client := &http.Client{
+		Timeout: time.Second * 10,
+	}
+
+	// During tests these may already be set to mocks.
+	if c.envoyMetricsGetter == nil {
+		c.envoyMetricsGetter = client
+	}
+	if c.serviceMetricsGetter == nil {
+		c.serviceMetricsGetter = client
+	}
+
+	return server
+}
+
+// mergedMetricsHandler appends the Envoy and service metrics together,
+// logging if either scrape is unsuccessful.
+// If the Envoy scrape fails, we respond with a 500 code, which follows the
+// Prometheus exporter guidelines. If the service scrape fails, we respond
+// with a 200 so that the Envoy metrics are still scraped.
+// We also include a metric line in each response indicating the success or
+// failure of the service metric scraping.
+func (c *Command) mergedMetricsHandler(rw http.ResponseWriter, _ *http.Request) {
+	envoyMetrics, err := c.envoyMetricsGetter.Get(envoyMetricsAddr)
+	if err != nil {
+		c.logger.Error("Error scraping Envoy proxy metrics", "err", err)
+		http.Error(rw, fmt.Sprintf("Error scraping Envoy proxy metrics: %s", err), http.StatusInternalServerError)
+		return
+	}
+
+	// Write the Envoy metrics to the response.
+	defer func() {
+		err = envoyMetrics.Body.Close()
+		if err != nil {
+			c.logger.Error(fmt.Sprintf("Error closing Envoy metrics body: %s", err.Error()))
+		}
+	}()
+	envoyMetricsBody, err := io.ReadAll(envoyMetrics.Body)
+	if err != nil {
+		c.logger.Error("Could not read Envoy proxy metrics", "err", err)
+		http.Error(rw, fmt.Sprintf("Could not read Envoy proxy metrics: %s", err), http.StatusInternalServerError)
+		return
+	}
+	if non2xxCode(envoyMetrics.StatusCode) {
+		c.logger.Error("Received non-2xx status code scraping Envoy proxy metrics", "code", envoyMetrics.StatusCode, "response", string(envoyMetricsBody))
+		http.Error(rw, fmt.Sprintf("Received non-2xx status code scraping Envoy proxy metrics: %d: %s", envoyMetrics.StatusCode, string(envoyMetricsBody)), http.StatusInternalServerError)
+		return
+	}
+	writeResponse(rw, envoyMetricsBody, "envoy metrics", c.logger)
+
+	serviceMetricsAddr := fmt.Sprintf("http://127.0.0.1:%s%s", c.flagServiceMetricsPort, c.flagServiceMetricsPath)
+	serviceMetrics, err := c.serviceMetricsGetter.Get(serviceMetricsAddr)
+	if err != nil {
+		c.logger.Warn("Error scraping service metrics", "err", err)
+		writeResponse(rw, serviceMetricSuccess(false), "service metrics success", c.logger)
+		// Since we've already written the Envoy metrics to the response, we can
+		// return at this point if we were unable to get service metrics.
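+		// Editor's illustration (not part of the original patch): in this
+		// failure case the response body ends up containing only the Envoy
+		// metrics plus the failure marker, e.g.:
+		//
+		//   <Envoy metrics payload>
+		//   consul_merged_service_metrics_success 0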
+ return + } + + // Since serviceMetrics will be non-nil if there are no errors, write the + // service metrics to the response as well. + defer func() { + err = serviceMetrics.Body.Close() + if err != nil { + c.logger.Error(fmt.Sprintf("Error closing service metrics body: %s", err.Error())) + } + }() + serviceMetricsBody, err := io.ReadAll(serviceMetrics.Body) + if err != nil { + c.logger.Error("Could not read service metrics", "err", err) + writeResponse(rw, serviceMetricSuccess(false), "service metrics success", c.logger) + return + } + if non2xxCode(serviceMetrics.StatusCode) { + c.logger.Error("Received non-2xx status code scraping service metrics", "code", serviceMetrics.StatusCode, "response", string(serviceMetricsBody)) + writeResponse(rw, serviceMetricSuccess(false), "service metrics success", c.logger) + return + } + writeResponse(rw, serviceMetricsBody, "service metrics", c.logger) + writeResponse(rw, serviceMetricSuccess(true), "service metrics success", c.logger) +} + +// writeResponse is a helper method to write resp to rw and log if there is an error writing. +// respName is the name of this response that will be used in the error log. +func writeResponse(rw http.ResponseWriter, resp []byte, respName string, logger hclog.Logger) { + _, err := rw.Write(resp) + if err != nil { + logger.Error(fmt.Sprintf("Error writing %s: %s", respName, err.Error())) + } +} + +// validateFlags validates the flags. +func (c *Command) validateFlags() error { + if !c.flagEnableServiceRegistration && !c.flagEnableMetricsMerging { + return errors.New("at least one of -enable-service-registration or -enable-metrics-merging must be true") + } + if c.flagEnableServiceRegistration { + if c.flagSyncPeriod == 0 { + // if sync period is 0, then the select loop will + // always pick the first case, and it'll be impossible + // to terminate the command gracefully with SIGINT. + return errors.New("-sync-period must be greater than 0") + } + if c.flagServiceConfig == "" { + return errors.New("-service-config must be set") + } + if c.flagConsulBinary == "" { + return errors.New("-consul-binary must be set") + } + if c.http.ConsulAPITimeout() <= 0 { + return errors.New("-consul-api-timeout must be set to a value greater than 0") + } + _, err := os.Stat(c.flagServiceConfig) + if os.IsNotExist(err) { + return fmt.Errorf("-service-config file %q not found", c.flagServiceConfig) + } + _, err = exec.LookPath(c.flagConsulBinary) + if err != nil { + return fmt.Errorf("-consul-binary %q not found: %s", c.flagConsulBinary, err) + } + } + return nil +} + +// non2xxCode returns true if code is not in the range of 200-299 inclusive. +func non2xxCode(code int) bool { + return code < 200 || code >= 300 +} + +// serviceMetricSuccess returns a prometheus metric line indicating +// the success of the metrics merging. +func serviceMetricSuccess(success bool) []byte { + boolAsInt := 0 + if success { + boolAsInt = 1 + } + return []byte(fmt.Sprintf("%s %d\n", prometheusServiceMetricsSuccessKey, boolAsInt)) +} + +// parseConsulFlags creates Consul client command flags +// from command's HTTP flags and returns them as an array of strings. 
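+// For example (editor's illustration; compare TestRun_ConsulCommandFlags in
+// command_test.go below), running with -http-addr=1.2.3.4:8500 -token=abc
+// would yield:
+//
+//   []string{"-http-addr=1.2.3.4:8500", "-token=abc"}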
+func (c *Command) parseConsulFlags() []string { + var consulCommandFlags []string + c.http.Flags().VisitAll(func(f *flag.Flag) { + // not adding -consul-api-timeout since consul does not use this flag + if f.Value.String() != "" && f.Name != "consul-api-timeout" { + consulCommandFlags = append(consulCommandFlags, fmt.Sprintf("-%s=%s", f.Name, f.Value.String())) + } + }) + return consulCommandFlags +} + +// interrupt sends os.Interrupt signal to the command +// so it can exit gracefully. This function is needed for tests. +func (c *Command) interrupt() { + c.sendSignal(syscall.SIGINT) +} + +func (c *Command) sendSignal(sig os.Signal) { + c.sigCh <- sig +} + +func (c *Command) Synopsis() string { return synopsis } +func (c *Command) Help() string { + c.once.Do(c.init) + return c.help +} + +const synopsis = "Consul sidecar for Connect." +const help = ` +Usage: consul-k8s-control-plane consul-sidecar [options] + + Run as a sidecar to your Connect service. Ensures that your service + is registered with the local Consul client. + +` diff --git a/control-plane/subcommand/consul-sidecar/command_ent_test.go b/control-plane/subcommand/consul-sidecar/command_ent_test.go new file mode 100644 index 0000000000..d3a198d59a --- /dev/null +++ b/control-plane/subcommand/consul-sidecar/command_ent_test.go @@ -0,0 +1,90 @@ +//go:build enterprise + +package consulsidecar + +import ( + "os" + "testing" + "time" + + "github.com/hashicorp/consul/api" + "github.com/hashicorp/consul/sdk/testutil" + "github.com/hashicorp/consul/sdk/testutil/retry" + "github.com/mitchellh/cli" + "github.com/stretchr/testify/require" +) + +// Test that we register the services with namespaces. +func TestRun_ServicesRegistration_Namespaces(t *testing.T) { + t.Parallel() + tmpDir, configFile := createServicesTmpFile(t, servicesRegistrationWithNamespaces) + defer os.RemoveAll(tmpDir) + + a, err := testutil.NewTestServerConfigT(t, nil) + require.NoError(t, err) + defer a.Stop() + + ui := cli.NewMockUi() + cmd := Command{ + UI: ui, + } + + // Run async because we need to kill it when the test is over. 
+ exitChan := runCommandAsynchronously(&cmd, []string{ + "-http-addr", a.HTTPAddr, + "-service-config", configFile, + "-sync-period", "100ms", + "-consul-api-timeout", "5s", + }) + defer stopCommand(t, &cmd, exitChan) + + client, err := api.NewClient(&api.Config{ + Address: a.HTTPAddr, + }) + require.NoError(t, err) + + // create necessary namespaces first + _, _, err = client.Namespaces().Create(&api.Namespace{Name: "namespace"}, nil) + require.NoError(t, err) + + timer := &retry.Timer{Timeout: 1 * time.Second, Wait: 100 * time.Millisecond} + retry.RunWith(timer, t, func(r *retry.R) { + svc, _, err := client.Agent().Service("service-id", &api.QueryOptions{Namespace: "namespace"}) + require.NoError(r, err) + require.Equal(r, 80, svc.Port) + require.Equal(r, "namespace", svc.Namespace) + + svcProxy, _, err := client.Agent().Service("service-id-sidecar-proxy", &api.QueryOptions{Namespace: "namespace"}) + require.NoError(r, err) + require.Equal(r, 2000, svcProxy.Port) + require.Equal(r, svcProxy.Namespace, "namespace") + require.Len(r, svcProxy.Proxy.Upstreams, 1) + require.Equal(r, svcProxy.Proxy.Upstreams[0].DestinationNamespace, "dest-namespace") + }) +} + +const servicesRegistrationWithNamespaces = ` +services { + id = "service-id" + name = "service" + port = 80 + namespace = "namespace" +} +services { + id = "service-id-sidecar-proxy" + name = "service-sidecar-proxy" + namespace = "namespace" + port = 2000 + kind = "connect-proxy" + proxy { + destination_service_name = "service" + destination_service_id = "service-id" + local_service_port = 80 + upstreams { + destination_type = "service" + destination_name = "dest-name" + destination_namespace = "dest-namespace" + local_bind_port = 1234 + } + } +}` diff --git a/control-plane/subcommand/consul-sidecar/command_test.go b/control-plane/subcommand/consul-sidecar/command_test.go new file mode 100644 index 0000000000..cd2d024ec5 --- /dev/null +++ b/control-plane/subcommand/consul-sidecar/command_test.go @@ -0,0 +1,643 @@ +package consulsidecar + +import ( + "bytes" + "fmt" + "io" + + "net" + "net/http" + "os" + "path/filepath" + "syscall" + "testing" + "time" + + "github.com/hashicorp/consul/api" + "github.com/hashicorp/consul/sdk/freeport" + "github.com/hashicorp/consul/sdk/testutil" + "github.com/hashicorp/consul/sdk/testutil/retry" + "github.com/hashicorp/go-hclog" + "github.com/mitchellh/cli" + "github.com/stretchr/testify/require" +) + +func TestRun_Defaults(t *testing.T) { + t.Parallel() + var cmd Command + cmd.init() + require.Equal(t, 10*time.Second, cmd.flagSyncPeriod) + require.Equal(t, "info", cmd.flagLogLevel) + require.Equal(t, "consul", cmd.flagConsulBinary) +} + +func TestRunSignalHandlingRegistrationOnly(t *testing.T) { + cases := map[string]os.Signal{ + "SIGINT": syscall.SIGINT, + "SIGTERM": syscall.SIGTERM, + } + for name, signal := range cases { + t.Run(name, func(t *testing.T) { + + tmpDir, configFile := createServicesTmpFile(t, servicesRegistration) + defer os.RemoveAll(tmpDir) + + a, err := testutil.NewTestServerConfigT(t, nil) + require.NoError(t, err) + defer a.Stop() + + ui := cli.NewMockUi() + cmd := Command{ + UI: ui, + } + + client, err := api.NewClient(&api.Config{ + Address: a.HTTPAddr, + }) + require.NoError(t, err) + // Run async because we need to kill it when the test is over. 
+ exitChan := runCommandAsynchronously(&cmd, []string{ + "-service-config", configFile, + "-http-addr", a.HTTPAddr, + "-sync-period", "1s", + "-consul-api-timeout", "5s", + }) + cmd.sendSignal(signal) + + // Assert that it exits cleanly or timeout. + select { + case exitCode := <-exitChan: + require.Equal(t, 0, exitCode, ui.ErrorWriter.String()) + case <-time.After(time.Second * 1): + // Fail if the signal was not caught. + require.Fail(t, "timeout waiting for command to exit") + } + // Assert that the services were not created because the cmd has exited. + _, _, err = client.Agent().Service("service-id", nil) + require.Error(t, err) + _, _, err = client.Agent().Service("service-id-sidecar-proxy", nil) + require.Error(t, err) + }) + } +} + +func TestRunSignalHandlingMetricsOnly(t *testing.T) { + cases := map[string]os.Signal{ + "SIGINT": syscall.SIGINT, + "SIGTERM": syscall.SIGTERM, + } + for name, signal := range cases { + t.Run(name, func(t *testing.T) { + ui := cli.NewMockUi() + cmd := Command{ + UI: ui, + } + + randomPorts := freeport.GetN(t, 1) + // Run async because we need to kill it when the test is over. + exitChan := runCommandAsynchronously(&cmd, []string{ + "-enable-service-registration=false", + "-enable-metrics-merging=true", + "-merged-metrics-port", fmt.Sprint(randomPorts[0]), + "-service-metrics-port", "8080", + "-service-metrics-path", "/metrics", + "-consul-api-timeout", "5s", + }) + + // Keep an open connection to the server by continuously sending bytes + // on the connection so it will have to be drained. + var conn net.Conn + var err error + retry.Run(t, func(r *retry.R) { + conn, err = net.Dial("tcp", fmt.Sprintf("127.0.0.1:%d", randomPorts[0])) + if err != nil { + require.NoError(r, err) + } + }) + go func() { + for { + _, err := conn.Write([]byte("hello")) + // Once the server has been shut down there will be an error writing to that connection. So, this + // will break out of the for loop and the goroutine will exit (and be cleaned up). + if err != nil { + break + } + } + }() + + // Send a signal to consul-sidecar. The merged metrics server can take + // up to metricsServerShutdownTimeout to finish cleaning up. + cmd.sendSignal(signal) + + // Will need to wait for slightly longer than the shutdown timeout to + // make sure that the command has exited shortly after the timeout. + waitForShutdown := metricsServerShutdownTimeout + 100*time.Millisecond + + // Assert that it exits cleanly or timeout. + select { + case exitCode := <-exitChan: + require.Equal(t, 0, exitCode, ui.ErrorWriter.String()) + case <-time.After(waitForShutdown): + // Fail if the signal was not caught. + require.Fail(t, "timeout waiting for command to exit") + } + }) + } +} + +func TestRunSignalHandlingAllProcessesEnabled(t *testing.T) { + cases := map[string]os.Signal{ + "SIGINT": syscall.SIGINT, + "SIGTERM": syscall.SIGTERM, + } + for name, signal := range cases { + t.Run(name, func(t *testing.T) { + tmpDir, configFile := createServicesTmpFile(t, servicesRegistration) + defer os.RemoveAll(tmpDir) + + a, err := testutil.NewTestServerConfigT(t, nil) + require.NoError(t, err) + defer a.Stop() + + ui := cli.NewMockUi() + cmd := Command{ + UI: ui, + } + + require.NoError(t, err) + + randomPorts := freeport.GetN(t, 1) + // Run async because we need to kill it when the test is over. 
+ exitChan := runCommandAsynchronously(&cmd, []string{ + "-service-config", configFile, + "-http-addr", a.HTTPAddr, + "-enable-metrics-merging=true", + "-merged-metrics-port", fmt.Sprint(randomPorts[0]), + "-service-metrics-port", "8080", + "-service-metrics-path", "/metrics", + "-consul-api-timeout", "5s", + }) + + // Keep an open connection to the server by continuously sending bytes + // on the connection so it will have to be drained. + var conn net.Conn + retry.Run(t, func(r *retry.R) { + conn, err = net.Dial("tcp", fmt.Sprintf("127.0.0.1:%d", randomPorts[0])) + if err != nil { + require.NoError(r, err) + } + }) + go func() { + for { + _, err := conn.Write([]byte("hello")) + // Once the server has been shut down there will be an error writing to that connection. So, this + // will break out of the for loop and the goroutine will exit (and be cleaned up). + if err != nil { + break + } + } + }() + + // Send a signal to consul-sidecar. The merged metrics server can take + // up to metricsServerShutdownTimeout to finish cleaning up. + cmd.sendSignal(signal) + + // Will need to wait for slightly longer than the shutdown timeout to + // make sure that the command has exited shortly after the timeout. + waitForShutdown := metricsServerShutdownTimeout + 100*time.Millisecond + + // Assert that it exits cleanly or timeout. + select { + case exitCode := <-exitChan: + require.Equal(t, 0, exitCode, ui.ErrorWriter.String()) + case <-time.After(waitForShutdown): + // Fail if the signal was not caught. + require.Fail(t, "timeout waiting for command to exit") + } + }) + } +} + +type mockEnvoyMetricsGetter struct { + respStatusCode int +} + +func (em *mockEnvoyMetricsGetter) Get(_ string) (resp *http.Response, err error) { + response := &http.Response{} + response.StatusCode = em.respStatusCode + response.Body = io.NopCloser(bytes.NewReader([]byte("envoy metrics\n"))) + return response, nil +} + +// mockServiceMetricsGetter. +type mockServiceMetricsGetter struct { + // reqURL is the last URL that was passed to Get(url) + reqURL string + + // respStatusCode is the status code to use for the response. + respStatusCode int +} + +func (sm *mockServiceMetricsGetter) Get(url string) (resp *http.Response, err error) { + // Record the URL that we were called with. 
+ sm.reqURL = url + + response := &http.Response{} + response.Body = io.NopCloser(bytes.NewReader([]byte("service metrics\n"))) + response.StatusCode = sm.respStatusCode + + return response, nil +} + +func TestMergedMetricsServer(t *testing.T) { + cases := []struct { + name string + envoyMetricsGetter *mockEnvoyMetricsGetter + serviceMetricsGetter *mockServiceMetricsGetter + expectedStatusCode int + expectedOutput string + }{ + { + name: "happy path: envoy and service metrics are merged", + envoyMetricsGetter: &mockEnvoyMetricsGetter{ + respStatusCode: 200, + }, + serviceMetricsGetter: &mockServiceMetricsGetter{ + respStatusCode: 200, + }, + expectedStatusCode: 200, + expectedOutput: "envoy metrics\nservice metrics\nconsul_merged_service_metrics_success 1\n", + }, + { + name: "service metrics non-200", + envoyMetricsGetter: &mockEnvoyMetricsGetter{ + respStatusCode: 200, + }, + serviceMetricsGetter: &mockServiceMetricsGetter{ + respStatusCode: 404, + }, + expectedStatusCode: 200, + expectedOutput: "envoy metrics\nconsul_merged_service_metrics_success 0\n", + }, + { + name: "envoy metrics non-200", + envoyMetricsGetter: &mockEnvoyMetricsGetter{ + respStatusCode: 404, + }, + serviceMetricsGetter: &mockServiceMetricsGetter{ + respStatusCode: 200, + }, + expectedStatusCode: 500, + expectedOutput: "Received non-2xx status code scraping Envoy proxy metrics: 404: envoy metrics\n\n", + }, + { + name: "envoy and service metrics non-200", + envoyMetricsGetter: &mockEnvoyMetricsGetter{ + respStatusCode: 500, + }, + serviceMetricsGetter: &mockServiceMetricsGetter{ + respStatusCode: 500, + }, + expectedStatusCode: 500, + expectedOutput: "Received non-2xx status code scraping Envoy proxy metrics: 500: envoy metrics\n\n", + }, + } + + for _, c := range cases { + t.Run(c.name, func(t *testing.T) { + randomPorts := freeport.GetN(t, 2) + ui := cli.NewMockUi() + cmd := Command{ + UI: ui, + flagEnableMetricsMerging: true, + flagMergedMetricsPort: fmt.Sprint(randomPorts[0]), + flagServiceMetricsPort: fmt.Sprint(randomPorts[1]), + flagServiceMetricsPath: "/metrics", + logger: hclog.Default(), + envoyMetricsGetter: c.envoyMetricsGetter, + serviceMetricsGetter: c.serviceMetricsGetter, + } + + server := cmd.createMergedMetricsServer() + go func() { + _ = server.ListenAndServe() + }() + defer server.Close() + + // Call the merged metrics endpoint and make assertions on the + // output. retry.Run times out in 7 seconds, which should give the + // merged metrics server enough time to come up. + retry.Run(t, func(r *retry.R) { + resp, err := http.Get(fmt.Sprintf("http://127.0.0.1:%d/stats/prometheus", randomPorts[0])) + require.NoError(r, err) + bytes, err := io.ReadAll(resp.Body) + require.NoError(r, err) + require.Equal(r, c.expectedOutput, string(bytes)) + // Verify the correct service metrics url was used. The service + // metrics endpoint is only called if the Envoy metrics endpoint + // call succeeds. 
+ if c.envoyMetricsGetter.respStatusCode == 200 { + require.Equal(r, fmt.Sprintf("http://127.0.0.1:%d%s", randomPorts[1], "/metrics"), c.serviceMetricsGetter.reqURL) + } + }) + }) + } +} + +func TestRun_FlagValidation(t *testing.T) { + t.Parallel() + cases := []struct { + Flags []string + ExpErr string + }{ + { + Flags: []string{""}, + ExpErr: "-service-config must be set", + }, + { + Flags: []string{ + "-service-config=/config.hcl", + "-consul-binary=", + }, + ExpErr: "-consul-binary must be set", + }, + { + Flags: []string{ + "-service-config=/config.hcl", + "-consul-binary=consul", + "-sync-period=0s", + }, + ExpErr: "-sync-period must be greater than 0", + }, + { + Flags: []string{ + "-enable-service-registration=false", + "-enable-metrics-merging=false", + }, + ExpErr: " at least one of -enable-service-registration or -enable-metrics-merging must be true", + }, + { + Flags: []string{ + "-service-config=/config.hcl", + "-consul-binary=consul", + "-sync-period=5s", + "-enable-service-registration=true", + }, + ExpErr: "-consul-api-timeout must be set to a value greater than 0", + }, + } + + for _, c := range cases { + t.Run(c.ExpErr, func(t *testing.T) { + ui := cli.NewMockUi() + cmd := Command{ + UI: ui, + } + responseCode := cmd.Run(c.Flags) + require.Equal(t, 1, responseCode, ui.ErrorWriter.String()) + require.Contains(t, ui.ErrorWriter.String(), c.ExpErr) + }) + } +} + +func TestRun_FlagValidation_ServiceConfigFileMissing(t *testing.T) { + t.Parallel() + ui := cli.NewMockUi() + cmd := Command{ + UI: ui, + } + responseCode := cmd.Run([]string{"-service-config=/does/not/exist", "-consul-binary=/not/a/valid/path", "-consul-api-timeout=5s"}) + require.Equal(t, 1, responseCode, ui.ErrorWriter.String()) + require.Contains(t, ui.ErrorWriter.String(), "-service-config file \"/does/not/exist\" not found") +} + +func TestRun_FlagValidation_ConsulBinaryMissing(t *testing.T) { + t.Parallel() + + ui := cli.NewMockUi() + cmd := Command{ + UI: ui, + } + + tmpDir, configFile := createServicesTmpFile(t, servicesRegistration) + defer os.RemoveAll(tmpDir) + + configFlag := "-service-config=" + configFile + + responseCode := cmd.Run([]string{configFlag, "-consul-binary=/not/a/valid/path", "-consul-api-timeout=5s"}) + require.Equal(t, 1, responseCode, ui.ErrorWriter.String()) + require.Contains(t, ui.ErrorWriter.String(), "-consul-binary \"/not/a/valid/path\" not found") +} + +func TestRun_FlagValidation_InvalidLogLevel(t *testing.T) { + t.Parallel() + + tmpDir, configFile := createServicesTmpFile(t, servicesRegistration) + defer os.RemoveAll(tmpDir) + + ui := cli.NewMockUi() + cmd := Command{ + UI: ui, + } + responseCode := cmd.Run([]string{"-service-config", configFile, "-consul-binary=consul", "-log-level=foo", "-consul-api-timeout=5s"}) + require.Equal(t, 1, responseCode, ui.ErrorWriter.String()) + require.Contains(t, ui.ErrorWriter.String(), "unknown log level: foo") +} + +// Test that we register the services. +func TestRun_ServicesRegistration(t *testing.T) { + t.Parallel() + + tmpDir, configFile := createServicesTmpFile(t, servicesRegistration) + defer os.RemoveAll(tmpDir) + + a, err := testutil.NewTestServerConfigT(t, nil) + require.NoError(t, err) + defer a.Stop() + + ui := cli.NewMockUi() + cmd := Command{ + UI: ui, + } + + // Run async because we need to kill it when the test is over. 
+ exitChan := runCommandAsynchronously(&cmd, []string{ + "-http-addr", a.HTTPAddr, + "-service-config", configFile, + "-sync-period", "100ms", + "-consul-api-timeout", "5s", + }) + defer stopCommand(t, &cmd, exitChan) + + client, err := api.NewClient(&api.Config{ + Address: a.HTTPAddr, + }) + require.NoError(t, err) + + retry.Run(t, func(r *retry.R) { + svc, _, err := client.Agent().Service("service-id", nil) + require.NoError(r, err) + require.Equal(r, 80, svc.Port) + + svcProxy, _, err := client.Agent().Service("service-id-sidecar-proxy", nil) + require.NoError(r, err) + require.Equal(r, 2000, svcProxy.Port) + }) +} + +// Test that we register services when the Consul agent is down at first. +func TestRun_ServicesRegistration_ConsulDown(t *testing.T) { + t.Parallel() + + tmpDir, configFile := createServicesTmpFile(t, servicesRegistration) + defer os.RemoveAll(tmpDir) + + ui := cli.NewMockUi() + cmd := Command{ + UI: ui, + } + + // we need to reserve all 6 ports to avoid potential + // port collisions with other tests + randomPorts := freeport.GetN(t, 6) + + // Run async because we need to kill it when the test is over. + exitChan := runCommandAsynchronously(&cmd, []string{ + "-http-addr", fmt.Sprintf("127.0.0.1:%d", randomPorts[1]), + "-service-config", configFile, + "-sync-period", "100ms", + "-consul-api-timeout", "5s", + }) + defer stopCommand(t, &cmd, exitChan) + + // Start the Consul agent after 500ms. + time.Sleep(500 * time.Millisecond) + a, err := testutil.NewTestServerConfigT(t, func(c *testutil.TestServerConfig) { + c.Ports = &testutil.TestPortConfig{ + DNS: randomPorts[0], + HTTP: randomPorts[1], + HTTPS: randomPorts[2], + SerfLan: randomPorts[3], + SerfWan: randomPorts[4], + Server: randomPorts[5], + } + }) + require.NoError(t, err) + defer a.Stop() + + client, err := api.NewClient(&api.Config{ + Address: a.HTTPAddr, + }) + require.NoError(t, err) + + // The services should be registered when the Consul agent comes up + retry.Run(t, func(r *retry.R) { + svc, _, err := client.Agent().Service("service-id", nil) + require.NoError(r, err) + require.Equal(r, 80, svc.Port) + + svcProxy, _, err := client.Agent().Service("service-id-sidecar-proxy", nil) + require.NoError(r, err) + require.Equal(r, 2000, svcProxy.Port) + }) +} + +// Test that we parse all flags and pass them down to the underlying Consul command. +func TestRun_ConsulCommandFlags(t *testing.T) { + t.Parallel() + tmpDir, configFile := createServicesTmpFile(t, servicesRegistration) + defer os.RemoveAll(tmpDir) + + a, err := testutil.NewTestServerConfigT(t, nil) + require.NoError(t, err) + defer a.Stop() + + ui := cli.NewMockUi() + cmd := Command{ + UI: ui, + } + + // Run async because we need to kill it when the test is over. + exitChan := runCommandAsynchronously(&cmd, []string{ + "-http-addr", a.HTTPAddr, + "-service-config", configFile, + "-sync-period", "1s", + "-consul-binary", "consul", + "-token=abc", + "-token-file=/token/file", + "-ca-file=/ca/file", + "-ca-path=/ca/path", + "-consul-api-timeout", "5s", + }) + defer stopCommand(t, &cmd, exitChan) + + expectedCommand := []string{ + "services", + "register", + "-http-addr=" + a.HTTPAddr, + "-token=abc", + "-token-file=/token/file", + "-ca-file=/ca/file", + "-ca-path=/ca/path", + configFile, + } + retry.Run(t, func(r *retry.R) { + require.ElementsMatch(r, expectedCommand, cmd.consulCommand) + }) +} + +// This function starts the command asynchronously and returns a non-blocking chan. +// When finished, the command will send its exit code to the channel. 
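+// Typical usage from the tests above (editor's illustration):
+//
+//   exitChan := runCommandAsynchronously(&cmd, args)
+//   defer stopCommand(t, &cmd, exitChan)
+//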
+// Note that it's the responsibility of the caller to terminate the command by calling stopCommand, +// otherwise it can run forever. +func runCommandAsynchronously(cmd *Command, args []string) chan int { + // We have to run cmd.init() to ensure that the channel the command is + // using to watch for os interrupts is initialized. If we don't do this, + // then if stopCommand is called immediately, it will block forever + // because it calls interrupt() which will attempt to send on a nil channel. + cmd.init() + exitChan := make(chan int, 1) + go func() { + exitChan <- cmd.Run(args) + }() + return exitChan +} + +func stopCommand(t *testing.T, cmd *Command, exitChan chan int) { + if len(exitChan) == 0 { + cmd.interrupt() + } + c := <-exitChan + require.Equal(t, 0, c, string(cmd.UI.(*cli.MockUi).ErrorWriter.Bytes())) +} + +// createServicesTmpFile creates a temp directory +// and writes servicesRegistration as an HCL file there. +func createServicesTmpFile(t *testing.T, serviceHCL string) (string, string) { + tmpDir, err := os.MkdirTemp("", "") + require.NoError(t, err) + + configFile := filepath.Join(tmpDir, "svc.hcl") + err = os.WriteFile(configFile, []byte(serviceHCL), 0600) + require.NoError(t, err) + + return tmpDir, configFile +} + +const servicesRegistration = ` +services { + id = "service-id" + name = "service" + port = 80 +} +services { + id = "service-id-sidecar-proxy" + name = "service-sidecar-proxy" + port = 2000 + kind = "connect-proxy" + proxy { + destination_service_name = "service" + destination_service_id = "service-id" + local_service_port = 80 + } +}` diff --git a/control-plane/subcommand/controller/command.go b/control-plane/subcommand/controller/command.go index 4aa20b7eac..083eb72711 100644 --- a/control-plane/subcommand/controller/command.go +++ b/control-plane/subcommand/controller/command.go @@ -6,18 +6,16 @@ import ( "flag" "fmt" "os" - "os/signal" "sync" - "syscall" "github.com/hashicorp/consul-k8s/control-plane/api/common" "github.com/hashicorp/consul-k8s/control-plane/api/v1alpha1" - connectinject "github.com/hashicorp/consul-k8s/control-plane/connect-inject" + "github.com/hashicorp/consul-k8s/control-plane/consul" "github.com/hashicorp/consul-k8s/control-plane/controller" mutatingwebhookconfiguration "github.com/hashicorp/consul-k8s/control-plane/helper/mutating-webhook-configuration" cmdCommon "github.com/hashicorp/consul-k8s/control-plane/subcommand/common" "github.com/hashicorp/consul-k8s/control-plane/subcommand/flags" - "github.com/hashicorp/consul-server-connection-manager/discovery" + "github.com/hashicorp/consul/api" "github.com/mitchellh/cli" "go.uber.org/zap/zapcore" "k8s.io/apimachinery/pkg/runtime" @@ -35,12 +33,13 @@ const WebhookCAFilename = "ca.crt" type Command struct { UI cli.Ui - flagSet *flag.FlagSet - consulFlags *flags.ConsulFlags + flagSet *flag.FlagSet + httpFlags *flags.HTTPFlags flagWebhookTLSCertDir string flagEnableLeaderElection bool flagEnableWebhooks bool + flagDatacenter string flagLogLevel string flagLogJSON bool flagResourcePrefix string @@ -73,6 +72,8 @@ func (c *Command) init() { c.flagSet.BoolVar(&c.flagEnableLeaderElection, "enable-leader-election", false, "Enable leader election for controller. "+ "Enabling this will ensure there is only one active controller manager.") + c.flagSet.StringVar(&c.flagDatacenter, "datacenter", "", + "Name of the Consul datacenter the controller is operating in. 
This is added as metadata on managed custom resources.") c.flagSet.BoolVar(&c.flagEnableNamespaces, "enable-namespaces", false, "[Enterprise Only] Enables Consul Enterprise namespaces, in either a single Consul namespace or mirrored.") c.flagSet.StringVar(&c.flagConsulDestinationNamespace, "consul-destination-namespace", "default", @@ -99,8 +100,8 @@ func (c *Command) init() { c.flagSet.BoolVar(&c.flagLogJSON, "log-json", false, "Enable or disable JSON output format for logging.") - c.consulFlags = &flags.ConsulFlags{} - flags.Merge(c.flagSet, c.consulFlags.Flags()) + c.httpFlags = &flags.HTTPFlags{} + flags.Merge(c.flagSet, c.httpFlags.Flags()) c.help = flags.Usage(help, c.flagSet) } @@ -124,37 +125,6 @@ func (c *Command) Run(args []string) int { ctrl.SetLogger(zapLogger) klog.SetLogger(zapLogger) - // TODO (agentless): find a way to integrate zap logger (via having a generic logger interface in connection manager). - hcLog, err := cmdCommon.NamedLogger(c.flagLogLevel, c.flagLogJSON, "consul-server-connection-manager") - if err != nil { - c.UI.Error(fmt.Sprintf("Error setting up logging: %s", err.Error())) - return 1 - } - - // Create a context to be used by the processes started in this command. - ctx, cancelFunc := signal.NotifyContext(context.Background(), syscall.SIGINT, syscall.SIGTERM) - defer cancelFunc() - // Start Consul server Connection manager - serverConnMgrCfg, err := c.consulFlags.ConsulServerConnMgrConfig() - if err != nil { - c.UI.Error(fmt.Sprintf("unable to create config for consul-server-connection-manager: %s", err)) - return 1 - } - watcher, err := discovery.NewWatcher(ctx, serverConnMgrCfg, hcLog) - if err != nil { - c.UI.Error(fmt.Sprintf("unable to create Consul server watcher: %s", err)) - return 1 - } - - go watcher.Run() - defer watcher.Stop() - - _, err = watcher.State() - if err != nil { - c.UI.Error(fmt.Sprintf("unable to get Consul server addresses from watcher: %s", err)) - return 1 - } - mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{ Scheme: scheme, Port: 9443, @@ -167,10 +137,18 @@ func (c *Command) Run(args []string) int { return 1 } - partitionsEnabled := c.consulFlags.Partition != "" + cfg := api.DefaultConfig() + c.httpFlags.MergeOntoConfig(cfg) + consulClient, err := consul.NewClient(cfg, c.httpFlags.ConsulAPITimeout()) + if err != nil { + setupLog.Error(err, "connecting to Consul agent") + return 1 + } + + partitionsEnabled := c.httpFlags.Partition() != "" consulMeta := common.ConsulMeta{ PartitionsEnabled: partitionsEnabled, - Partition: c.consulFlags.Partition, + Partition: c.httpFlags.Partition(), NamespacesEnabled: c.flagEnableNamespaces, DestinationNamespace: c.flagConsulDestinationNamespace, Mirroring: c.flagEnableNSMirroring, @@ -178,9 +156,8 @@ func (c *Command) Run(args []string) int { } configEntryReconciler := &controller.ConfigEntryController{ - ConsulClientConfig: c.consulFlags.ConsulClientConfig(), - ConsulServerConnMgr: watcher, - DatacenterName: c.consulFlags.Datacenter, + ConsulClient: consulClient, + DatacenterName: c.flagDatacenter, EnableConsulNamespaces: c.flagEnableNamespaces, ConsulDestinationNamespace: c.flagConsulDestinationNamespace, EnableNSMirroring: c.flagEnableNSMirroring, @@ -287,72 +264,76 @@ func (c *Command) Run(args []string) int { // annotation in each webhook file. 
mgr.GetWebhookServer().Register("/mutate-v1alpha1-servicedefaults", &webhook.Admission{Handler: &v1alpha1.ServiceDefaultsWebhook{ - Client: mgr.GetClient(), - Logger: ctrl.Log.WithName("webhooks").WithName(common.ServiceDefaults), - ConsulMeta: consulMeta, + Client: mgr.GetClient(), + ConsulClient: consulClient, + Logger: ctrl.Log.WithName("webhooks").WithName(common.ServiceDefaults), + ConsulMeta: consulMeta, }}) mgr.GetWebhookServer().Register("/mutate-v1alpha1-serviceresolver", &webhook.Admission{Handler: &v1alpha1.ServiceResolverWebhook{ - Client: mgr.GetClient(), - Logger: ctrl.Log.WithName("webhooks").WithName(common.ServiceResolver), - ConsulMeta: consulMeta, + Client: mgr.GetClient(), + ConsulClient: consulClient, + Logger: ctrl.Log.WithName("webhooks").WithName(common.ServiceResolver), + ConsulMeta: consulMeta, }}) mgr.GetWebhookServer().Register("/mutate-v1alpha1-proxydefaults", &webhook.Admission{Handler: &v1alpha1.ProxyDefaultsWebhook{ - Client: mgr.GetClient(), - Logger: ctrl.Log.WithName("webhooks").WithName(common.ProxyDefaults), - ConsulMeta: consulMeta, + Client: mgr.GetClient(), + ConsulClient: consulClient, + Logger: ctrl.Log.WithName("webhooks").WithName(common.ProxyDefaults), + ConsulMeta: consulMeta, }}) mgr.GetWebhookServer().Register("/mutate-v1alpha1-mesh", &webhook.Admission{Handler: &v1alpha1.MeshWebhook{ - Client: mgr.GetClient(), - Logger: ctrl.Log.WithName("webhooks").WithName(common.Mesh), - ConsulMeta: consulMeta, + Client: mgr.GetClient(), + ConsulClient: consulClient, + Logger: ctrl.Log.WithName("webhooks").WithName(common.Mesh), }}) mgr.GetWebhookServer().Register("/mutate-v1alpha1-exportedservices", &webhook.Admission{Handler: &v1alpha1.ExportedServicesWebhook{ - Client: mgr.GetClient(), - Logger: ctrl.Log.WithName("webhooks").WithName(common.ExportedServices), - ConsulMeta: consulMeta, + Client: mgr.GetClient(), + ConsulClient: consulClient, + Logger: ctrl.Log.WithName("webhooks").WithName(common.ExportedServices), + ConsulMeta: consulMeta, }}) mgr.GetWebhookServer().Register("/mutate-v1alpha1-servicerouter", &webhook.Admission{Handler: &v1alpha1.ServiceRouterWebhook{ - Client: mgr.GetClient(), - Logger: ctrl.Log.WithName("webhooks").WithName(common.ServiceRouter), - ConsulMeta: consulMeta, + Client: mgr.GetClient(), + ConsulClient: consulClient, + Logger: ctrl.Log.WithName("webhooks").WithName(common.ServiceRouter), + ConsulMeta: consulMeta, }}) mgr.GetWebhookServer().Register("/mutate-v1alpha1-servicesplitter", &webhook.Admission{Handler: &v1alpha1.ServiceSplitterWebhook{ - Client: mgr.GetClient(), - Logger: ctrl.Log.WithName("webhooks").WithName(common.ServiceSplitter), - ConsulMeta: consulMeta, + Client: mgr.GetClient(), + ConsulClient: consulClient, + Logger: ctrl.Log.WithName("webhooks").WithName(common.ServiceSplitter), + ConsulMeta: consulMeta, }}) mgr.GetWebhookServer().Register("/mutate-v1alpha1-serviceintentions", &webhook.Admission{Handler: &v1alpha1.ServiceIntentionsWebhook{ - Client: mgr.GetClient(), - Logger: ctrl.Log.WithName("webhooks").WithName(common.ServiceIntentions), - ConsulMeta: consulMeta, + Client: mgr.GetClient(), + ConsulClient: consulClient, + Logger: ctrl.Log.WithName("webhooks").WithName(common.ServiceIntentions), + ConsulMeta: consulMeta, }}) mgr.GetWebhookServer().Register("/mutate-v1alpha1-ingressgateway", &webhook.Admission{Handler: &v1alpha1.IngressGatewayWebhook{ - Client: mgr.GetClient(), - Logger: ctrl.Log.WithName("webhooks").WithName(common.IngressGateway), - ConsulMeta: consulMeta, + Client: mgr.GetClient(), + 
ConsulClient: consulClient, + Logger: ctrl.Log.WithName("webhooks").WithName(common.IngressGateway), + ConsulMeta: consulMeta, }}) mgr.GetWebhookServer().Register("/mutate-v1alpha1-terminatinggateway", &webhook.Admission{Handler: &v1alpha1.TerminatingGatewayWebhook{ - Client: mgr.GetClient(), - Logger: ctrl.Log.WithName("webhooks").WithName(common.TerminatingGateway), - ConsulMeta: consulMeta, + Client: mgr.GetClient(), + ConsulClient: consulClient, + Logger: ctrl.Log.WithName("webhooks").WithName(common.TerminatingGateway), + ConsulMeta: consulMeta, }}) } // +kubebuilder:scaffold:builder - if err = mgr.AddReadyzCheck("ready", connectinject.ReadinessCheck{CertDir: c.flagWebhookTLSCertDir}.Ready); err != nil { - setupLog.Error(err, "unable to create readiness check", "controller", connectinject.EndpointsController{}) - return 1 - } - if c.flagEnableWebhookCAUpdate { err := c.updateWebhookCABundle() if err != nil { @@ -400,12 +381,12 @@ func (c *Command) validateFlags() error { return errors.New("Invalid arguments: should have no non-flag arguments") } if c.flagEnableWebhooks && c.flagWebhookTLSCertDir == "" { - return errors.New("invalid arguments: -webhook-tls-cert-dir must be set") + return errors.New("Invalid arguments: -webhook-tls-cert-dir must be set") } - if c.consulFlags.Datacenter == "" { + if c.flagDatacenter == "" { return errors.New("Invalid arguments: -datacenter must be set") } - if c.consulFlags.APITimeout <= 0 { + if c.httpFlags.ConsulAPITimeout() <= 0 { return errors.New("-consul-api-timeout must be set to a value greater than 0") } diff --git a/control-plane/subcommand/controller/command_test.go b/control-plane/subcommand/controller/command_test.go index 792548f324..016299d125 100644 --- a/control-plane/subcommand/controller/command_test.go +++ b/control-plane/subcommand/controller/command_test.go @@ -27,12 +27,12 @@ func TestRun_FlagValidation(t *testing.T) { expErr: "-datacenter must be set", }, { - flags: []string{"-webhook-tls-cert-dir", "/foo", "-datacenter", "foo", "-api-timeout=0s"}, - expErr: "-api-timeout must be set to a value greater than 0", + flags: []string{"-webhook-tls-cert-dir", "/foo", "-datacenter", "foo"}, + expErr: "-consul-api-timeout must be set to a value greater than 0", }, { flags: []string{"-webhook-tls-cert-dir", "/foo", "-datacenter", "foo", - "-log-level", "invalid"}, + "-consul-api-timeout", "5s", "-log-level", "invalid"}, expErr: `unknown log level "invalid": unrecognized level: "invalid"`, }, } diff --git a/control-plane/subcommand/create-federation-secret/command_test.go b/control-plane/subcommand/create-federation-secret/command_test.go index d0f85fa686..dca207d8f1 100644 --- a/control-plane/subcommand/create-federation-secret/command_test.go +++ b/control-plane/subcommand/create-federation-secret/command_test.go @@ -28,7 +28,7 @@ func TestRun_FlagValidation(t *testing.T) { t.Parallel() f, err := os.CreateTemp("", "") require.NoError(t, err) - defer os.RemoveAll(f.Name()) + defer os.Remove(f.Name()) cases := []struct { flags []string @@ -101,7 +101,7 @@ func TestRun_CAFileMissing(t *testing.T) { t.Parallel() f, err := os.CreateTemp("", "") require.NoError(t, err) - defer os.RemoveAll(f.Name()) + defer os.Remove(f.Name()) ui := cli.NewMockUi() cmd := Command{ @@ -124,7 +124,7 @@ func TestRun_ServerCACertFileMissing(t *testing.T) { t.Parallel() f, err := os.CreateTemp("", "") require.NoError(t, err) - defer os.RemoveAll(f.Name()) + defer os.Remove(f.Name()) ui := cli.NewMockUi() cmd := Command{ @@ -147,7 +147,7 @@ func 
TestRun_ServerCAKeyFileMissing(t *testing.T) { t.Parallel() f, err := os.CreateTemp("", "") require.NoError(t, err) - defer os.RemoveAll(f.Name()) + defer os.Remove(f.Name()) ui := cli.NewMockUi() cmd := Command{ @@ -170,7 +170,7 @@ func TestRun_GossipEncryptionKeyFileMissing(t *testing.T) { t.Parallel() f, err := os.CreateTemp("", "") require.NoError(t, err) - defer os.RemoveAll(f.Name()) + defer os.Remove(f.Name()) ui := cli.NewMockUi() cmd := Command{ @@ -194,7 +194,7 @@ func TestRun_GossipEncryptionKeyFileEmpty(t *testing.T) { t.Parallel() f, err := os.CreateTemp("", "") require.NoError(t, err) - defer os.RemoveAll(f.Name()) + defer os.Remove(f.Name()) ui := cli.NewMockUi() cmd := Command{ @@ -220,7 +220,7 @@ func TestRun_ReplicationTokenMissingExpectedKey(t *testing.T) { t.Parallel() f, err := os.CreateTemp("", "") require.NoError(t, err) - defer os.RemoveAll(f.Name()) + defer os.Remove(f.Name()) ui := cli.NewMockUi() k8s := fake.NewSimpleClientset() diff --git a/control-plane/subcommand/flags/consul.go b/control-plane/subcommand/flags/consul.go deleted file mode 100644 index 14839e8962..0000000000 --- a/control-plane/subcommand/flags/consul.go +++ /dev/null @@ -1,265 +0,0 @@ -package flags - -import ( - "crypto/tls" - "flag" - "os" - "strconv" - "strings" - "time" - - "github.com/hashicorp/consul-k8s/control-plane/consul" - "github.com/hashicorp/consul-server-connection-manager/discovery" - "github.com/hashicorp/consul/api" - "github.com/hashicorp/go-rootcerts" -) - -const ( - AddressesEnvVar = "CONSUL_ADDRESSES" - GRPCPortEnvVar = "CONSUL_GRPC_PORT" - HTTPPortEnvVar = "CONSUL_HTTP_PORT" - - NamespaceEnvVar = "CONSUL_NAMESPACE" - PartitionEnvVar = "CONSUL_PARTITION" - DatacenterEnvVar = "CONSUL_DATACENTER" - - UseTLSEnvVar = "CONSUL_USE_TLS" - CACertFileEnvVar = "CONSUL_CACERT_FILE" - CACertPEMEnvVar = "CONSUL_CACERT_PEM" - TLSServerNameEnvVar = "CONSUL_TLS_SERVER_NAME" - - ACLTokenEnvVar = "CONSUL_ACL_TOKEN" - ACLTokenFileEnvVar = "CONSUL_ACL_TOKEN_FILE" - - LoginAuthMethodEnvVar = "CONSUL_LOGIN_AUTH_METHOD" - LoginBearerTokenFileEnvVar = "CONSUL_LOGIN_BEARER_TOKEN_FILE" - LoginDatacenterEnvVar = "CONSUL_LOGIN_DATACENTER" - LoginPartitionEnvVar = "CONSUL_LOGIN_PARTITION" - LoginNamespaceEnvVar = "CONSUL_LOGIN_NAMESPACE" - LoginMetaEnvVar = "CONSUL_LOGIN_META" - - APITimeoutEnvVar = "CONSUL_API_TIMEOUT" -) - -// ConsulFlags is a set of flags used to connect to Consul (servers). -type ConsulFlags struct { - Addresses string - GRPCPort int - HTTPPort int - APITimeout time.Duration - - Namespace string - Partition string - Datacenter string - - ConsulTLSFlags - ConsulACLFlags -} - -type ConsulTLSFlags struct { - UseTLS bool - CACertFile string - CACertPEM string - TLSServerName string -} - -type ConsulACLFlags struct { - ConsulLogin ConsulLoginFlags - - Token string - TokenFile string -} - -type ConsulLoginFlags struct { - AuthMethod string - BearerTokenFile string - Datacenter string - Namespace string - Partition string - Meta map[string]string -} - -func (f *ConsulFlags) Flags() *flag.FlagSet { - fs := flag.NewFlagSet("consul", flag.ContinueOnError) - - // Ignore parsing errors below because if we can't parse env variable because we want to - // behave as if that env variable is not provided. - grpcPort, _ := strconv.Atoi(os.Getenv(GRPCPortEnvVar)) - httpPort, _ := strconv.Atoi(os.Getenv(HTTPPortEnvVar)) - useTLS, _ := strconv.ParseBool(os.Getenv(UseTLSEnvVar)) - consulLoginMetaFromEnv := os.Getenv(LoginMetaEnvVar) - if consulLoginMetaFromEnv != "" { - // Parse meta from env var. 
- metaKeyValuePairs := strings.Split(consulLoginMetaFromEnv, ",") - for _, metaKeyValue := range metaKeyValuePairs { - kvList := strings.Split(metaKeyValue, "=") - // We want to skip setting meta from env var if the key-value pairs are not provided correctly. - if len(kvList) == 2 { - if f.ConsulLogin.Meta == nil { - f.ConsulLogin.Meta = make(map[string]string) - } - f.ConsulLogin.Meta[kvList[0]] = kvList[1] - } - } - } - - defaultConsulLoginBearerTokenFile := "/var/run/secrets/kubernetes.io/serviceaccount/token" - if bearerTokenFileEnvVar := os.Getenv(LoginBearerTokenFileEnvVar); bearerTokenFileEnvVar != "" { - defaultConsulLoginBearerTokenFile = bearerTokenFileEnvVar - } - - defaultAPITimeout := 5 * time.Second - if apiTimeoutEnv := os.Getenv(APITimeoutEnvVar); apiTimeoutEnv != "" { - parsedAPITimeout, _ := time.ParseDuration(apiTimeoutEnv) - if parsedAPITimeout != 0 { - defaultAPITimeout = parsedAPITimeout - } - } - - fs.StringVar(&f.Addresses, "addresses", os.Getenv(AddressesEnvVar), - "Consul server addresses. Can also be provided via CONSUL_ADDRESSES environment variable. "+ - "Value can be:\n"+ - "1. DNS name (that resolves to servers or DNS name of a load-balancer front of Consul servers) or an IP address; OR\n"+ - "2.'exec='. The executable\n"+ - " a) on success - should exit 0 and print to stdout whitespace delimited IP (v4/v6) addresses\n"+ - " b) on failure - exit with a non-zero code and optionally print an error message of upto 1024 bytes to stderr.\n"+ - " Refer to https://github.com/hashicorp/go-netaddrs#summary for more details and examples.") - fs.IntVar(&f.GRPCPort, "grpc-port", grpcPort, - "gRPC port to use when connecting to Consul servers.") - fs.IntVar(&f.HTTPPort, "http-port", httpPort, - "HTTP or HTTPs port to use when connecting to Consul servers.") - fs.StringVar(&f.Namespace, "namespace", os.Getenv(NamespaceEnvVar), - "[Enterprise only] Consul namespace.") - fs.StringVar(&f.Partition, "partition", os.Getenv(PartitionEnvVar), - "[Enterprise only] Consul admin partition. Default to \"default\" if Admin Partitions are enabled.") - fs.StringVar(&f.Datacenter, "datacenter", os.Getenv(DatacenterEnvVar), - "Consul datacenter.") - fs.StringVar(&f.CACertFile, "ca-cert-file", os.Getenv(CACertFileEnvVar), - "Path to a CA certificate to use for TLS when communicating with Consul.") - fs.StringVar(&f.CACertPEM, "ca-cert-pem", os.Getenv(CACertPEMEnvVar), - "CA certificate PEM to use for TLS when communicating with Consul.") - fs.StringVar(&f.TLSServerName, "tls-server-name", os.Getenv(TLSServerNameEnvVar), - "The server name to use as the SNI host when connecting via TLS. 
"+ - "This can also be specified via the CONSUL_TLS_SERVER_NAME environment variable.") - fs.BoolVar(&f.UseTLS, "use-tls", useTLS, "If true, use TLS for connections to Consul.") - fs.StringVar(&f.Token, "token", os.Getenv(ACLTokenEnvVar), - "ACL token to use for connection to Consul."+ - "This can also be specified via the CONSUL_ACL_TOKEN environment variable.") - fs.StringVar(&f.TokenFile, "token-file", os.Getenv(ACLTokenFileEnvVar), - "ACL token file to use for connection to Consul."+ - "This can also be specified via the CONSUL_ACL_TOKEN_FILE environment variable.") - fs.StringVar(&f.ConsulLogin.AuthMethod, "auth-method-name", os.Getenv(LoginAuthMethodEnvVar), - "Auth method name to use for login to Consul."+ - "This can also be specified via the CONSUL_LOGIN_AUTH_METHOD environment variable.") - fs.StringVar(&f.ConsulLogin.BearerTokenFile, "consul-login-bearer-token-file", defaultConsulLoginBearerTokenFile, - "Bearer token file to use for login to Consul."+ - "This can also be specified via the CONSUL_LOGIN_BEARER_TOKEN_FILE environment variable.") - fs.StringVar(&f.ConsulLogin.Datacenter, "consul-login-datacenter", os.Getenv(LoginDatacenterEnvVar), - "Auth method datacenter to use for login to Consul."+ - "This can also be specified via the CONSUL_LOGIN_DATACENTER environment variable.") - fs.StringVar(&f.ConsulLogin.Partition, "consul-login-partition", os.Getenv(LoginPartitionEnvVar), - "Auth method partition to use for login to Consul."+ - "This can also be specified via the CONSUL_LOGIN_PARTITION environment variable.") - fs.StringVar(&f.ConsulLogin.Namespace, "consul-login-namespace", os.Getenv(LoginNamespaceEnvVar), - "Auth method namespace to use for login to Consul."+ - "This can also be specified via the CONSUL_LOGIN_NAMESPACE environment variable.") - fs.Var((*FlagMapValue)(&f.ConsulLogin.Meta), "consul-login-meta", - "Metadata to set on the token, formatted as key=value. 
This flag "+ - "may be specified multiple times to set multiple meta fields.") - fs.DurationVar(&f.APITimeout, "api-timeout", defaultAPITimeout, - "The time in seconds that the consul API client will wait for a response from the API before cancelling the request.") - return fs -} - -func (f *ConsulFlags) ConsulServerConnMgrConfig() (discovery.Config, error) { - cfg := discovery.Config{ - Addresses: f.Addresses, - GRPCPort: f.GRPCPort, - } - - if f.UseTLS { - tlsConfig := &tls.Config{} - if f.CACertFile != "" { - err := rootcerts.ConfigureTLS(tlsConfig, &rootcerts.Config{ - CAFile: f.CACertFile, - }) - if err != nil { - return discovery.Config{}, err - } - } else if f.CACertPEM != "" { - err := rootcerts.ConfigureTLS(tlsConfig, &rootcerts.Config{ - CACertificate: []byte(f.CACertPEM), - }) - if err != nil { - return discovery.Config{}, err - } - } - tlsConfig.ServerName = f.TLSServerName - cfg.TLS = tlsConfig - } - - if f.ConsulLogin.AuthMethod != "" { - cfg.Credentials.Type = discovery.CredentialsTypeLogin - cfg.Credentials.Login.AuthMethod = f.ConsulLogin.AuthMethod - cfg.Credentials.Login.Namespace = f.ConsulLogin.Namespace - cfg.Credentials.Login.Partition = f.ConsulLogin.Partition - cfg.Credentials.Login.Datacenter = f.ConsulLogin.Datacenter - cfg.Credentials.Login.Meta = f.ConsulLogin.Meta - - bearerToken, err := os.ReadFile(f.ConsulLogin.BearerTokenFile) - if err != nil { - return discovery.Config{}, err - } - cfg.Credentials.Login.BearerToken = string(bearerToken) - } else if f.Token != "" { - cfg.Credentials.Type = discovery.CredentialsTypeStatic - cfg.Credentials.Static.Token = f.Token - } else if f.TokenFile != "" { - token, err := os.ReadFile(f.TokenFile) - if err != nil { - return discovery.Config{}, err - } - cfg.Credentials.Type = discovery.CredentialsTypeStatic - cfg.Credentials.Static.Token = string(token) - } - - return cfg, nil -} - -func (f *ConsulFlags) ConsulClientConfig() *consul.Config { - cfg := &api.Config{ - Namespace: f.Namespace, - Partition: f.Partition, - Datacenter: f.Datacenter, - Scheme: "http", - } - - if f.UseTLS { - cfg.Scheme = "https" - if f.CACertFile != "" { - cfg.TLSConfig.CAFile = f.CACertFile - } else if f.CACertPEM != "" { - cfg.TLSConfig.CAPem = []byte(f.CACertPEM) - } - - // Infer TLS server name from addresses. 
- if f.TLSServerName == "" && !strings.HasPrefix(f.Addresses, "exec=") { - cfg.TLSConfig.Address = f.Addresses - } else if f.TLSServerName != "" { - cfg.TLSConfig.Address = f.TLSServerName - } - } - - if f.Token != "" { - cfg.Token = f.Token - } else if f.TokenFile != "" { - cfg.TokenFile = f.TokenFile - } - - return &consul.Config{ - APIClientConfig: cfg, - HTTPPort: f.HTTPPort, - GRPCPort: f.GRPCPort, - APITimeout: f.APITimeout, - } -} diff --git a/control-plane/subcommand/flags/consul_test.go b/control-plane/subcommand/flags/consul_test.go deleted file mode 100644 index 9425c16ef8..0000000000 --- a/control-plane/subcommand/flags/consul_test.go +++ /dev/null @@ -1,436 +0,0 @@ -package flags - -import ( - "crypto/tls" - "os" - "testing" - "time" - - "github.com/hashicorp/consul-server-connection-manager/discovery" - "github.com/hashicorp/consul/api" - "github.com/stretchr/testify/require" -) - -func TestConsulFlags_Flags(t *testing.T) { - cases := map[string]struct { - env map[string]string - expFlags *ConsulFlags - }{ - "env vars": { - env: map[string]string{ - AddressesEnvVar: "consul.address", - GRPCPortEnvVar: "8503", - HTTPPortEnvVar: "8501", - NamespaceEnvVar: "test-ns", - PartitionEnvVar: "test-partition", - DatacenterEnvVar: "test-dc", - APITimeoutEnvVar: "10s", - - UseTLSEnvVar: "true", - CACertFileEnvVar: "path/to/ca.pem", - CACertPEMEnvVar: "test-ca-pem", - TLSServerNameEnvVar: "server.consul", - - ACLTokenEnvVar: "test-token", - ACLTokenFileEnvVar: "/path/to/token", - LoginAuthMethodEnvVar: "test-auth-method", - LoginBearerTokenFileEnvVar: "path/to/token", - LoginDatacenterEnvVar: "other-test-dc", - LoginPartitionEnvVar: "other-test-partition", - LoginNamespaceEnvVar: "other-test-ns", - LoginMetaEnvVar: "key1=value1,key2=value2", - }, - expFlags: &ConsulFlags{ - Addresses: "consul.address", - GRPCPort: 8503, - HTTPPort: 8501, - Namespace: "test-ns", - Partition: "test-partition", - Datacenter: "test-dc", - APITimeout: 10 * time.Second, - ConsulTLSFlags: ConsulTLSFlags{ - UseTLS: true, - CACertFile: "path/to/ca.pem", - CACertPEM: "test-ca-pem", - TLSServerName: "server.consul", - }, - ConsulACLFlags: ConsulACLFlags{ - Token: "test-token", - TokenFile: "/path/to/token", - ConsulLogin: ConsulLoginFlags{ - AuthMethod: "test-auth-method", - BearerTokenFile: "path/to/token", - Datacenter: "other-test-dc", - Partition: "other-test-partition", - Namespace: "other-test-ns", - Meta: map[string]string{"key1": "value1", "key2": "value2"}, - }, - }, - }, - }, - "defaults": { - expFlags: &ConsulFlags{ - APITimeout: 5 * time.Second, - ConsulACLFlags: ConsulACLFlags{ - ConsulLogin: ConsulLoginFlags{ - BearerTokenFile: "/var/run/secrets/kubernetes.io/serviceaccount/token", - }, - }, - }, - }, - "ignore invalid env vars": { - env: map[string]string{ - GRPCPortEnvVar: "not-int-grpc-port", - HTTPPortEnvVar: "not-int-http-port", - APITimeoutEnvVar: "10sec", - - UseTLSEnvVar: "not-a-bool", - - LoginMetaEnvVar: "key1:value1;key2:value2", - }, - expFlags: &ConsulFlags{ - APITimeout: 5 * time.Second, - ConsulACLFlags: ConsulACLFlags{ - ConsulLogin: ConsulLoginFlags{ - BearerTokenFile: "/var/run/secrets/kubernetes.io/serviceaccount/token", - }, - }, - }, - }, - } - - for name, c := range cases { - t.Run(name, func(t *testing.T) { - for k, v := range c.env { - err := os.Setenv(k, v) - require.NoError(t, err) - } - t.Cleanup(func() { - for k := range c.env { - _ = os.Unsetenv(k) - } - }) - - cf := &ConsulFlags{} - consulFlags := cf.Flags() - err := consulFlags.Parse(nil) - require.NoError(t, err) - 
require.Equal(t, c.expFlags, cf) - }) - } -} - -func TestConsulFlags_ConsulServerConnMgrConfig(t *testing.T) { - cases := map[string]struct { - flags ConsulFlags - expConfig discovery.Config - }{ - "basic flags without TLS or ACLs": { - flags: ConsulFlags{ - Addresses: "consul.address", - GRPCPort: 8502, - }, - expConfig: discovery.Config{ - Addresses: "consul.address", - GRPCPort: 8502, - }, - }, - "default TLS": { - flags: ConsulFlags{ - Addresses: "consul.address", - ConsulTLSFlags: ConsulTLSFlags{ - UseTLS: true, - }, - }, - expConfig: discovery.Config{ - Addresses: "consul.address", - TLS: &tls.Config{}, - }, - }, - "ACL Auth method": { - flags: ConsulFlags{ - Addresses: "consul.address", - ConsulACLFlags: ConsulACLFlags{ - ConsulLogin: ConsulLoginFlags{ - AuthMethod: "test-auth-method", - Namespace: "test-ns", - Partition: "test-partition", - Datacenter: "test-dc", - Meta: map[string]string{"key1": "value1", "key2": "value2"}, - }, - }, - }, - expConfig: discovery.Config{ - Addresses: "consul.address", - Credentials: discovery.Credentials{ - Type: discovery.CredentialsTypeLogin, - Login: discovery.LoginCredential{ - AuthMethod: "test-auth-method", - Namespace: "test-ns", - Partition: "test-partition", - Datacenter: "test-dc", - BearerToken: "bearer-token", - Meta: map[string]string{"key1": "value1", "key2": "value2"}, - }, - }, - }, - }, - "Static ACL token": { - flags: ConsulFlags{ - Addresses: "consul.address", - ConsulACLFlags: ConsulACLFlags{ - Token: "test-token", - }, - }, - expConfig: discovery.Config{ - Addresses: "consul.address", - Credentials: discovery.Credentials{ - Type: discovery.CredentialsTypeStatic, - Static: discovery.StaticTokenCredential{ - Token: "test-token", - }, - }, - }, - }, - "Static ACL token file": { - flags: ConsulFlags{ - Addresses: "consul.address", - ConsulACLFlags: ConsulACLFlags{ - // This is the content of the token that we will - // write to a temp file and expect the config to have this in its contents - TokenFile: "test-token", - }, - }, - expConfig: discovery.Config{ - Addresses: "consul.address", - Credentials: discovery.Credentials{ - Type: discovery.CredentialsTypeStatic, - Static: discovery.StaticTokenCredential{ - Token: "test-token", - }, - }, - }, - }, - } - - for name, c := range cases { - t.Run(name, func(t *testing.T) { - if c.flags.ConsulLogin.AuthMethod != "" { - tokenFile, err := os.CreateTemp("", "") - require.NoError(t, err) - t.Cleanup(func() { - _ = os.RemoveAll(tokenFile.Name()) - }) - _, err = tokenFile.WriteString("bearer-token") - require.NoError(t, err) - c.flags.ConsulLogin.BearerTokenFile = tokenFile.Name() - } else if c.flags.TokenFile != "" { - tokenFile, err := os.CreateTemp("", "") - require.NoError(t, err) - t.Cleanup(func() { - _ = os.RemoveAll(tokenFile.Name()) - }) - _, err = tokenFile.WriteString(c.flags.TokenFile) - require.NoError(t, err) - c.flags.TokenFile = tokenFile.Name() - } - cfg, err := c.flags.ConsulServerConnMgrConfig() - require.NoError(t, err) - require.Equal(t, c.expConfig, cfg) - }) - } -} - -func TestConsulFlags_ConsulServerConnMgrConfig_TLS(t *testing.T) { - caFile, err := os.CreateTemp("", "") - t.Cleanup(func() { - _ = os.RemoveAll(caFile.Name()) - }) - require.NoError(t, err) - _, err = caFile.WriteString(testCA) - require.NoError(t, err) - - cases := map[string]struct { - flags ConsulFlags - }{ - "default TLS": { - flags: ConsulFlags{ - Addresses: "consul.address", - ConsulTLSFlags: ConsulTLSFlags{ - UseTLS: true, - }, - }, - }, - "TLS with CA File": { - flags: ConsulFlags{ - Addresses: 
"consul.address", - ConsulTLSFlags: ConsulTLSFlags{ - UseTLS: true, - CACertFile: caFile.Name(), - }, - }, - }, - "TLS with CA Pem": { - flags: ConsulFlags{ - Addresses: "consul.address", - ConsulTLSFlags: ConsulTLSFlags{ - UseTLS: true, - CACertPEM: testCA, - }, - }, - }, - "TLS server name": { - flags: ConsulFlags{ - Addresses: "consul.address", - ConsulTLSFlags: ConsulTLSFlags{ - UseTLS: true, - TLSServerName: "server.consul", - }, - }, - }, - } - - for name, c := range cases { - t.Run(name, func(t *testing.T) { - cfg, err := c.flags.ConsulServerConnMgrConfig() - require.NoError(t, err) - require.NotNil(t, cfg.TLS) - if c.flags.CACertFile != "" || c.flags.CACertPEM != "" { - require.NotNil(t, cfg.TLS.RootCAs) - } - require.Equal(t, c.flags.TLSServerName, cfg.TLS.ServerName) - }) - } -} - -func TestConsulFlags_ConsulAPIClientConfig(t *testing.T) { - cases := map[string]struct { - flags ConsulFlags - expConfig *api.Config - }{ - "basic config": { - flags: ConsulFlags{ - Namespace: "test-ns", - Partition: "test-partition", - Datacenter: "test-dc", - }, - expConfig: &api.Config{ - Namespace: "test-ns", - Partition: "test-partition", - Datacenter: "test-dc", - Scheme: "http", - }, - }, - "with TLS": { - flags: ConsulFlags{ - ConsulTLSFlags: ConsulTLSFlags{ - UseTLS: true, - }, - }, - expConfig: &api.Config{ - Scheme: "https", - }, - }, - "TLS: infer TLS server name when addresses is not an executable": { - flags: ConsulFlags{ - Addresses: "consul", - ConsulTLSFlags: ConsulTLSFlags{ - UseTLS: true, - }, - }, - expConfig: &api.Config{ - Scheme: "https", - TLSConfig: api.TLSConfig{ - Address: "consul", - }, - }, - }, - "TLS: doesn't infer TLS server name when addresses is an executable": { - flags: ConsulFlags{ - Addresses: "exec=echo 1.1.1.1", - ConsulTLSFlags: ConsulTLSFlags{ - UseTLS: true, - }, - }, - expConfig: &api.Config{ - Scheme: "https", - }, - }, - "TLS CA File provided": { - flags: ConsulFlags{ - ConsulTLSFlags: ConsulTLSFlags{ - UseTLS: true, - CACertFile: "path/to/ca", - }, - }, - expConfig: &api.Config{ - Scheme: "https", - TLSConfig: api.TLSConfig{ - CAFile: "path/to/ca", - }, - }, - }, - "TLS CA PEM provided": { - flags: ConsulFlags{ - ConsulTLSFlags: ConsulTLSFlags{ - UseTLS: true, - CACertPEM: testCA, - }, - }, - expConfig: &api.Config{ - Scheme: "https", - TLSConfig: api.TLSConfig{ - CAPem: []byte(testCA), - }, - }, - }, - "ACL token provided": { - flags: ConsulFlags{ - ConsulACLFlags: ConsulACLFlags{ - Token: "test-token", - }, - }, - expConfig: &api.Config{ - Scheme: "http", - Token: "test-token", - }, - }, - "ACL token file provided": { - flags: ConsulFlags{ - ConsulACLFlags: ConsulACLFlags{ - TokenFile: "/path/to/token", - }, - }, - expConfig: &api.Config{ - Scheme: "http", - TokenFile: "/path/to/token", - }, - }, - } - - for name, c := range cases { - t.Run(name, func(t *testing.T) { - require.Equal(t, c.expConfig, c.flags.ConsulClientConfig().APIClientConfig) - }) - } -} - -const testCA = ` ------BEGIN CERTIFICATE----- -MIIC7TCCApOgAwIBAgIQbHoocPoQq7qR3MTNUXdLVDAKBggqhkjOPQQDAjCBuTEL -MAkGA1UEBhMCVVMxCzAJBgNVBAgTAkNBMRYwFAYDVQQHEw1TYW4gRnJhbmNpc2Nv -MRowGAYDVQQJExExMDEgU2Vjb25kIFN0cmVldDEOMAwGA1UEERMFOTQxMDUxFzAV -BgNVBAoTDkhhc2hpQ29ycCBJbmMuMUAwPgYDVQQDEzdDb25zdWwgQWdlbnQgQ0Eg -MTQ0MTkwOTA0MDA4ODQxOTE3MTQzNDM4MjEzMTEzMjA0NjU2OTgwMB4XDTIyMDkx -NjE4NDUwNloXDTI3MDkxNTE4NDUwNlowgbkxCzAJBgNVBAYTAlVTMQswCQYDVQQI -EwJDQTEWMBQGA1UEBxMNU2FuIEZyYW5jaXNjbzEaMBgGA1UECRMRMTAxIFNlY29u -ZCBTdHJlZXQxDjAMBgNVBBETBTk0MTA1MRcwFQYDVQQKEw5IYXNoaUNvcnAgSW5j 
-LjFAMD4GA1UEAxM3Q29uc3VsIEFnZW50IENBIDE0NDE5MDkwNDAwODg0MTkxNzE0 -MzQzODIxMzExMzIwNDY1Njk4MDBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABA9w -J9aqbpdoVXQLdYTfUpBM2bgElznRYQP/GcNQUtvopvVywPjC7obFuZP1oM7YX7Wy -hGyeudV4pvF1lz9nVeOjezB5MA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTAD -AQH/MCkGA1UdDgQiBCA9dZuoEX3yrbebyEEzsN4L2rr7FJd6FsjIioR6KbMIhTAr -BgNVHSMEJDAigCA9dZuoEX3yrbebyEEzsN4L2rr7FJd6FsjIioR6KbMIhTAKBggq -hkjOPQQDAgNIADBFAiARhJR88w9EXLsq5A932auHvLFAw+uQ0a2TLSaJF54fyAIh -APQczkCoIFiLlGp0GYeHEfjvrdm2g8Q3BUDjeAUfZPaW ------END CERTIFICATE-----` diff --git a/control-plane/subcommand/flags/http.go b/control-plane/subcommand/flags/http.go index 74db3c26dc..c9232b26e2 100644 --- a/control-plane/subcommand/flags/http.go +++ b/control-plane/subcommand/flags/http.go @@ -90,10 +90,6 @@ func (f *HTTPFlags) SetTokenFile(v string) error { return f.tokenFile.Set(v) } -func (f *HTTPFlags) TLSServerName() string { - return f.tlsServerName.String() -} - func (f *HTTPFlags) ReadTokenFile() (string, error) { tokenFile := f.tokenFile.String() if tokenFile == "" { diff --git a/control-plane/subcommand/get-consul-client-ca/command_test.go b/control-plane/subcommand/get-consul-client-ca/command_test.go index 9c48e63712..37eecd5434 100644 --- a/control-plane/subcommand/get-consul-client-ca/command_test.go +++ b/control-plane/subcommand/get-consul-client-ca/command_test.go @@ -76,7 +76,7 @@ func TestRun(t *testing.T) { t.Parallel() outputFile, err := os.CreateTemp("", "ca") require.NoError(t, err) - defer os.RemoveAll(outputFile.Name()) + defer os.Remove(outputFile.Name()) caFile, certFile, keyFile := test.GenerateServerCerts(t) @@ -138,7 +138,7 @@ func TestRun_ConsulServerAvailableLater(t *testing.T) { t.Parallel() outputFile, err := os.CreateTemp("", "ca") require.NoError(t, err) - defer os.RemoveAll(outputFile.Name()) + defer os.Remove(outputFile.Name()) caFile, certFile, keyFile := test.GenerateServerCerts(t) @@ -225,7 +225,7 @@ func TestRun_GetsOnlyActiveRoot(t *testing.T) { t.Parallel() outputFile, err := os.CreateTemp("", "ca") require.NoError(t, err) - defer os.RemoveAll(outputFile.Name()) + defer os.Remove(outputFile.Name()) caFile, certFile, keyFile := test.GenerateServerCerts(t) @@ -308,7 +308,7 @@ func TestRun_WithProvider(t *testing.T) { t.Parallel() outputFile, err := os.CreateTemp("", "ca") require.NoError(t, err) - defer os.RemoveAll(outputFile.Name()) + defer os.Remove(outputFile.Name()) ui := cli.NewMockUi() diff --git a/control-plane/subcommand/inject-connect/command.go b/control-plane/subcommand/inject-connect/command.go index 0906940550..71e6473297 100644 --- a/control-plane/subcommand/inject-connect/command.go +++ b/control-plane/subcommand/inject-connect/command.go @@ -5,19 +5,19 @@ import ( "errors" "flag" "fmt" + "net/url" "os" - "os/signal" "strconv" "strings" "sync" - "syscall" "github.com/hashicorp/consul-k8s/control-plane/api/v1alpha1" connectinject "github.com/hashicorp/consul-k8s/control-plane/connect-inject" + "github.com/hashicorp/consul-k8s/control-plane/consul" mutatingwebhookconfiguration "github.com/hashicorp/consul-k8s/control-plane/helper/mutating-webhook-configuration" "github.com/hashicorp/consul-k8s/control-plane/subcommand/common" "github.com/hashicorp/consul-k8s/control-plane/subcommand/flags" - "github.com/hashicorp/consul-server-connection-manager/discovery" + "github.com/hashicorp/consul/api" "github.com/mitchellh/cli" "go.uber.org/zap/zapcore" corev1 "k8s.io/api/core/v1" @@ -41,9 +41,12 @@ type Command struct { flagCertDir string // Directory with TLS certs for listening (PEM) 
flagDefaultInject bool // True to inject by default flagConsulImage string // Docker image for Consul - flagConsulDataplaneImage string // Docker image for Envoy + flagEnvoyImage string // Docker image for Envoy flagConsulK8sImage string // Docker image for consul-k8s flagACLAuthMethod string // Auth Method to use for ACLs, if enabled + flagWriteServiceDefaults bool // True to enable central config injection + flagDefaultProtocol string // Default protocol for use with central config + flagConsulCACert string // [Deprecated] Path to CA Certificate to use when communicating with Consul clients flagEnvoyExtraArgs string // Extra envoy args when starting envoy flagEnableWebhookCAUpdate bool flagLogLevel string @@ -74,7 +77,6 @@ type Command struct { // Metrics settings. flagDefaultEnableMetrics bool - flagEnableGatewayMetrics bool flagDefaultEnableMetricsMerging bool flagDefaultMergedMetricsPort string flagDefaultPrometheusScrapePort string @@ -92,6 +94,10 @@ type Command struct { flagInitContainerMemoryLimit string flagInitContainerMemoryRequest string + // Server address flags. + flagReadServerExposeService bool + flagTokenServerAddresses []string + // Transparent proxy flags. flagDefaultEnableTransparentProxy bool flagTransparentProxyDefaultOverwriteProbes bool @@ -102,9 +108,6 @@ type Command struct { // Peering flags. flagEnablePeering bool - // WAN Federation flags. - flagEnableFederation bool - // Consul DNS flags. flagEnableConsulDNS bool flagResourcePrefix string @@ -112,9 +115,10 @@ type Command struct { flagEnableOpenShift bool flagSet *flag.FlagSet - consul *flags.ConsulFlags + http *flags.HTTPFlags - clientset kubernetes.Interface + consulClient *api.Client + clientset kubernetes.Interface once sync.Once help string @@ -140,16 +144,21 @@ func (c *Command) init() { "Directory with PEM-encoded TLS certificate and key to serve.") c.flagSet.StringVar(&c.flagConsulImage, "consul-image", "", "Docker image for Consul.") - c.flagSet.StringVar(&c.flagConsulDataplaneImage, "consul-dataplane-image", "", - "Docker image for Consul Dataplane.") + c.flagSet.StringVar(&c.flagEnvoyImage, "envoy-image", "", + "Docker image for Envoy.") c.flagSet.StringVar(&c.flagConsulK8sImage, "consul-k8s-image", "", "Docker image for consul-k8s. Used for the connect sidecar.") c.flagSet.BoolVar(&c.flagEnablePeering, "enable-peering", false, "Enable cluster peering controllers.") - c.flagSet.BoolVar(&c.flagEnableFederation, "enable-federation", false, "Enable Consul WAN Federation.") c.flagSet.StringVar(&c.flagEnvoyExtraArgs, "envoy-extra-args", "", "Extra envoy command line args to be set when starting envoy (e.g \"--log-level debug --disable-hot-restart\").") c.flagSet.StringVar(&c.flagACLAuthMethod, "acl-auth-method", "", "The name of the Kubernetes Auth Method to use for connectInjection if ACLs are enabled.") + c.flagSet.BoolVar(&c.flagWriteServiceDefaults, "enable-central-config", false, + "Write a service-defaults config for every Connect service using protocol from -default-protocol or Pod annotation.") + c.flagSet.StringVar(&c.flagDefaultProtocol, "default-protocol", "", + "The default protocol to use in central config registrations.") + c.flagSet.StringVar(&c.flagConsulCACert, "consul-ca-cert", "", + "[Deprecated] Please use '-ca-file' flag instead. Path to CA certificate to use if communicating with Consul clients over HTTPS.") c.flagSet.Var((*flags.AppendSliceValue)(&c.flagAllowK8sNamespacesList), "allow-k8s-namespace", "K8s namespaces to explicitly allow. 
May be specified multiple times.") c.flagSet.Var((*flags.AppendSliceValue)(&c.flagDenyK8sNamespacesList), "deny-k8s-namespace", @@ -189,6 +198,10 @@ func (c *Command) init() { "%q, %q, %q, and %q.", zapcore.DebugLevel.String(), zapcore.InfoLevel.String(), zapcore.WarnLevel.String(), zapcore.ErrorLevel.String())) c.flagSet.BoolVar(&c.flagLogJSON, "log-json", false, "Enable or disable JSON output format for logging.") + c.flagSet.BoolVar(&c.flagReadServerExposeService, "read-server-expose-service", false, + "Enables polling the Consul servers' external service for its IP(s).") + c.flagSet.Var((*flags.AppendSliceValue)(&c.flagTokenServerAddresses), "token-server-address", + "An address of the Consul server(s) as saved in the peering token, formatted host:port, where host may be an IP or DNS name and port must be a gRPC port. May be specified multiple times for multiple addresses.") // Proxy sidecar resource setting flags. c.flagSet.StringVar(&c.flagDefaultSidecarProxyCPURequest, "default-sidecar-proxy-cpu-request", "", "Default sidecar proxy CPU request.") @@ -198,7 +211,6 @@ func (c *Command) init() { // Metrics setting flags. c.flagSet.BoolVar(&c.flagDefaultEnableMetrics, "default-enable-metrics", false, "Default for enabling connect service metrics.") - c.flagSet.BoolVar(&c.flagEnableGatewayMetrics, "enable-gateway-metrics", false, "Allows enabling Consul gateway metrics.") c.flagSet.BoolVar(&c.flagDefaultEnableMetricsMerging, "default-enable-metrics-merging", false, "Default for enabling merging of connect service metrics and envoy proxy metrics.") c.flagSet.StringVar(&c.flagDefaultMergedMetricsPort, "default-merged-metrics-port", "20100", "Default port for merged metrics endpoint on the consul-sidecar.") c.flagSet.StringVar(&c.flagDefaultPrometheusScrapePort, "default-prometheus-scrape-port", "20200", "Default port where Prometheus scrapes connect metrics from.") @@ -217,9 +229,9 @@ func (c *Command) init() { c.flagSet.StringVar(&c.flagDefaultConsulSidecarMemoryLimit, "default-consul-sidecar-memory-limit", "50Mi", "Default consul sidecar memory limit.") c.flagSet.IntVar(&c.flagDefaultEnvoyProxyConcurrency, "default-envoy-proxy-concurrency", 2, "Default Envoy proxy concurrency.") - c.consul = &flags.ConsulFlags{} + c.http = &flags.HTTPFlags{} - flags.Merge(c.flagSet, c.consul.Flags()) + flags.Merge(c.flagSet, c.http.Flags()) // flag.CommandLine is a package level variable representing the default flagSet. The init() function in // "sigs.k8s.io/controller-runtime/pkg/client/config", which is imported by ctrl, registers the flag --kubeconfig to // the default flagSet. That's why we need to merge it to have access with our flagSet. @@ -249,7 +261,6 @@ func (c *Command) Run(args []string) int { return 1 } } - if c.flagDefaultSidecarProxyCPULimit != "" { sidecarProxyCPULimit, err = resource.ParseQuantity(c.flagDefaultSidecarProxyCPULimit) if err != nil { @@ -298,7 +309,7 @@ func (c *Command) Run(args []string) int { } // Validate resource request/limit flags and parse into corev1.ResourceRequirements - initResources, err := c.parseAndValidateResourceFlags() + initResources, consulSidecarResources, err := c.parseAndValidateResourceFlags() if err != nil { c.UI.Error(err.Error()) return 1 @@ -318,73 +329,68 @@ func (c *Command) Run(args []string) int { } } - // Convert allow/deny lists to sets. 
- allowK8sNamespaces := flags.ToSet(c.flagAllowK8sNamespacesList) - denyK8sNamespaces := flags.ToSet(c.flagDenyK8sNamespacesList) - - zapLogger, err := common.ZapLogger(c.flagLogLevel, c.flagLogJSON) - if err != nil { - c.UI.Error(fmt.Sprintf("Error setting up logging: %s", err.Error())) - return 1 + // Create Consul API config object. + cfg := api.DefaultConfig() + c.http.MergeOntoConfig(cfg) + if cfg.TLSConfig.CAFile == "" && c.flagConsulCACert != "" { + cfg.TLSConfig.CAFile = c.flagConsulCACert } - ctrl.SetLogger(zapLogger) - klog.SetLogger(zapLogger) - - // TODO (agentless): find a way to integrate zap logger (via having a generic logger interface in connection manager). - hcLog, err := common.NamedLogger(c.flagLogLevel, c.flagLogJSON, "consul-server-connection-manager") - if err != nil { - c.UI.Error(fmt.Sprintf("Error setting up logging: %s", err.Error())) - return 1 + consulURLRaw := cfg.Address + // cfg.Address may or may not be prefixed with scheme. + if !strings.Contains(cfg.Address, "://") { + consulURLRaw = fmt.Sprintf("%s://%s", cfg.Scheme, cfg.Address) } - - listenSplits := strings.SplitN(c.flagListen, ":", 2) - if len(listenSplits) < 2 { - c.UI.Error(fmt.Sprintf("missing port in address: %s", c.flagListen)) - return 1 - } - port, err := strconv.Atoi(listenSplits[1]) + consulURL, err := url.Parse(consulURLRaw) if err != nil { - c.UI.Error(fmt.Sprintf("unable to parse port string: %s", err)) + c.UI.Error(fmt.Sprintf("error parsing consul address %q: %s", consulURLRaw, err)) return 1 } - // Create Consul API config object. - consulConfig := c.consul.ConsulClientConfig() + // Load CA file contents. + var consulCACert []byte + if cfg.TLSConfig.CAFile != "" { + var err error + consulCACert, err = os.ReadFile(cfg.TLSConfig.CAFile) + if err != nil { + c.UI.Error(fmt.Sprintf("error reading Consul's CA cert file %q: %s", cfg.TLSConfig.CAFile, err)) + return 1 + } + } - var caCertPem []byte - if c.consul.CACertFile != "" { + // Set up Consul client. + if c.consulClient == nil { var err error - caCertPem, err = os.ReadFile(c.consul.CACertFile) + c.consulClient, err = consul.NewClient(cfg, c.http.ConsulAPITimeout()) if err != nil { - c.UI.Error(fmt.Sprintf("error reading Consul's CA cert file %q", c.consul.CACertFile)) + c.UI.Error(fmt.Sprintf("error connecting to Consul agent: %s", err)) return 1 } } // Create a context to be used by the processes started in this command. - ctx, cancelFunc := signal.NotifyContext(context.Background(), syscall.SIGINT, syscall.SIGTERM) + ctx, cancelFunc := context.WithCancel(context.Background()) defer cancelFunc() - // Start Consul server Connection manager. - serverConnMgrCfg, err := c.consul.ConsulServerConnMgrConfig() + // Convert allow/deny lists to sets. 
+ allowK8sNamespaces := flags.ToSet(c.flagAllowK8sNamespacesList) + denyK8sNamespaces := flags.ToSet(c.flagDenyK8sNamespacesList) + + zapLogger, err := common.ZapLogger(c.flagLogLevel, c.flagLogJSON) if err != nil { - c.UI.Error(fmt.Sprintf("unable to create config for consul-server-connection-manager: %s", err)) + c.UI.Error(fmt.Sprintf("Error setting up logging: %s", err.Error())) return 1 } - watcher, err := discovery.NewWatcher(ctx, serverConnMgrCfg, hcLog) - if err != nil { - c.UI.Error(fmt.Sprintf("unable to create Consul server watcher: %s", err)) + ctrl.SetLogger(zapLogger) + klog.SetLogger(zapLogger) + + listenSplits := strings.SplitN(c.flagListen, ":", 2) + if len(listenSplits) < 2 { + c.UI.Error(fmt.Sprintf("missing port in address: %s", c.flagListen)) return 1 } - - go watcher.Run() - defer watcher.Stop() - - // This is a blocking command that is run in order to ensure we only start the - // connect-inject controllers only after we have access to the Consul server. - _, err = watcher.State() + port, err := strconv.Atoi(listenSplits[1]) if err != nil { - c.UI.Error(fmt.Sprintf("unable to start Consul server watcher: %s", err)) + c.UI.Error(fmt.Sprintf("unable to parse port string: %s", err)) return 1 } @@ -405,7 +411,6 @@ func (c *Command) Run(args []string) int { metricsConfig := connectinject.MetricsConfig{ DefaultEnableMetrics: c.flagDefaultEnableMetrics, - EnableGatewayMetrics: c.flagEnableGatewayMetrics, DefaultEnableMetricsMerging: c.flagDefaultEnableMetricsMerging, DefaultMergedMetricsPort: c.flagDefaultMergedMetricsPort, DefaultPrometheusScrapePort: c.flagDefaultPrometheusScrapePort, @@ -414,11 +419,13 @@ func (c *Command) Run(args []string) int { if err = (&connectinject.EndpointsController{ Client: mgr.GetClient(), - ConsulClientConfig: consulConfig, - ConsulServerConnMgr: watcher, + ConsulClient: c.consulClient, + ConsulScheme: consulURL.Scheme, + ConsulPort: consulURL.Port(), AllowK8sNamespacesSet: allowK8sNamespaces, DenyK8sNamespacesSet: denyK8sNamespaces, MetricsConfig: metricsConfig, + ConsulClientCfg: cfg, EnableConsulPartitions: c.flagEnablePartitions, EnableConsulNamespaces: c.flagEnableNamespaces, ConsulDestinationNamespace: c.flagConsulDestinationNamespace, @@ -426,7 +433,6 @@ func (c *Command) Run(args []string) int { NSMirroringPrefix: c.flagK8SNSMirroringPrefix, CrossNSACLPolicy: c.flagCrossNamespaceACLPolicy, EnableTransparentProxy: c.flagDefaultEnableTransparentProxy, - EnableWANFederation: c.flagEnableFederation, TProxyOverwriteProbes: c.flagTransparentProxyDefaultOverwriteProbes, AuthMethod: c.flagACLAuthMethod, Log: ctrl.Log.WithName("controller").WithName("endpoints"), @@ -434,6 +440,7 @@ func (c *Command) Run(args []string) int { ReleaseName: c.flagReleaseName, ReleaseNamespace: c.flagReleaseNamespace, Context: ctx, + ConsulAPITimeout: c.http.ConsulAPITimeout(), }).SetupWithManager(mgr); err != nil { setupLog.Error(err, "unable to create controller", "controller", connectinject.EndpointsController{}) return 1 @@ -446,25 +453,25 @@ func (c *Command) Run(args []string) int { if c.flagEnablePeering { if err = (&connectinject.PeeringAcceptorController{ - Client: mgr.GetClient(), - ConsulClientConfig: consulConfig, - ConsulServerConnMgr: watcher, - ExposeServersServiceName: c.flagResourcePrefix + "-expose-servers", - ReleaseNamespace: c.flagReleaseNamespace, - Log: ctrl.Log.WithName("controller").WithName("peering-acceptor"), - Scheme: mgr.GetScheme(), - Context: ctx, + Client: mgr.GetClient(), + ConsulClient: c.consulClient, + ExposeServersServiceName: 
c.flagResourcePrefix + "-expose-servers", + ReadServerExternalService: c.flagReadServerExposeService, + TokenServerAddresses: c.flagTokenServerAddresses, + ReleaseNamespace: c.flagReleaseNamespace, + Log: ctrl.Log.WithName("controller").WithName("peering-acceptor"), + Scheme: mgr.GetScheme(), + Context: ctx, }).SetupWithManager(mgr); err != nil { setupLog.Error(err, "unable to create controller", "controller", "peering-acceptor") return 1 } if err = (&connectinject.PeeringDialerController{ - Client: mgr.GetClient(), - ConsulClientConfig: consulConfig, - ConsulServerConnMgr: watcher, - Log: ctrl.Log.WithName("controller").WithName("peering-dialer"), - Scheme: mgr.GetScheme(), - Context: ctx, + Client: mgr.GetClient(), + ConsulClient: c.consulClient, + Log: ctrl.Log.WithName("controller").WithName("peering-dialer"), + Scheme: mgr.GetScheme(), + Context: ctx, }).SetupWithManager(mgr); err != nil { setupLog.Error(err, "unable to create controller", "controller", "peering-dialer") return 1 @@ -472,13 +479,15 @@ func (c *Command) Run(args []string) int { mgr.GetWebhookServer().Register("/mutate-v1alpha1-peeringacceptors", &webhook.Admission{Handler: &v1alpha1.PeeringAcceptorWebhook{ - Client: mgr.GetClient(), - Logger: ctrl.Log.WithName("webhooks").WithName("peering-acceptor"), + Client: mgr.GetClient(), + ConsulClient: c.consulClient, + Logger: ctrl.Log.WithName("webhooks").WithName("peering-acceptor"), }}) mgr.GetWebhookServer().Register("/mutate-v1alpha1-peeringdialers", &webhook.Admission{Handler: &v1alpha1.PeeringDialerWebhook{ - Client: mgr.GetClient(), - Logger: ctrl.Log.WithName("webhooks").WithName("peering-dialer"), + Client: mgr.GetClient(), + ConsulClient: c.consulClient, + Logger: ctrl.Log.WithName("webhooks").WithName("peering-dialer"), }}) } @@ -486,54 +495,52 @@ func (c *Command) Run(args []string) int { mgr.GetWebhookServer().Register("/mutate", &webhook.Admission{Handler: &connectinject.MeshWebhook{ - Clientset: c.clientset, - ReleaseNamespace: c.flagReleaseNamespace, - ConsulConfig: consulConfig, - ConsulServerConnMgr: watcher, - ImageConsul: c.flagConsulImage, - ImageConsulDataplane: c.flagConsulDataplaneImage, - EnvoyExtraArgs: c.flagEnvoyExtraArgs, - ImageConsulK8S: c.flagConsulK8sImage, - RequireAnnotation: !c.flagDefaultInject, - AuthMethod: c.flagACLAuthMethod, - ConsulCACert: string(caCertPem), - TLSEnabled: c.consul.UseTLS, - ConsulAddress: c.consul.Addresses, - ConsulTLSServerName: c.consul.TLSServerName, - DefaultProxyCPURequest: sidecarProxyCPURequest, - DefaultProxyCPULimit: sidecarProxyCPULimit, - DefaultProxyMemoryRequest: sidecarProxyMemoryRequest, - DefaultProxyMemoryLimit: sidecarProxyMemoryLimit, - DefaultEnvoyProxyConcurrency: c.flagDefaultEnvoyProxyConcurrency, - MetricsConfig: metricsConfig, - InitContainerResources: initResources, - ConsulPartition: c.consul.Partition, - AllowK8sNamespacesSet: allowK8sNamespaces, - DenyK8sNamespacesSet: denyK8sNamespaces, - EnableNamespaces: c.flagEnableNamespaces, - ConsulDestinationNamespace: c.flagConsulDestinationNamespace, - EnableK8SNSMirroring: c.flagEnableK8SNSMirroring, - K8SNSMirroringPrefix: c.flagK8SNSMirroringPrefix, - CrossNamespaceACLPolicy: c.flagCrossNamespaceACLPolicy, - EnableTransparentProxy: c.flagDefaultEnableTransparentProxy, - EnableCNI: c.flagEnableCNI, - TProxyOverwriteProbes: c.flagTransparentProxyDefaultOverwriteProbes, - EnableConsulDNS: c.flagEnableConsulDNS, - EnableOpenShift: c.flagEnableOpenShift, - Log: ctrl.Log.WithName("handler").WithName("connect"), - LogLevel: c.flagLogLevel, - 
LogJSON: c.flagLogJSON, + Clientset: c.clientset, + ConsulClient: c.consulClient, + ImageConsul: c.flagConsulImage, + ImageEnvoy: c.flagEnvoyImage, + EnvoyExtraArgs: c.flagEnvoyExtraArgs, + ImageConsulK8S: c.flagConsulK8sImage, + RequireAnnotation: !c.flagDefaultInject, + AuthMethod: c.flagACLAuthMethod, + ConsulCACert: string(consulCACert), + DefaultProxyCPURequest: sidecarProxyCPURequest, + DefaultProxyCPULimit: sidecarProxyCPULimit, + DefaultProxyMemoryRequest: sidecarProxyMemoryRequest, + DefaultProxyMemoryLimit: sidecarProxyMemoryLimit, + DefaultEnvoyProxyConcurrency: c.flagDefaultEnvoyProxyConcurrency, + MetricsConfig: metricsConfig, + InitContainerResources: initResources, + DefaultConsulSidecarResources: consulSidecarResources, + ConsulPartition: c.http.Partition(), + AllowK8sNamespacesSet: allowK8sNamespaces, + DenyK8sNamespacesSet: denyK8sNamespaces, + EnableNamespaces: c.flagEnableNamespaces, + ConsulDestinationNamespace: c.flagConsulDestinationNamespace, + EnableK8SNSMirroring: c.flagEnableK8SNSMirroring, + K8SNSMirroringPrefix: c.flagK8SNSMirroringPrefix, + CrossNamespaceACLPolicy: c.flagCrossNamespaceACLPolicy, + EnableTransparentProxy: c.flagDefaultEnableTransparentProxy, + EnableCNI: c.flagEnableCNI, + TProxyOverwriteProbes: c.flagTransparentProxyDefaultOverwriteProbes, + EnableConsulDNS: c.flagEnableConsulDNS, + ResourcePrefix: c.flagResourcePrefix, + EnableOpenShift: c.flagEnableOpenShift, + Log: ctrl.Log.WithName("handler").WithName("connect"), + LogLevel: c.flagLogLevel, + LogJSON: c.flagLogJSON, + ConsulAPITimeout: c.http.ConsulAPITimeout(), }}) if c.flagEnableWebhookCAUpdate { - err = c.updateWebhookCABundle(ctx) + err := c.updateWebhookCABundle(ctx) if err != nil { setupLog.Error(err, "problem getting CA Cert") return 1 } } - if err = mgr.Start(ctx); err != nil { + if err := mgr.Start(ctx); err != nil { setupLog.Error(err, "problem running manager") return 1 } @@ -554,66 +561,74 @@ func (c *Command) updateWebhookCABundle(ctx context.Context) error { } return nil } - func (c *Command) validateFlags() error { if c.flagConsulK8sImage == "" { return errors.New("-consul-k8s-image must be set") + } if c.flagConsulImage == "" { return errors.New("-consul-image must be set") } - if c.flagConsulDataplaneImage == "" { - return errors.New("-consul-dataplane-image must be set") + if c.flagEnvoyImage == "" { + return errors.New("-envoy-image must be set") + } + if c.flagWriteServiceDefaults { + return errors.New("-enable-central-config is no longer supported") + } + if c.flagDefaultProtocol != "" { + return errors.New("-default-protocol is no longer supported") } - if c.flagEnablePartitions && c.consul.Partition == "" { - return errors.New("-partition must set if -enable-partitions is set to 'true'") + if c.flagEnablePartitions && c.http.Partition() == "" { + return errors.New("-partition-name must set if -enable-partitions is set to 'true'") } - if c.consul.Partition != "" && !c.flagEnablePartitions { - return errors.New("-enable-partitions must be set to 'true' if -partition is set") + if c.http.Partition() != "" && !c.flagEnablePartitions { + return errors.New("-enable-partitions must be set to 'true' if -partition-name is set") } if c.flagDefaultEnvoyProxyConcurrency < 0 { return errors.New("-default-envoy-proxy-concurrency must be >= 0 if set") } + if c.http.ConsulAPITimeout() <= 0 { + return errors.New("-consul-api-timeout must be set to a value greater than 0") + } return nil } - -func (c *Command) parseAndValidateResourceFlags() (corev1.ResourceRequirements, error) { 
+func (c *Command) parseAndValidateResourceFlags() (corev1.ResourceRequirements, corev1.ResourceRequirements, error) { // Init container var initContainerCPULimit, initContainerCPURequest, initContainerMemoryLimit, initContainerMemoryRequest resource.Quantity // Parse and validate the initContainer resources. initContainerCPURequest, err := resource.ParseQuantity(c.flagInitContainerCPURequest) if err != nil { - return corev1.ResourceRequirements{}, + return corev1.ResourceRequirements{}, corev1.ResourceRequirements{}, fmt.Errorf("-init-container-cpu-request '%s' is invalid: %s", c.flagInitContainerCPURequest, err) } initContainerCPULimit, err = resource.ParseQuantity(c.flagInitContainerCPULimit) if err != nil { - return corev1.ResourceRequirements{}, + return corev1.ResourceRequirements{}, corev1.ResourceRequirements{}, fmt.Errorf("-init-container-cpu-limit '%s' is invalid: %s", c.flagInitContainerCPULimit, err) } if initContainerCPULimit.Value() != 0 && initContainerCPURequest.Cmp(initContainerCPULimit) > 0 { - return corev1.ResourceRequirements{}, fmt.Errorf( + return corev1.ResourceRequirements{}, corev1.ResourceRequirements{}, fmt.Errorf( "request must be <= limit: -init-container-cpu-request value of %q is greater than the -init-container-cpu-limit value of %q", c.flagInitContainerCPURequest, c.flagInitContainerCPULimit) } initContainerMemoryRequest, err = resource.ParseQuantity(c.flagInitContainerMemoryRequest) if err != nil { - return corev1.ResourceRequirements{}, + return corev1.ResourceRequirements{}, corev1.ResourceRequirements{}, fmt.Errorf("-init-container-memory-request '%s' is invalid: %s", c.flagInitContainerMemoryRequest, err) } initContainerMemoryLimit, err = resource.ParseQuantity(c.flagInitContainerMemoryLimit) if err != nil { - return corev1.ResourceRequirements{}, + return corev1.ResourceRequirements{}, corev1.ResourceRequirements{}, fmt.Errorf("-init-container-memory-limit '%s' is invalid: %s", c.flagInitContainerMemoryLimit, err) } if initContainerMemoryLimit.Value() != 0 && initContainerMemoryRequest.Cmp(initContainerMemoryLimit) > 0 { - return corev1.ResourceRequirements{}, fmt.Errorf( + return corev1.ResourceRequirements{}, corev1.ResourceRequirements{}, fmt.Errorf( "request must be <= limit: -init-container-memory-request value of %q is greater than the -init-container-memory-limit value of %q", c.flagInitContainerMemoryRequest, c.flagInitContainerMemoryLimit) } @@ -630,7 +645,55 @@ func (c *Command) parseAndValidateResourceFlags() (corev1.ResourceRequirements, }, } - return initResources, nil + // Consul sidecar + var consulSidecarCPULimit, consulSidecarCPURequest, consulSidecarMemoryLimit, consulSidecarMemoryRequest resource.Quantity + + // Parse and validate the Consul sidecar resources + consulSidecarCPURequest, err = resource.ParseQuantity(c.flagDefaultConsulSidecarCPURequest) + if err != nil { + return corev1.ResourceRequirements{}, corev1.ResourceRequirements{}, + fmt.Errorf("-default-consul-sidecar-cpu-request '%s' is invalid: %s", c.flagDefaultConsulSidecarCPURequest, err) + } + consulSidecarCPULimit, err = resource.ParseQuantity(c.flagDefaultConsulSidecarCPULimit) + if err != nil { + return corev1.ResourceRequirements{}, corev1.ResourceRequirements{}, + fmt.Errorf("-default-consul-sidecar-cpu-limit '%s' is invalid: %s", c.flagDefaultConsulSidecarCPULimit, err) + } + if consulSidecarCPULimit.Value() != 0 && consulSidecarCPURequest.Cmp(consulSidecarCPULimit) > 0 { + return corev1.ResourceRequirements{}, corev1.ResourceRequirements{}, fmt.Errorf( + 
"request must be <= limit: -default-consul-sidecar-cpu-request value of %q is greater than the -default-consul-sidecar-cpu-limit value of %q", + c.flagDefaultConsulSidecarCPURequest, c.flagDefaultConsulSidecarCPULimit) + } + + consulSidecarMemoryRequest, err = resource.ParseQuantity(c.flagDefaultConsulSidecarMemoryRequest) + if err != nil { + return corev1.ResourceRequirements{}, corev1.ResourceRequirements{}, + fmt.Errorf("-default-consul-sidecar-memory-request '%s' is invalid: %s", c.flagDefaultConsulSidecarMemoryRequest, err) + } + consulSidecarMemoryLimit, err = resource.ParseQuantity(c.flagDefaultConsulSidecarMemoryLimit) + if err != nil { + return corev1.ResourceRequirements{}, corev1.ResourceRequirements{}, + fmt.Errorf("-default-consul-sidecar-memory-limit '%s' is invalid: %s", c.flagDefaultConsulSidecarMemoryLimit, err) + } + if consulSidecarMemoryLimit.Value() != 0 && consulSidecarMemoryRequest.Cmp(consulSidecarMemoryLimit) > 0 { + return corev1.ResourceRequirements{}, corev1.ResourceRequirements{}, fmt.Errorf( + "request must be <= limit: -default-consul-sidecar-memory-request value of %q is greater than the -default-consul-sidecar-memory-limit value of %q", + c.flagDefaultConsulSidecarMemoryRequest, c.flagDefaultConsulSidecarMemoryLimit) + } + + // Put into corev1.ResourceRequirements form + consulSidecarResources := corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceCPU: consulSidecarCPURequest, + corev1.ResourceMemory: consulSidecarMemoryRequest, + }, + Limits: corev1.ResourceList{ + corev1.ResourceCPU: consulSidecarCPULimit, + corev1.ResourceMemory: consulSidecarMemoryLimit, + }, + } + + return initResources, consulSidecarResources, nil } func (c *Command) Synopsis() string { return synopsis } @@ -639,12 +702,11 @@ func (c *Command) Help() string { return c.help } -const ( - synopsis = "Inject the proxy sidecar, run endpoints controller and peering controllers." - help = ` +const synopsis = "Inject Connect proxy sidecar." +const help = ` Usage: consul-k8s-control-plane inject-connect [options] - Run the admission webhook server for injecting the sidecar proxy, - the endpoints controller, and the peering controllers. + Run the admission webhook server for injecting the Consul Connect + proxy sidecar. The sidecar uses Envoy by default. 
+ ` -) diff --git a/control-plane/subcommand/inject-connect/command_test.go b/control-plane/subcommand/inject-connect/command_test.go index 5f067cf7c2..93fff95a94 100644 --- a/control-plane/subcommand/inject-connect/command_test.go +++ b/control-plane/subcommand/inject-connect/command_test.go @@ -1,8 +1,10 @@ package connectinject import ( + "os" "testing" + "github.com/hashicorp/consul/api" "github.com/mitchellh/cli" "github.com/stretchr/testify/require" "k8s.io/client-go/kubernetes/fake" @@ -23,109 +25,161 @@ func TestRun_FlagValidation(t *testing.T) { }, { flags: []string{"-consul-k8s-image", "foo", "-consul-image", "foo"}, - expErr: "-consul-dataplane-image must be set", + expErr: "-envoy-image must be set", }, { - flags: []string{"-consul-k8s-image", "foo", "-consul-image", "foo", "-consul-dataplane-image", "consul-dataplane:1.14.0", - "-log-level", "invalid"}, + flags: []string{"-consul-k8s-image", "foo", "-consul-image", "foo", "-envoy-image", "envoy:1.16.0"}, + expErr: "-consul-api-timeout must be set to a value greater than 0", + }, + { + flags: []string{"-consul-k8s-image", "foo", "-consul-image", "foo", "-envoy-image", "envoy:1.16.0", + "-consul-api-timeout", "5s", "-log-level", "invalid"}, expErr: "unknown log level \"invalid\": unrecognized level: \"invalid\"", }, { - flags: []string{"-consul-k8s-image", "foo", "-consul-image", "foo", "-consul-dataplane-image", "consul-dataplane:1.14.0", - "-ca-cert-file", "bar"}, + flags: []string{"-consul-k8s-image", "foo", "-consul-image", "foo", "-envoy-image", "envoy:1.16.0", + "-consul-api-timeout", "5s", "-enable-central-config", "true"}, + expErr: "-enable-central-config is no longer supported", + }, + { + flags: []string{"-consul-k8s-image", "foo", "-consul-image", "foo", "-envoy-image", "envoy:1.16.0", + "-consul-api-timeout", "5s", "-default-protocol", "http"}, + expErr: "-default-protocol is no longer supported", + }, + { + flags: []string{"-consul-k8s-image", "foo", "-consul-image", "foo", "-envoy-image", "envoy:1.16.0", + "-consul-api-timeout", "5s", "-ca-file", "bar"}, expErr: "error reading Consul's CA cert file \"bar\"", }, { - flags: []string{"-consul-k8s-image", "foo", "-consul-image", "foo", "-consul-dataplane-image", "consul-dataplane:1.14.0", - "-enable-partitions", "true"}, - expErr: "-partition must set if -enable-partitions is set to 'true'", + flags: []string{"-consul-k8s-image", "foo", "-consul-image", "foo", "-envoy-image", "envoy:1.16.0", + "-consul-api-timeout", "5s", "-enable-partitions", "true"}, + expErr: "-partition-name must set if -enable-partitions is set to 'true'", }, { - flags: []string{"-consul-k8s-image", "foo", "-consul-image", "foo", "-consul-dataplane-image", "consul-dataplane:1.14.0", - "-partition", "default"}, - expErr: "-enable-partitions must be set to 'true' if -partition is set", + flags: []string{"-consul-k8s-image", "foo", "-consul-image", "foo", "-envoy-image", "envoy:1.16.0", + "-consul-api-timeout", "5s", "-partition", "default"}, + expErr: "-enable-partitions must be set to 'true' if -partition-name is set", }, { - flags: []string{"-consul-k8s-image", "foo", "-consul-image", "foo", "-consul-dataplane-image", "consul-dataplane:1.14.0", - "-default-sidecar-proxy-cpu-limit=unparseable"}, + flags: []string{"-consul-k8s-image", "foo", "-consul-image", "foo", "-envoy-image", "envoy:1.16.0", + "-consul-api-timeout", "5s", "-default-sidecar-proxy-cpu-limit=unparseable"}, expErr: "-default-sidecar-proxy-cpu-limit is invalid", }, { - flags: []string{"-consul-k8s-image", "foo", "-consul-image", 
"foo", "-consul-dataplane-image", "consul-dataplane:1.14.0", - "-default-sidecar-proxy-cpu-request=unparseable"}, + flags: []string{"-consul-k8s-image", "foo", "-consul-image", "foo", "-envoy-image", "envoy:1.16.0", + "-consul-api-timeout", "5s", "-default-sidecar-proxy-cpu-request=unparseable"}, expErr: "-default-sidecar-proxy-cpu-request is invalid", }, { - flags: []string{"-consul-k8s-image", "foo", "-consul-image", "foo", "-consul-dataplane-image", "consul-dataplane:1.14.0", - "-default-sidecar-proxy-memory-limit=unparseable"}, + flags: []string{"-consul-k8s-image", "foo", "-consul-image", "foo", "-envoy-image", "envoy:1.16.0", + "-consul-api-timeout", "5s", "-default-sidecar-proxy-memory-limit=unparseable"}, expErr: "-default-sidecar-proxy-memory-limit is invalid", }, { - flags: []string{"-consul-k8s-image", "foo", "-consul-image", "foo", "-consul-dataplane-image", "consul-dataplane:1.14.0", - "-default-sidecar-proxy-memory-request=unparseable"}, + flags: []string{"-consul-k8s-image", "foo", "-consul-image", "foo", "-envoy-image", "envoy:1.16.0", + "-consul-api-timeout", "5s", "-default-sidecar-proxy-memory-request=unparseable"}, expErr: "-default-sidecar-proxy-memory-request is invalid", }, { - flags: []string{"-consul-k8s-image", "foo", "-consul-image", "foo", "-consul-dataplane-image", "consul-dataplane:1.14.0", + flags: []string{"-consul-k8s-image", "foo", "-consul-image", "foo", "-envoy-image", "envoy:1.16.0", + "-consul-api-timeout", "5s", "-default-sidecar-proxy-memory-request=50Mi", "-default-sidecar-proxy-memory-limit=25Mi", }, expErr: "request must be <= limit: -default-sidecar-proxy-memory-request value of \"50Mi\" is greater than the -default-sidecar-proxy-memory-limit value of \"25Mi\"", }, { - flags: []string{"-consul-k8s-image", "foo", "-consul-image", "foo", "-consul-dataplane-image", "consul-dataplane:1.14.0", + flags: []string{"-consul-k8s-image", "foo", "-consul-image", "foo", "-envoy-image", "envoy:1.16.0", + "-consul-api-timeout", "5s", "-default-sidecar-proxy-cpu-request=50m", "-default-sidecar-proxy-cpu-limit=25m", }, expErr: "request must be <= limit: -default-sidecar-proxy-cpu-request value of \"50m\" is greater than the -default-sidecar-proxy-cpu-limit value of \"25m\"", }, { - flags: []string{"-consul-k8s-image", "foo", "-consul-image", "foo", "-consul-dataplane-image", "consul-dataplane:1.14.0", - "-init-container-cpu-limit=unparseable"}, + flags: []string{"-consul-k8s-image", "foo", "-consul-image", "foo", "-envoy-image", "envoy:1.16.0", + "-consul-api-timeout", "5s", "-init-container-cpu-limit=unparseable"}, expErr: "-init-container-cpu-limit 'unparseable' is invalid", }, { - flags: []string{"-consul-k8s-image", "foo", "-consul-image", "foo", "-consul-dataplane-image", "consul-dataplane:1.14.0", - "-init-container-cpu-request=unparseable"}, + flags: []string{"-consul-k8s-image", "foo", "-consul-image", "foo", "-envoy-image", "envoy:1.16.0", + "-consul-api-timeout", "5s", "-init-container-cpu-request=unparseable"}, expErr: "-init-container-cpu-request 'unparseable' is invalid", }, { - flags: []string{"-consul-k8s-image", "foo", "-consul-image", "foo", "-consul-dataplane-image", "consul-dataplane:1.14.0", - "-init-container-memory-limit=unparseable"}, + flags: []string{"-consul-k8s-image", "foo", "-consul-image", "foo", "-envoy-image", "envoy:1.16.0", + "-consul-api-timeout", "5s", "-init-container-memory-limit=unparseable"}, expErr: "-init-container-memory-limit 'unparseable' is invalid", }, { - flags: []string{"-consul-k8s-image", "foo", "-consul-image", 
"foo", "-consul-dataplane-image", "consul-dataplane:1.14.0", - "-init-container-memory-request=unparseable"}, + flags: []string{"-consul-k8s-image", "foo", "-consul-image", "foo", "-envoy-image", "envoy:1.16.0", + "-consul-api-timeout", "5s", "-init-container-memory-request=unparseable"}, expErr: "-init-container-memory-request 'unparseable' is invalid", }, { - flags: []string{"-consul-k8s-image", "foo", "-consul-image", "foo", "-consul-dataplane-image", "consul-dataplane:1.14.0", - "-init-container-memory-request=50Mi", + flags: []string{"-consul-k8s-image", "foo", "-consul-image", "foo", "-envoy-image", "envoy:1.16.0", + "-consul-api-timeout", "5s", "-init-container-memory-request=50Mi", "-init-container-memory-limit=25Mi", }, expErr: "request must be <= limit: -init-container-memory-request value of \"50Mi\" is greater than the -init-container-memory-limit value of \"25Mi\"", }, { - flags: []string{"-consul-k8s-image", "foo", "-consul-image", "foo", "-consul-dataplane-image", "consul-dataplane:1.14.0", - "-init-container-cpu-request=50m", + flags: []string{"-consul-k8s-image", "foo", "-consul-image", "foo", "-envoy-image", "envoy:1.16.0", + "-consul-api-timeout", "5s", "-init-container-cpu-request=50m", "-init-container-cpu-limit=25m", }, expErr: "request must be <= limit: -init-container-cpu-request value of \"50m\" is greater than the -init-container-cpu-limit value of \"25m\"", }, { - flags: []string{"-consul-k8s-image", "hashicorp/consul-k8s", "-consul-image", "foo", "-consul-dataplane-image", "consul-dataplane:1.14.0", + flags: []string{"-consul-k8s-image", "foo", "-consul-image", "foo", "-envoy-image", "envoy:1.16.0", + "-consul-api-timeout", "5s", "-default-consul-sidecar-cpu-limit=unparseable"}, + expErr: "-default-consul-sidecar-cpu-limit 'unparseable' is invalid", + }, + { + flags: []string{"-consul-k8s-image", "foo", "-consul-image", "foo", "-envoy-image", "envoy:1.16.0", + "-consul-api-timeout", "5s", "-default-consul-sidecar-cpu-request=unparseable"}, + expErr: "-default-consul-sidecar-cpu-request 'unparseable' is invalid", + }, + { + flags: []string{"-consul-k8s-image", "foo", "-consul-image", "foo", "-envoy-image", "envoy:1.16.0", + "-consul-api-timeout", "5s", "-default-consul-sidecar-memory-limit=unparseable"}, + expErr: "-default-consul-sidecar-memory-limit 'unparseable' is invalid", + }, + { + flags: []string{"-consul-k8s-image", "foo", "-consul-image", "foo", "-envoy-image", "envoy:1.16.0", + "-consul-api-timeout", "5s", "-default-consul-sidecar-memory-request=unparseable"}, + expErr: "-default-consul-sidecar-memory-request 'unparseable' is invalid", + }, + { + flags: []string{"-consul-k8s-image", "foo", "-consul-image", "foo", "-envoy-image", "envoy:1.16.0", + "-consul-api-timeout", "5s", "-default-consul-sidecar-memory-request=50Mi", + "-default-consul-sidecar-memory-limit=25Mi", + }, + expErr: "request must be <= limit: -default-consul-sidecar-memory-request value of \"50Mi\" is greater than the -default-consul-sidecar-memory-limit value of \"25Mi\"", + }, + { + flags: []string{"-consul-k8s-image", "foo", "-consul-image", "foo", "-envoy-image", "envoy:1.16.0", + "-consul-api-timeout", "5s", "-default-consul-sidecar-cpu-request=50m", + "-default-consul-sidecar-cpu-limit=25m", + }, + expErr: "request must be <= limit: -default-consul-sidecar-cpu-request value of \"50m\" is greater than the -default-consul-sidecar-cpu-limit value of \"25m\"", + }, + { + flags: []string{"-consul-k8s-image", "hashicorp/consul-k8s", "-consul-image", "foo", "-envoy-image", 
"envoy:1.16.0", + "-consul-api-timeout", "5s", "-http-addr=http://0.0.0.0:9999", "-listen", "999999"}, expErr: "missing port in address: 999999", }, { - flags: []string{"-consul-k8s-image", "hashicorp/consul-k8s", "-consul-image", "foo", "-consul-dataplane-image", "consul-dataplane:1.14.0", + flags: []string{"-consul-k8s-image", "hashicorp/consul-k8s", "-consul-image", "foo", "-envoy-image", "envoy:1.16.0", + "-consul-api-timeout", "5s", "-http-addr=http://0.0.0.0:9999", "-listen", ":foobar"}, expErr: "unable to parse port string: strconv.Atoi: parsing \"foobar\": invalid syntax", }, { - flags: []string{"-consul-k8s-image", "foo", "-consul-image", "foo", "-consul-dataplane-image", "consul-dataplane:1.14.0", - "-default-envoy-proxy-concurrency=-42", + flags: []string{"-consul-k8s-image", "foo", "-consul-image", "foo", "-envoy-image", "envoy:1.16.0", + "-consul-api-timeout", "5s", "-default-envoy-proxy-concurrency=-42", }, expErr: "-default-envoy-proxy-concurrency must be >= 0 if set", }, @@ -155,4 +209,32 @@ func TestRun_ResourceLimitDefaults(t *testing.T) { require.Equal(t, cmd.flagInitContainerCPULimit, "50m") require.Equal(t, cmd.flagInitContainerMemoryRequest, "25Mi") require.Equal(t, cmd.flagInitContainerMemoryLimit, "150Mi") + + // Consul sidecar container defaults + require.Equal(t, cmd.flagDefaultConsulSidecarCPURequest, "20m") + require.Equal(t, cmd.flagDefaultConsulSidecarCPULimit, "20m") + require.Equal(t, cmd.flagDefaultConsulSidecarMemoryRequest, "25Mi") + require.Equal(t, cmd.flagDefaultConsulSidecarMemoryLimit, "50Mi") +} + +func TestRun_ValidationConsulHTTPAddr(t *testing.T) { + k8sClient := fake.NewSimpleClientset() + ui := cli.NewMockUi() + cmd := Command{ + UI: ui, + clientset: k8sClient, + } + flags := []string{ + "-consul-k8s-image", "hashicorp/consul-k8s", + "-consul-image", "foo", + "-envoy-image", "envoy:1.16.0", + "-consul-api-timeout", "5s", + } + + os.Setenv(api.HTTPAddrEnvName, "%") + code := cmd.Run(flags) + os.Unsetenv(api.HTTPAddrEnvName) + + require.Equal(t, 1, code) + require.Contains(t, ui.ErrorWriter.String(), "error parsing consul address \"http://%\": parse \"http://%\": invalid URL escape \"%") } diff --git a/control-plane/subcommand/install-cni/binary.go b/control-plane/subcommand/install-cni/binary.go index 2429770109..472c8bece7 100644 --- a/control-plane/subcommand/install-cni/binary.go +++ b/control-plane/subcommand/install-cni/binary.go @@ -49,7 +49,7 @@ func removeFile(path string) error { return nil } - if err := os.RemoveAll(path); err != nil { + if err := os.Remove(path); err != nil { return fmt.Errorf("error removing file %s: %w", path, err) } return nil diff --git a/control-plane/subcommand/install-cni/cniconfig.go b/control-plane/subcommand/install-cni/cniconfig.go index 922d7283dd..e4d2078ad7 100644 --- a/control-plane/subcommand/install-cni/cniconfig.go +++ b/control-plane/subcommand/install-cni/cniconfig.go @@ -111,19 +111,18 @@ func confListFileFromConfFile(cfgFile string) (string, error) { // The format of the main cni config file is unstructured json consisting of a header and list of plugins // -// { -// "cniVersion": "0.3.1", -// "name": "kindnet", -// "plugins": [ -// { -// -// }, -// { -// -// } -// ] -// } -// +// { +// "cniVersion": "0.3.1", +// "name": "kindnet", +// "plugins": [ +// { +// +// }, +// { +// +// } +// ] +// } // appendCNIConfig appends the consul-cni configuration to the main configuration file. 
func appendCNIConfig(consulCfg *config.CNIConfig, cfgFile string) error { // Read the config file and convert it to a map. diff --git a/control-plane/subcommand/install-cni/cniconfig_test.go b/control-plane/subcommand/install-cni/cniconfig_test.go index b6e2154adb..640b9d93cb 100644 --- a/control-plane/subcommand/install-cni/cniconfig_test.go +++ b/control-plane/subcommand/install-cni/cniconfig_test.go @@ -2,7 +2,7 @@ package installcni import ( "fmt" - "os" + "io/ioutil" "path/filepath" "testing" @@ -93,10 +93,10 @@ func TestConfListFromConfFile(t *testing.T) { actualFile, err := confListFileFromConfFile(tempCfgFile) require.NoError(t, err) - actual, err := os.ReadFile(actualFile) + actual, err := ioutil.ReadFile(actualFile) require.NoError(t, err) - expected, err := os.ReadFile(expectedCfgFile) + expected, err := ioutil.ReadFile(expectedCfgFile) require.NoError(t, err) require.Equal(t, string(expected), string(actual)) @@ -168,10 +168,10 @@ func TestAppendCNIConfig(t *testing.T) { err = appendCNIConfig(c.consulConfig, tempDestFile) require.NoError(t, err) - actual, err := os.ReadFile(tempDestFile) + actual, err := ioutil.ReadFile(tempDestFile) require.NoError(t, err) - expected, err := os.ReadFile(c.goldenFile) + expected, err := ioutil.ReadFile(c.goldenFile) require.NoError(t, err) require.Equal(t, string(expected), string(actual)) @@ -298,10 +298,10 @@ func TestRemoveCNIConfig(t *testing.T) { t.Fatal(err) } - actual, err := os.ReadFile(tempDestFile) + actual, err := ioutil.ReadFile(tempDestFile) require.NoError(t, err) - expected, err := os.ReadFile(c.cfgFile) + expected, err := ioutil.ReadFile(c.cfgFile) require.NoError(t, err) require.Equal(t, string(expected), string(actual)) diff --git a/control-plane/subcommand/install-cni/command_test.go b/control-plane/subcommand/install-cni/command_test.go index 5cb9bea91e..a7e97a4aa9 100644 --- a/control-plane/subcommand/install-cni/command_test.go +++ b/control-plane/subcommand/install-cni/command_test.go @@ -3,6 +3,7 @@ package installcni import ( "context" "fmt" + "io/ioutil" "os" "path/filepath" "testing" @@ -64,11 +65,11 @@ func TestRun_DirectoryWatcher(t *testing.T) { require.NoError(t, err) time.Sleep(50 * time.Millisecond) // The golden file contains the consul config. - expected, err := os.ReadFile(goldenFile) + expected, err := ioutil.ReadFile(goldenFile) require.NoError(t, err) // Get the name of the config file in the tempDir and read it. tempDestFile := filepath.Join(tempDir, configFile) - actual, err := os.ReadFile(tempDestFile) + actual, err := ioutil.ReadFile(tempDestFile) require.NoError(t, err) // Filewatcher should have detected a change and appended to the config file. Make sure // files match. @@ -81,7 +82,7 @@ func TestRun_DirectoryWatcher(t *testing.T) { require.NoError(t, err) time.Sleep(50 * time.Millisecond) // Re-read the config file so we can compare the updated config file. - actual, err = os.ReadFile(tempDestFile) + actual, err = ioutil.ReadFile(tempDestFile) require.NoError(t, err) // Filewatcher should have detected change, fixed and appended to the config file. Make sure // files match. 
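The doc comment on appendCNIConfig above spells out the conflist shape that the merge operates on: an unstructured JSON header plus a "plugins" array. As a rough, self-contained sketch of that merge step (the helper name and file path here are hypothetical, and it uses the same ioutil calls this branch reverts to; the repo's real implementation works on its config.CNIConfig type and also converts single .conf files):

package main

import (
	"encoding/json"
	"fmt"
	"io/ioutil"
)

// appendPluginSketch reads a CNI conflist, appends one plugin entry to its
// "plugins" array, and writes the merged result back. Illustrative only:
// the real appendCNIConfig takes the repo's config.CNIConfig type and has
// its own error handling.
func appendPluginSketch(cfgFile string, plugin map[string]interface{}) error {
	raw, err := ioutil.ReadFile(cfgFile)
	if err != nil {
		return fmt.Errorf("error reading %s: %w", cfgFile, err)
	}

	// The header ("cniVersion", "name", ...) is left untouched; only the
	// plugins list grows.
	var cfg map[string]interface{}
	if err := json.Unmarshal(raw, &cfg); err != nil {
		return fmt.Errorf("error parsing %s: %w", cfgFile, err)
	}
	plugins, ok := cfg["plugins"].([]interface{})
	if !ok {
		return fmt.Errorf("%s does not contain a plugins list", cfgFile)
	}
	cfg["plugins"] = append(plugins, plugin)

	out, err := json.MarshalIndent(cfg, "", "  ")
	if err != nil {
		return err
	}
	return ioutil.WriteFile(cfgFile, out, 0644)
}

func main() {
	// Hypothetical path; install-cni discovers the real config file in the
	// CNI net.d directory at runtime.
	err := appendPluginSketch("/etc/cni/net.d/10-kindnet.conflist",
		map[string]interface{}{"type": "consul-cni", "log_level": "info"})
	if err != nil {
		fmt.Println(err)
	}
}

Appending to the end of the plugins array is what chains consul-cni after the cluster's primary CNI plugin, and it is also what lets removeCNIConfig later strip exactly that one entry, as exercised by TestRemoveCNIConfig above.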
diff --git a/control-plane/subcommand/install-cni/kubeconfig.go b/control-plane/subcommand/install-cni/kubeconfig.go index ca93759578..e611828e36 100644 --- a/control-plane/subcommand/install-cni/kubeconfig.go +++ b/control-plane/subcommand/install-cni/kubeconfig.go @@ -3,6 +3,7 @@ package installcni import ( "errors" "fmt" + "io/ioutil" "os" "path/filepath" @@ -119,7 +120,7 @@ func serviceAccountToken(tokenPath string) (string, error) { if _, err := os.Stat(tokenPath); errors.Is(err, os.ErrNotExist) { return "", fmt.Errorf("tokenPath does not exist: %w", err) } - token, err := os.ReadFile(tokenPath) + token, err := ioutil.ReadFile(tokenPath) if err != nil { return "", fmt.Errorf("could not read service account token: %w", err) } diff --git a/control-plane/subcommand/install-cni/kubeconfig_test.go b/control-plane/subcommand/install-cni/kubeconfig_test.go index 899ad3f600..22a7eae9b1 100644 --- a/control-plane/subcommand/install-cni/kubeconfig_test.go +++ b/control-plane/subcommand/install-cni/kubeconfig_test.go @@ -1,7 +1,7 @@ package installcni import ( - "os" + "io/ioutil" "path/filepath" "testing" @@ -39,7 +39,7 @@ func TestKubeConfigYaml(t *testing.T) { require.NoError(t, err) golden := filepath.Join("testdata", c.goldenFile) - expected, err := os.ReadFile(golden) + expected, err := ioutil.ReadFile(golden) require.NoError(t, err) require.Equal(t, string(expected), string(actual)) diff --git a/control-plane/subcommand/partition-init/command.go b/control-plane/subcommand/partition-init/command.go index 7ca70b50a7..f539b4c62a 100644 --- a/control-plane/subcommand/partition-init/command.go +++ b/control-plane/subcommand/partition-init/command.go @@ -11,8 +11,9 @@ import ( "github.com/hashicorp/consul-k8s/control-plane/consul" "github.com/hashicorp/consul-k8s/control-plane/subcommand/common" "github.com/hashicorp/consul-k8s/control-plane/subcommand/flags" - "github.com/hashicorp/consul-server-connection-manager/discovery" + k8sflags "github.com/hashicorp/consul-k8s/control-plane/subcommand/flags" "github.com/hashicorp/consul/api" + "github.com/hashicorp/go-discover" "github.com/hashicorp/go-hclog" "github.com/mitchellh/cli" ) @@ -20,8 +21,16 @@ import ( type Command struct { UI cli.Ui - flags *flag.FlagSet - consul *flags.ConsulFlags + flags *flag.FlagSet + k8s *k8sflags.K8SFlags + http *flags.HTTPFlags + + flagPartitionName string + + // Flags to configure Consul connection + flagServerAddresses []string + flagServerPort uint + flagUseHTTPS bool flagLogLevel string flagLogJSON bool @@ -36,11 +45,21 @@ type Command struct { once sync.Once help string + + providers map[string]discover.Provider } func (c *Command) init() { c.flags = flag.NewFlagSet("", flag.ContinueOnError) + c.flags.StringVar(&c.flagPartitionName, "partition-name", "", "The name of the partition being created.") + + c.flags.Var((*flags.AppendSliceValue)(&c.flagServerAddresses), "server-address", + "The IP, DNS name or the cloud auto-join string of the Consul server(s). If providing IPs or DNS names, may be specified multiple times. "+ + "At least one value is required.") + c.flags.UintVar(&c.flagServerPort, "server-port", 8500, "The HTTP or HTTPS port of the Consul server. Defaults to 8500.") + c.flags.BoolVar(&c.flagUseHTTPS, "use-https", false, + "Toggle for using HTTPS for all API calls to Consul.") c.flags.DurationVar(&c.flagTimeout, "timeout", 10*time.Minute, "How long we'll try to bootstrap Partitions for before timing out, e.g. 
1ms, 2s, 3m") c.flags.StringVar(&c.flagLogLevel, "log-level", "info", @@ -49,8 +68,10 @@ func (c *Command) init() { c.flags.BoolVar(&c.flagLogJSON, "log-json", false, "Enable or disable JSON output format for logging.") - c.consul = &flags.ConsulFlags{} - flags.Merge(c.flags, c.consul.Flags()) + c.k8s = &k8sflags.K8SFlags{} + c.http = &flags.HTTPFlags{} + flags.Merge(c.flags, c.k8s.Flags()) + flags.Merge(c.flags, c.http.Flags()) c.help = flags.Usage(help, c.flags) // Default retry to 1s. This is exposed for setting in tests. @@ -95,52 +116,45 @@ func (c *Command) Run(args []string) int { return 1 } - // Start Consul server Connection manager - serverConnMgrCfg, err := c.consul.ConsulServerConnMgrConfig() - serverConnMgrCfg.ServerWatchDisabled = true - if err != nil { - c.UI.Error(fmt.Sprintf("unable to create config for consul-server-connection-manager: %s", err)) - return 1 - } - watcher, err := discovery.NewWatcher(c.ctx, serverConnMgrCfg, c.log.Named("consul-server-connection-manager")) + serverAddresses, err := common.GetResolvedServerAddresses(c.flagServerAddresses, c.providers, c.log) if err != nil { - c.UI.Error(fmt.Sprintf("unable to create Consul server watcher: %s", err)) + c.UI.Error(fmt.Sprintf("Unable to discover any Consul addresses from %q: %s", c.flagServerAddresses[0], err)) return 1 } - go watcher.Run() - defer watcher.Stop() - - state, err := watcher.State() - if err != nil { - c.UI.Error(fmt.Sprintf("unable to get Consul server addresses from watcher: %s", err)) - return 1 + scheme := "http" + if c.flagUseHTTPS { + scheme = "https" } - - consulClient, err := consul.NewClientFromConnMgrState(c.consul.ConsulClientConfig(), state) + // For all of the next operations we'll need a Consul client. + serverAddr := fmt.Sprintf("%s:%d", serverAddresses[0], c.flagServerPort) + cfg := api.DefaultConfig() + cfg.Address = serverAddr + cfg.Scheme = scheme + c.http.MergeOntoConfig(cfg) + consulClient, err := consul.NewClient(cfg, c.http.ConsulAPITimeout()) if err != nil { - c.UI.Error(fmt.Sprintf("unable to create Consul client: %s", err)) + c.UI.Error(fmt.Sprintf("Error creating Consul client for addr %q: %s", serverAddr, err)) return 1 } - for { - partition, _, err := consulClient.Partitions().Read(c.ctx, c.consul.Partition, nil) + partition, _, err := consulClient.Partitions().Read(c.ctx, c.flagPartitionName, nil) // The API does not return an error if the Partition does not exist. It returns a nil Partition. if err != nil { - c.log.Error("Error reading Partition from Consul", "name", c.consul.Partition, "error", err.Error()) + c.log.Error("Error reading Partition from Consul", "name", c.flagPartitionName, "error", err.Error()) } else if partition == nil { // Retry Admin Partition creation until it succeeds, or we reach the command timeout. 
_, _, err = consulClient.Partitions().Create(c.ctx, &api.Partition{ - Name: c.consul.Partition, + Name: c.flagPartitionName, Description: "Created by Helm installation", }, nil) if err == nil { - c.log.Info("Successfully created Admin Partition", "name", c.consul.Partition) + c.log.Info("Successfully created Admin Partition", "name", c.flagPartitionName) return 0 } - c.log.Error("Error creating partition", "name", c.consul.Partition, "error", err.Error()) + c.log.Error("Error creating partition", "name", c.flagPartitionName, "error", err.Error()) } else { - c.log.Info("Admin Partition already exists", "name", c.consul.Partition) + c.log.Info("Admin Partition already exists", "name", c.flagPartitionName) return 0 } // Wait on either the retry duration (in which case we continue) or the @@ -150,28 +164,28 @@ func (c *Command) Run(args []string) int { case <-time.After(c.retryDuration): continue case <-c.ctx.Done(): - c.log.Error("Timed out attempting to create partition", "name", c.consul.Partition) + c.log.Error("Timed out attempting to create partition", "name", c.flagPartitionName) return 1 } } } func (c *Command) validateFlags() error { - if len(c.consul.Addresses) == 0 { - return errors.New("-addresses must be set") + if len(c.flagServerAddresses) == 0 { + return errors.New("-server-address must be set at least once") } - if c.consul.Partition == "" { - return errors.New("-partition must be set") + if c.flagPartitionName == "" { + return errors.New("-partition-name must be set") } - if c.consul.APITimeout <= 0 { - return errors.New("-api-timeout must be set to a value greater than 0") + if c.http.ConsulAPITimeout() <= 0 { + return errors.New("-consul-api-timeout must be set to a value greater than 0") } return nil } -const synopsis = "Initialize an Admin Partition in Consul." +const synopsis = "Initialize an Admin Partition on Consul." 
const help = ` Usage: consul-k8s-control-plane partition-init [options] diff --git a/control-plane/subcommand/partition-init/command_ent_test.go b/control-plane/subcommand/partition-init/command_ent_test.go index 5bb1868b39..1e833430f9 100644 --- a/control-plane/subcommand/partition-init/command_ent_test.go +++ b/control-plane/subcommand/partition-init/command_ent_test.go @@ -23,21 +23,22 @@ func TestRun_FlagValidation(t *testing.T) { }{ { flags: nil, - expErr: "addresses must be set", + expErr: "-server-address must be set at least once", }, { - flags: []string{"-addresses", "foo"}, - expErr: "-partition must be set", + flags: []string{"-server-address", "foo"}, + expErr: "-partition-name must be set", }, { flags: []string{ - "-addresses", "foo", "-partition", "bar", "-api-timeout", "0s"}, - expErr: "-api-timeout must be set to a value greater than 0", + "-server-address", "foo", "-partition-name", "bar"}, + expErr: "-consul-api-timeout must be set to a value greater than 0", }, { flags: []string{ - "-addresses", "foo", - "-partition", "bar", + "-server-address", "foo", + "-partition-name", "bar", + "-consul-api-timeout", "5s", "-log-level", "invalid", }, expErr: "unknown log level: invalid", @@ -74,10 +75,10 @@ func TestRun_PartitionCreate(t *testing.T) { } cmd.init() args := []string{ - "-addresses=" + "127.0.0.1", - "-http-port=" + strings.Split(server.HTTPAddr, ":")[1], - "-grpc-port=" + strings.Split(server.GRPCAddr, ":")[1], - "-partition", partitionName, + "-server-address=" + strings.Split(server.HTTPAddr, ":")[0], + "-server-port=" + strings.Split(server.HTTPAddr, ":")[1], + "-partition-name", partitionName, + "-consul-api-timeout", "5s", } responseCode := cmd.Run(args) @@ -113,10 +114,10 @@ func TestRun_PartitionExists(t *testing.T) { } cmd.init() args := []string{ - "-addresses=" + "127.0.0.1", - "-http-port=" + strings.Split(server.HTTPAddr, ":")[1], - "-grpc-port=" + strings.Split(server.GRPCAddr, ":")[1], - "-partition", partitionName, + "-server-address=" + strings.Split(server.HTTPAddr, ":")[0], + "-server-port=" + strings.Split(server.HTTPAddr, ":")[1], + "-partition-name", partitionName, + "-consul-api-timeout", "5s", } responseCode := cmd.Run(args) @@ -142,11 +143,11 @@ func TestRun_ExitsAfterTimeout(t *testing.T) { } cmd.init() args := []string{ - "-addresses=" + "127.0.0.1", - "-http-port=" + strings.Split(server.HTTPAddr, ":")[1], - "-grpc-port=" + strings.Split(server.GRPCAddr, ":")[1], + "-server-address=" + strings.Split(server.HTTPAddr, ":")[0], + "-server-port=" + strings.Split(server.HTTPAddr, ":")[1], + "-partition-name", partitionName, "-timeout", "500ms", - "-partition", partitionName, + "-consul-api-timeout", "5s", } server.Stop() startTime := time.Now() @@ -158,3 +159,5 @@ func TestRun_ExitsAfterTimeout(t *testing.T) { // some buffer time required for the task to run and assignments to occur. 
require.WithinDuration(t, completeTime, startTime, 1*time.Second) } + +// TODO: Write tests with ACLs enabled diff --git a/control-plane/subcommand/server-acl-init/command.go b/control-plane/subcommand/server-acl-init/command.go index 32afe491fe..9f54feac20 100644 --- a/control-plane/subcommand/server-acl-init/command.go +++ b/control-plane/subcommand/server-acl-init/command.go @@ -5,23 +5,20 @@ import ( "errors" "flag" "fmt" - "net" "os" "regexp" "strings" "sync" "time" - "github.com/cenkalti/backoff" "github.com/hashicorp/consul-k8s/control-plane/consul" "github.com/hashicorp/consul-k8s/control-plane/subcommand" "github.com/hashicorp/consul-k8s/control-plane/subcommand/common" "github.com/hashicorp/consul-k8s/control-plane/subcommand/flags" k8sflags "github.com/hashicorp/consul-k8s/control-plane/subcommand/flags" - "github.com/hashicorp/consul-server-connection-manager/discovery" "github.com/hashicorp/consul/api" + "github.com/hashicorp/go-discover" "github.com/hashicorp/go-hclog" - "github.com/hashicorp/go-netaddrs" "github.com/mitchellh/cli" "github.com/mitchellh/mapstructure" "golang.org/x/text/cases" @@ -34,9 +31,8 @@ import ( type Command struct { UI cli.Ui - flags *flag.FlagSet - k8s *k8sflags.K8SFlags - consulFlags *flags.ConsulFlags + flags *flag.FlagSet + k8s *k8sflags.K8SFlags flagResourcePrefix string flagK8sNamespace string @@ -67,13 +63,20 @@ type Command struct { flagAPIGatewayController bool // Flags to configure Consul connection. - flagServerPort uint + flagServerAddresses []string + flagServerPort uint + flagConsulCACert string + flagConsulTLSServerName string + flagUseHTTPS bool + flagConsulAPITimeout time.Duration // Flags for ACL replication. flagCreateACLReplicationToken bool flagACLReplicationTokenFile string // Flags to support partitions. + flagEnablePartitions bool // true if Admin Partitions are enabled + flagPartitionName string // name of the Admin Partition flagPartitionTokenFile string // Flags to support peering. @@ -102,8 +105,6 @@ type Command struct { clientset kubernetes.Interface - watcher consul.ServerConnectionManager - // ctx is cancelled when the command timeout is reached. ctx context.Context retryDuration time.Duration @@ -111,10 +112,10 @@ type Command struct { // log log hclog.Logger - state discovery.State - once sync.Once help string + + providers map[string]discover.Provider } func (c *Command) init() { @@ -165,8 +166,21 @@ func (c *Command) init() { c.flags.BoolVar(&c.flagAPIGatewayController, "api-gateway-controller", false, "Toggle for configuring ACL login for the API gateway controller.") + c.flags.Var((*flags.AppendSliceValue)(&c.flagServerAddresses), "server-address", + "The IP, DNS name or the cloud auto-join string of the Consul server(s). If providing IPs or DNS names, may be specified multiple times. "+ + "At least one value is required.") c.flags.UintVar(&c.flagServerPort, "server-port", 8500, "The HTTP or HTTPS port of the Consul server. 
Defaults to 8500.") - + c.flags.StringVar(&c.flagConsulCACert, "consul-ca-cert", "", + "Path to the PEM-encoded CA certificate of the Consul cluster.") + c.flags.StringVar(&c.flagConsulTLSServerName, "consul-tls-server-name", "", + "The server name to set as the SNI header when sending HTTPS requests to Consul.") + c.flags.BoolVar(&c.flagUseHTTPS, "use-https", false, + "Toggle for using HTTPS for all API calls to Consul.") + + c.flags.BoolVar(&c.flagEnablePartitions, "enable-partitions", false, + "[Enterprise Only] Enables Admin Partitions") + c.flags.StringVar(&c.flagPartitionName, "partition", "", + "[Enterprise Only] Name of the Admin Partition") c.flags.StringVar(&c.flagPartitionTokenFile, "partition-token-file", "", "[Enterprise Only] Path to file containing ACL token to be used in non-default partitions.") @@ -211,10 +225,11 @@ func (c *Command) init() { c.flags.BoolVar(&c.flagLogJSON, "log-json", false, "Enable or disable JSON output format for logging.") + c.flags.DurationVar(&c.flagConsulAPITimeout, "consul-api-timeout", 0, + "The amount of time that the Consul API client will wait for a response from the API before cancelling the request.") + c.k8s = &k8sflags.K8SFlags{} - c.consulFlags = &flags.ConsulFlags{} flags.Merge(c.flags, c.k8s.Flags()) - flags.Merge(c.flags, c.consulFlags.Flags()) c.help = flags.Usage(help, c.flags) // Default retry to 1s. This is exposed for setting in tests. @@ -299,16 +314,14 @@ func (c *Command) Run(args []string) int { } } - var ipAddrs []net.IPAddr - if err := backoff.Retry(func() error { - ipAddrs, err = netaddrs.IPAddrs(c.ctx, c.consulFlags.Addresses, c.log) - if err != nil { - c.log.Error("Error resolving IP Address", "err", err) - return err - } - return nil - }, exponentialBackoffWithMaxInterval()); err != nil { - c.UI.Error(err.Error()) + serverAddresses, err := common.GetResolvedServerAddresses(c.flagServerAddresses, c.providers, c.log) + if err != nil { + c.UI.Error(fmt.Sprintf("Unable to discover any Consul addresses from %q: %s", c.flagServerAddresses[0], err)) + return 1 + } + scheme := "http" + if c.flagUseHTTPS { + scheme = "https" } var bootstrapToken string @@ -335,44 +348,30 @@ func (c *Command) Run(args []string) int { } } - bootstrapToken, err = c.bootstrapServers(ipAddrs, bootstrapToken, bootTokenSecretName) + bootstrapToken, err = c.bootstrapServers(serverAddresses, bootstrapToken, bootTokenSecretName, scheme) if err != nil { c.log.Error(err.Error()) return 1 } } - // Start Consul server Connection manager - var watcher consul.ServerConnectionManager - serverConnMgrCfg, err := c.consulFlags.ConsulServerConnMgrConfig() - if err != nil { - c.UI.Error(fmt.Sprintf("unable to create config for consul-server-connection-manager: %s", err)) - return 1 + // For all of the next operations we'll need a Consul client.
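The repeatable -server-address flag registered above goes through the repo's flags.AppendSliceValue adapter. For readers outside the tree, a minimal reimplementation of that idea as a standard flag.Value could look like the following (the type and flag set here are stand-ins, not the repo's code); the hunk then continues below with the Consul client construction that the comment above introduces.

package main

import (
	"flag"
	"fmt"
	"strings"
)

// appendSliceValue appends one entry to the underlying slice each time the
// flag appears on the command line, mimicking flags.AppendSliceValue.
type appendSliceValue []string

func (s *appendSliceValue) String() string { return strings.Join(*s, ",") }

func (s *appendSliceValue) Set(v string) error {
	*s = append(*s, v)
	return nil
}

func main() {
	var addrs []string
	fs := flag.NewFlagSet("demo", flag.ExitOnError)
	fs.Var((*appendSliceValue)(&addrs), "server-address", "may be specified multiple times")
	_ = fs.Parse([]string{"-server-address", "10.0.0.1", "-server-address", "10.0.0.2"})
	fmt.Println(addrs) // [10.0.0.1 10.0.0.2]
}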
+ serverAddr := fmt.Sprintf("%s:%d", serverAddresses[0], c.flagServerPort) + clientConfig := api.DefaultConfig() + clientConfig.Address = serverAddr + clientConfig.Scheme = scheme + clientConfig.Token = bootstrapToken + clientConfig.TLSConfig = api.TLSConfig{ + Address: c.flagConsulTLSServerName, + CAFile: c.flagConsulCACert, } - serverConnMgrCfg.Credentials.Type = discovery.CredentialsTypeStatic - serverConnMgrCfg.Credentials.Static = discovery.StaticTokenCredential{Token: bootstrapToken} - if c.watcher == nil { - watcher, err = discovery.NewWatcher(c.ctx, serverConnMgrCfg, c.log.Named("consul-server-connection-manager")) - if err != nil { - c.UI.Error(fmt.Sprintf("unable to create Consul server watcher: %s", err)) - return 1 - } - } else { - watcher = c.watcher - } - - go watcher.Run() - defer watcher.Stop() - c.state, err = watcher.State() - if err != nil { - c.UI.Error(fmt.Sprintf("unable to get Consul server addresses from watcher: %s", err)) - return 1 + if c.flagEnablePartitions { + clientConfig.Partition = c.flagPartitionName } - - consulClient, err := consul.NewClientFromConnMgrState(c.consulFlags.ConsulClientConfig(), c.state) + consulClient, err := consul.NewClient(clientConfig, c.flagConsulAPITimeout) if err != nil { - c.log.Error(fmt.Sprintf("Error creating Consul client for addr %q: %s", c.state.Address, err)) + c.log.Error(fmt.Sprintf("Error creating Consul client for addr %q: %s", serverAddr, err)) return 1 } consulDC, primaryDC, err := c.consulDatacenterList(consulClient) @@ -383,7 +382,7 @@ func (c *Command) Run(args []string) int { c.log.Info("Current datacenter", "datacenter", consulDC, "primaryDC", primaryDC) primary := consulDC == primaryDC - if c.consulFlags.Partition == consulDefaultPartition && primary { + if c.flagEnablePartitions && c.flagPartitionName == consulDefaultPartition && primary { // Partition token is local because only the Primary datacenter can have Admin Partitions. if c.flagPartitionTokenFile != "" { err = c.createACLWithSecretID("partitions", partitionRules, consulDC, primary, consulClient, partitionToken, true) @@ -483,11 +482,11 @@ func (c *Command) Run(args []string) int { // DNS lookups. The anonymous policy in the default partition needs to be updated in order to // support this use-case. Creating a separate anonymous token client that updates the anonymous // policy and token in the default partition ensures this works. 
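One subtlety in the hunk that follows: clientConfig is the *api.Config returned by api.DefaultConfig(), so anonTokenConfig := clientConfig copies the pointer, not the struct, and setting Partition on it also mutates clientConfig. That appears safe here only because consulClient was constructed earlier and, as far as we can tell, api.NewClient stores its own copy of the config. A small standalone sketch of the aliasing, with illustrative values:

package main

import (
	"fmt"

	"github.com/hashicorp/consul/api"
)

func main() {
	clientConfig := api.DefaultConfig() // *api.Config
	clientConfig.Partition = "test"

	// Pointer copy: both names refer to the same Config struct.
	anonTokenConfig := clientConfig
	anonTokenConfig.Partition = "default"
	fmt.Println(clientConfig.Partition) // "default" (the original changed too)

	// A value copy keeps the two configs independent.
	independent := *clientConfig
	independent.Partition = "test"
	fmt.Println(clientConfig.Partition) // still "default"
}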
- anonTokenConfig := c.consulFlags.ConsulClientConfig() - if c.consulFlags.Partition != "" { - anonTokenConfig.APIClientConfig.Partition = consulDefaultPartition + anonTokenConfig := clientConfig + if c.flagEnablePartitions { + anonTokenConfig.Partition = consulDefaultPartition } - anonTokenClient, err := consul.NewClientFromConnMgrState(anonTokenConfig, c.state) + anonTokenClient, err := consul.NewClient(anonTokenConfig, c.flagConsulAPITimeout) if err != nil { c.log.Error(err.Error()) return 1 @@ -567,7 +566,7 @@ func (c *Command) Run(args []string) int { if c.flagCreateEntLicenseToken { var err error - if c.consulFlags.Partition != "" { + if c.flagEnablePartitions { err = c.createLocalACL("enterprise-license", entPartitionLicenseRules, consulDC, primary, consulClient) } else { err = c.createLocalACL("enterprise-license", entLicenseRules, consulDC, primary, consulClient) @@ -579,7 +578,7 @@ func (c *Command) Run(args []string) int { } if c.flagSnapshotAgent { - serviceAccountName := c.withPrefix("server") + serviceAccountName := c.withPrefix("snapshot-agent") if err := c.createACLPolicyRoleAndBindingRule("snapshot-agent", snapshotAgentRules, consulDC, primaryDC, localPolicy, primary, localComponentAuthMethodName, serviceAccountName, consulClient); err != nil { c.log.Error(err.Error()) return 1 @@ -709,16 +708,6 @@ func (c *Command) Run(args []string) int { return 0 } -// exponentialBackoffWithMaxInterval creates an exponential backoff but limits the -// maximum backoff to 10 seconds so that we don't find ourselves in a situation -// where we are waiting for minutes before retries. -func exponentialBackoffWithMaxInterval() *backoff.ExponentialBackOff { - backoff := backoff.NewExponentialBackOff() - backoff.MaxInterval = 10 * time.Second - backoff.Reset() - return backoff -} - // configureGlobalComponentAuthMethod sets up an AuthMethod in the primary datacenter, // that the Consul components will use to issue global ACL tokens with. func (c *Command) configureGlobalComponentAuthMethod(consulClient *api.Client, authMethodName, primaryDC string) error { @@ -829,7 +818,7 @@ func (c *Command) configureGateway(gatewayParams ConfigureGatewayParams, consulC // the words "ingress-gateway" or "terminating-gateway". We need to create unique names for tokens // across all gateway types and so must suffix with either `-ingress-gateway` or `-terminating-gateway`.
serviceAccountName := c.withPrefix(name) - err = c.createACLPolicyRoleAndBindingRule(name, rules, + err = c.createACLPolicyRoleAndBindingRule(serviceAccountName, rules, gatewayParams.ConsulDC, gatewayParams.PrimaryDC, localPolicy, gatewayParams.Primary, gatewayParams.AuthMethodName, serviceAccountName, consulClient) if err != nil { @@ -963,8 +952,8 @@ func (c *Command) createAnonymousPolicy(isPrimary bool) bool { } func (c *Command) validateFlags() error { - if c.consulFlags.Addresses == "" { - return errors.New("-addresses must be set") + if len(c.flagServerAddresses) == 0 { + return errors.New("-server-address must be set at least once") } if c.flagResourcePrefix == "" { @@ -992,7 +981,14 @@ func (c *Command) validateFlags() error { ) } - if c.consulFlags.APITimeout <= 0 { + if c.flagEnablePartitions && c.flagPartitionName == "" { + return errors.New("-partition must be set if -enable-partitions is true") + } + if !c.flagEnablePartitions && c.flagPartitionName != "" { + return errors.New("-enable-partitions must be 'true' if -partition is set") + } + + if c.flagConsulAPITimeout <= 0 { return errors.New("-consul-api-timeout must be set to a value greater than 0") } diff --git a/control-plane/subcommand/server-acl-init/command_ent_test.go b/control-plane/subcommand/server-acl-init/command_ent_test.go index f5dc35023e..27c18f82a4 100644 --- a/control-plane/subcommand/server-acl-init/command_ent_test.go +++ b/control-plane/subcommand/server-acl-init/command_ent_test.go @@ -33,7 +33,9 @@ func TestRun_ConnectInject_SingleDestinationNamespace(t *testing.T) { for _, consulDestNamespace := range consulDestNamespaces { t.Run(consulDestNamespace, func(tt *testing.T) { k8s, testAgent := completeSetup(tt) + defer testAgent.Stop() setUpK8sServiceAccount(tt, k8s, ns) + require := require.New(tt) ui := cli.NewMockUi() cmd := Command{ @@ -42,90 +44,91 @@ } cmd.init() args := []string{ - "-addresses=" + strings.Split(testAgent.TestServer.HTTPAddr, ":")[0], - "-http-port=" + strings.Split(testAgent.TestServer.HTTPAddr, ":")[1], - "-grpc-port=" + strings.Split(testAgent.TestServer.GRPCAddr, ":")[1], + "-server-address=" + strings.Split(testAgent.HTTPAddr, ":")[0], + "-server-port=" + strings.Split(testAgent.HTTPAddr, ":")[1], "-resource-prefix=" + resourcePrefix, "-k8s-namespace=" + ns, "-connect-inject", + "-enable-partitions", "-partition=default", "-enable-namespaces", "-consul-inject-destination-namespace", consulDestNamespace, "-acl-binding-rule-selector=serviceaccount.name!=default", + "-consul-api-timeout=5s", } responseCode := cmd.Run(args) - require.Equal(t, 0, responseCode, ui.ErrorWriter.String()) + require.Equal(0, responseCode, ui.ErrorWriter.String()) bootToken := getBootToken(t, k8s, resourcePrefix, ns) consul, err := api.NewClient(&api.Config{ - Address: testAgent.TestServer.HTTPAddr, + Address: testAgent.HTTPAddr, Token: bootToken, }) - require.NoError(t, err) + require.NoError(err) // Ensure there's only one auth method. namespaceQuery := &api.QueryOptions{ Namespace: consulDestNamespace, } methods, _, err := consul.ACL().AuthMethodList(namespaceQuery) - require.NoError(t, err) + require.NoError(err) if consulDestNamespace == "default" { // If the destination namespace is default then AuthMethodList // will return the component-auth-method as well.
- require.Len(t, methods, 2) + require.Len(methods, 2) } else { - require.Len(t, methods, 1) + require.Len(methods, 1) } // Check the ACL auth method is created in the expected namespace. authMethodName := resourcePrefix + "-k8s-auth-method" actMethod, _, err := consul.ACL().AuthMethodRead(authMethodName, namespaceQuery) - require.NoError(t, err) - require.NotNil(t, actMethod) - require.Equal(t, "kubernetes", actMethod.Type) - require.Equal(t, "Kubernetes Auth Method", actMethod.Description) - require.NotContains(t, actMethod.Config, "MapNamespaces") - require.NotContains(t, actMethod.Config, "ConsulNamespacePrefix") + require.NoError(err) + require.NotNil(actMethod) + require.Equal("kubernetes", actMethod.Type) + require.Equal("Kubernetes Auth Method", actMethod.Description) + require.NotContains(actMethod.Config, "MapNamespaces") + require.NotContains(actMethod.Config, "ConsulNamespacePrefix") // Check the binding rule is as expected. rules, _, err := consul.ACL().BindingRuleList(authMethodName, namespaceQuery) - require.NoError(t, err) - require.Len(t, rules, 1) + require.NoError(err) + require.Len(rules, 1) actRule, _, err := consul.ACL().BindingRuleRead(rules[0].ID, namespaceQuery) - require.NoError(t, err) - require.NotNil(t, actRule) - require.Equal(t, "Kubernetes binding rule", actRule.Description) - require.Equal(t, api.BindingRuleBindTypeService, actRule.BindType) - require.Equal(t, "${serviceaccount.name}", actRule.BindName) - require.Equal(t, "serviceaccount.name!=default", actRule.Selector) + require.NoError(err) + require.NotNil(actRule) + require.Equal("Kubernetes binding rule", actRule.Description) + require.Equal(api.BindingRuleBindTypeService, actRule.BindType) + require.Equal("${serviceaccount.name}", actRule.BindName) + require.Equal("serviceaccount.name!=default", actRule.Selector) // Check that the default namespace got an attached ACL policy defNamespace, _, err := consul.Namespaces().Read("default", &api.QueryOptions{}) - require.NoError(t, err) - require.NotNil(t, defNamespace) - require.NotNil(t, defNamespace.ACLs) - require.Len(t, defNamespace.ACLs.PolicyDefaults, 1) - require.Equal(t, "cross-namespace-policy", defNamespace.ACLs.PolicyDefaults[0].Name) + require.NoError(err) + require.NotNil(defNamespace) + require.NotNil(defNamespace.ACLs) + require.Len(defNamespace.ACLs.PolicyDefaults, 1) + require.Equal("cross-namespace-policy", defNamespace.ACLs.PolicyDefaults[0].Name) if consulDestNamespace != "default" { // Check that only one namespace was created besides the // already existing `default` namespace namespaces, _, err := consul.Namespaces().List(&api.QueryOptions{}) - require.NoError(t, err) - require.Len(t, namespaces, 2) + require.NoError(err) + require.Len(namespaces, 2) // Check the created namespace properties actNamespace, _, err := consul.Namespaces().Read(consulDestNamespace, &api.QueryOptions{}) - require.NoError(t, err) - require.NotNil(t, actNamespace) - require.Equal(t, consulDestNamespace, actNamespace.Name) - require.Equal(t, "Auto-generated by consul-k8s", actNamespace.Description) - require.NotNil(t, actNamespace.ACLs) - require.Len(t, actNamespace.ACLs.PolicyDefaults, 1) - require.Equal(t, "cross-namespace-policy", actNamespace.ACLs.PolicyDefaults[0].Name) - require.Contains(t, actNamespace.Meta, "external-source") - require.Equal(t, "kubernetes", actNamespace.Meta["external-source"]) + require.NoError(err) + require.NotNil(actNamespace) + require.Equal(consulDestNamespace, actNamespace.Name) + require.Equal("Auto-generated by consul-k8s", 
actNamespace.Description) + require.NotNil(actNamespace.ACLs) + require.Len(actNamespace.ACLs.PolicyDefaults, 1) + require.Equal("cross-namespace-policy", actNamespace.ACLs.PolicyDefaults[0].Name) + require.Contains(actNamespace.Meta, "external-source") + require.Equal("kubernetes", actNamespace.Meta["external-source"]) } }) } @@ -159,7 +162,9 @@ func TestRun_ConnectInject_NamespaceMirroring(t *testing.T) { for name, c := range cases { t.Run(name, func(tt *testing.T) { k8s, testAgent := completeSetup(tt) + defer testAgent.Stop() setUpK8sServiceAccount(tt, k8s, ns) + require := require.New(tt) ui := cli.NewMockUi() cmd := Command{ @@ -168,52 +173,53 @@ func TestRun_ConnectInject_NamespaceMirroring(t *testing.T) { } cmd.init() args := []string{ - "-addresses=" + strings.Split(testAgent.TestServer.HTTPAddr, ":")[0], - "-http-port=" + strings.Split(testAgent.TestServer.HTTPAddr, ":")[1], - "-grpc-port=" + strings.Split(testAgent.TestServer.GRPCAddr, ":")[1], + "-server-address=" + strings.Split(testAgent.HTTPAddr, ":")[0], + "-server-port=" + strings.Split(testAgent.HTTPAddr, ":")[1], "-resource-prefix=" + resourcePrefix, "-k8s-namespace=" + ns, "-connect-inject", + "-enable-partitions", "-partition=default", "-enable-namespaces", "-enable-inject-k8s-namespace-mirroring", "-inject-k8s-namespace-mirroring-prefix", c.MirroringPrefix, "-acl-binding-rule-selector=serviceaccount.name!=default", + "-consul-api-timeout=5s", } args = append(args, c.ExtraFlags...) responseCode := cmd.Run(args) - require.Equal(t, 0, responseCode, ui.ErrorWriter.String()) + require.Equal(0, responseCode, ui.ErrorWriter.String()) bootToken := getBootToken(tt, k8s, resourcePrefix, ns) consul, err := api.NewClient(&api.Config{ - Address: testAgent.TestServer.HTTPAddr, + Address: testAgent.HTTPAddr, Token: bootToken, }) - require.NoError(t, err) + require.NoError(err) // Check the ACL auth method is as expected. authMethodName := resourcePrefix + "-k8s-auth-method" method, _, err := consul.ACL().AuthMethodRead(authMethodName, nil) - require.NoError(t, err) - require.NotNil(t, method, authMethodName+" not found") - require.Equal(t, "kubernetes", method.Type) - require.Equal(t, "Kubernetes Auth Method", method.Description) - require.Contains(t, method.Config, "MapNamespaces") - require.Contains(t, method.Config, "ConsulNamespacePrefix") - require.Equal(t, true, method.Config["MapNamespaces"]) - require.Equal(t, c.MirroringPrefix, method.Config["ConsulNamespacePrefix"]) + require.NoError(err) + require.NotNil(method, authMethodName+" not found") + require.Equal("kubernetes", method.Type) + require.Equal("Kubernetes Auth Method", method.Description) + require.Contains(method.Config, "MapNamespaces") + require.Contains(method.Config, "ConsulNamespacePrefix") + require.Equal(true, method.Config["MapNamespaces"]) + require.Equal(c.MirroringPrefix, method.Config["ConsulNamespacePrefix"]) // Check the binding rule is as expected. 
rules, _, err := consul.ACL().BindingRuleList(authMethodName, nil) - require.NoError(t, err) - require.Len(t, rules, 1) + require.NoError(err) + require.Len(rules, 1) actRule, _, err := consul.ACL().BindingRuleRead(rules[0].ID, nil) - require.NoError(t, err) - require.NotNil(t, actRule) - require.Equal(t, "Kubernetes binding rule", actRule.Description) - require.Equal(t, api.BindingRuleBindTypeService, actRule.BindType) - require.Equal(t, "${serviceaccount.name}", actRule.BindName) - require.Equal(t, "serviceaccount.name!=default", actRule.Selector) + require.NoError(err) + require.NotNil(actRule) + require.Equal("Kubernetes binding rule", actRule.Description) + require.Equal(api.BindingRuleBindTypeService, actRule.BindType) + require.Equal("${serviceaccount.name}", actRule.BindName) + require.Equal("serviceaccount.name!=default", actRule.Selector) }) } } @@ -223,7 +229,8 @@ func TestRun_ConnectInject_NamespaceMirroring(t *testing.T) { func TestRun_AnonymousToken_CreatedFromNonDefaultPartition(t *testing.T) { bootToken := "aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee" tokenFile := common.WriteTempFile(t, bootToken) - server := partitionedSetup(t, bootToken, "test") + server, stopFn := partitionedSetup(t, bootToken, "test") + defer stopFn() k8s := fake.NewSimpleClientset() setUpK8sServiceAccount(t, k8s, ns) @@ -234,15 +241,16 @@ func TestRun_AnonymousToken_CreatedFromNonDefaultPartition(t *testing.T) { } cmd.init() args := []string{ - "-addresses=" + strings.Split(server.HTTPAddr, ":")[0], - "-http-port=" + strings.Split(server.HTTPAddr, ":")[1], - "-grpc-port=" + strings.Split(server.GRPCAddr, ":")[1], + "-server-address=" + strings.Split(server.HTTPAddr, ":")[0], + "-server-port=" + strings.Split(server.HTTPAddr, ":")[1], "-resource-prefix=" + resourcePrefix, "-k8s-namespace=" + ns, "-bootstrap-token-file", tokenFile, + "-enable-partitions", "-allow-dns", "-partition=test", "-enable-namespaces", + "-consul-api-timeout=5s", } responseCode := cmd.Run(args) require.Equal(t, 0, responseCode, ui.ErrorWriter.String()) @@ -274,12 +282,13 @@ func TestRun_ACLPolicyUpdates(t *testing.T) { t.Run(k8sNamespaceFlag, func(t *testing.T) { k8s, testAgent := completeSetup(t) setUpK8sServiceAccount(t, k8s, k8sNamespaceFlag) + defer testAgent.Stop() + require := require.New(t) ui := cli.NewMockUi() firstRunArgs := []string{ - "-addresses=" + strings.Split(testAgent.TestServer.HTTPAddr, ":")[0], - "-http-port=" + strings.Split(testAgent.TestServer.HTTPAddr, ":")[1], - "-grpc-port=" + strings.Split(testAgent.TestServer.GRPCAddr, ":")[1], + "-server-address=" + strings.Split(testAgent.HTTPAddr, ":")[0], + "-server-port=" + strings.Split(testAgent.HTTPAddr, ":")[1], "-resource-prefix=" + resourcePrefix, "-k8s-namespace", k8sNamespaceFlag, "-client", @@ -294,10 +303,12 @@ func TestRun_ACLPolicyUpdates(t *testing.T) { "-terminating-gateway-name=tgw", "-terminating-gateway-name=anothertgw", "-controller", + "-consul-api-timeout=5s", } // Our second run, we're going to update from partitions and namespaces disabled to // namespaces enabled with a single destination ns and partitions enabled. 
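A note on the append in the next hunk: building secondRunArgs by appending to firstRunArgs only leaves firstRunArgs untouched because a slice literal's capacity equals its length, forcing append to allocate a fresh backing array. If the first slice ever gained spare capacity, the two argument lists would share storage. A quick sketch of both cases (flag values are illustrative):

package main

import "fmt"

func main() {
	// A slice literal has len == cap, so append must allocate and the
	// original slice is untouched; this is the situation in the test below.
	first := []string{"-connect-inject"}
	second := append(first, "-enable-namespaces")
	fmt.Println(first)  // [-connect-inject]
	fmt.Println(second) // [-connect-inject -enable-namespaces]

	// With spare capacity, append writes into the shared backing array and
	// the new element becomes visible through the original slice.
	first = make([]string, 1, 2)
	first[0] = "-connect-inject"
	second = append(first, "-enable-namespaces")
	fmt.Println(first[:2]) // [-connect-inject -enable-namespaces]
	fmt.Println(second[1]) // -enable-namespaces
}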
secondRunArgs := append(firstRunArgs, + "-enable-partitions", "-partition=default", "-enable-namespaces", "-consul-sync-destination-namespace=sync", @@ -309,14 +320,14 @@ clientset: k8s, } responseCode := cmd.Run(firstRunArgs) - require.Equal(t, 0, responseCode, ui.ErrorWriter.String()) + require.Equal(0, responseCode, ui.ErrorWriter.String()) bootToken := getBootToken(t, k8s, resourcePrefix, k8sNamespaceFlag) consul, err := api.NewClient(&api.Config{ - Address: testAgent.TestServer.HTTPAddr, + Address: testAgent.HTTPAddr, Token: bootToken, }) - require.NoError(t, err) + require.NoError(err) // Check that the expected policies were created. firstRunExpectedPolicies := []string{ @@ -326,37 +337,37 @@ "mesh-gateway-policy", "snapshot-agent-policy", "enterprise-license-token", - "igw-policy", - "anotherigw-policy", - "tgw-policy", - "anothertgw-policy", + resourcePrefix + "-igw-policy", + resourcePrefix + "-anotherigw-policy", + resourcePrefix + "-tgw-policy", + resourcePrefix + "-anothertgw-policy", "connect-inject-policy", "controller-policy", } policies, _, err := consul.ACL().PolicyList(nil) - require.NoError(t, err) + require.NoError(err) // Check that we have the right number of policies. The actual // policies will have two more than expected because of the // global management and namespace management policies that // are automatically created, the latter in consul-ent v1.7+. - require.Equal(t, len(firstRunExpectedPolicies), len(policies)-2) + require.Equal(len(firstRunExpectedPolicies), len(policies)-2) // Collect the actual policies into a map to make it easier to assert // on their existence and contents. actualPolicies := make(map[string]string) for _, p := range policies { policy, _, err := consul.ACL().PolicyRead(p.ID, nil) - require.NoError(t, err) + require.NoError(err) actualPolicies[p.Name] = policy.Rules } for _, expected := range firstRunExpectedPolicies { actRules, ok := actualPolicies[expected] - require.True(t, ok, "Did not find policy %s", expected) + require.True(ok, "Did not find policy %s", expected) // We assert that the policy doesn't have any namespace config // in it because later that's what we're using to test that it // got updated. - require.NotContains(t, actRules, "namespace") + require.NotContains(actRules, "namespace") } // Re-run the command with namespace flags. The policies should be updated. @@ -367,7 +378,7 @@ clientset: k8s, } responseCode = cmd.Run(secondRunArgs) - require.Equal(t, 0, responseCode, ui.ErrorWriter.String()) + require.Equal(0, responseCode, ui.ErrorWriter.String()) // Check that the policies have all been updated. secondRunExpectedPolicies := []string{ @@ -379,50 +390,50 @@ "snapshot-agent-policy", "enterprise-license-token", "cross-namespace-policy", - "igw-policy", - "anotherigw-policy", - "tgw-policy", - "anothertgw-policy", + resourcePrefix + "-igw-policy", + resourcePrefix + "-anotherigw-policy", + resourcePrefix + "-tgw-policy", + resourcePrefix + "-anothertgw-policy", "controller-policy", "partitions-token", } policies, _, err = consul.ACL().PolicyList(nil) - require.NoError(t, err) + require.NoError(err) // Check that we have the right number of policies. The actual // policies will have two more than expected because of the // global management and namespace management policies that // are automatically created, the latter in consul-ent v1.7+.
- require.Equal(t, len(secondRunExpectedPolicies), len(policies)-2) + require.Equal(len(secondRunExpectedPolicies), len(policies)-2) // Collect the actual policies into a map to make it easier to assert // on their existence and contents. actualPolicies = make(map[string]string) for _, p := range policies { policy, _, err := consul.ACL().PolicyRead(p.ID, nil) - require.NoError(t, err) + require.NoError(err) actualPolicies[p.Name] = policy.Rules } for _, expected := range secondRunExpectedPolicies { actRules, ok := actualPolicies[expected] - require.True(t, ok, "Did not find policy %s", expected) + require.True(ok, "Did not find policy %s", expected) switch expected { case "connect-inject-policy": // The connect inject token doesn't have namespace config, // but does change to operator:write from an empty string. - require.Contains(t, actRules, "policy = \"write\"") + require.Contains(actRules, "policy = \"write\"") case "snapshot-agent-policy", "enterprise-license-token": // The snapshot agent and enterprise license tokens shouldn't change. - require.NotContains(t, actRules, "namespace") - require.Contains(t, actRules, "acl = \"write\"") + require.NotContains(actRules, "namespace") + require.Contains(actRules, "acl = \"write\"") case "partitions-token": - require.Contains(t, actRules, "operator = \"write\"") + require.Contains(actRules, "operator = \"write\"") default: // Assert that the policies have the word namespace in them. This // tests that they were updated. The actual contents are tested // in rules_test.go. - require.Contains(t, actRules, "namespace") + require.Contains(actRules, "namespace") } } }) @@ -583,18 +594,21 @@ func TestRun_ConnectInject_Updates(t *testing.T) { for name, c := range cases { t.Run(name, func(tt *testing.T) { + require := require.New(tt) k8s, testAgent := completeSetup(tt) + defer testAgent.Stop() setUpK8sServiceAccount(tt, k8s, ns) ui := cli.NewMockUi() defaultArgs := []string{ - "-addresses=" + strings.Split(testAgent.TestServer.HTTPAddr, ":")[0], - "-http-port=" + strings.Split(testAgent.TestServer.HTTPAddr, ":")[1], - "-grpc-port=" + strings.Split(testAgent.TestServer.GRPCAddr, ":")[1], + "-server-address=" + strings.Split(testAgent.HTTPAddr, ":")[0], + "-server-port=" + strings.Split(testAgent.HTTPAddr, ":")[1], "-resource-prefix=" + resourcePrefix, "-k8s-namespace=" + ns, + "-enable-partitions", "-partition=default", "-connect-inject", + "-consul-api-timeout=5s", } // First run. NOTE: we don't assert anything here since we've @@ -605,7 +619,7 @@ func TestRun_ConnectInject_Updates(t *testing.T) { clientset: k8s, } responseCode := cmd.Run(append(defaultArgs, c.FirstRunArgs...)) - require.Equal(t, 0, responseCode, ui.ErrorWriter.String()) + require.Equal(0, responseCode, ui.ErrorWriter.String()) // Second run. // NOTE: We're redefining the command so that the old flag values are @@ -615,39 +629,39 @@ func TestRun_ConnectInject_Updates(t *testing.T) { clientset: k8s, } responseCode = cmd.Run(append(defaultArgs, c.SecondRunArgs...)) - require.Equal(t, 0, responseCode, ui.ErrorWriter.String()) + require.Equal(0, responseCode, ui.ErrorWriter.String()) // Now check that everything is as expected. bootToken := getBootToken(tt, k8s, resourcePrefix, ns) consul, err := api.NewClient(&api.Config{ - Address: testAgent.TestServer.HTTPAddr, + Address: testAgent.HTTPAddr, Token: bootToken, }) - require.NoError(t, err) + require.NoError(err) // Check the ACL auth method is as expected. 
authMethodName := resourcePrefix + "-k8s-auth-method" method, _, err := consul.ACL().AuthMethodRead(authMethodName, &api.QueryOptions{ Namespace: c.AuthMethodExpectedNS, }) - require.NoError(t, err) - require.NotNil(t, method, authMethodName+" not found") + require.NoError(err) + require.NotNil(method, authMethodName+" not found") if c.AuthMethodExpectMapNamespacesConfig { - require.Contains(t, method.Config, "MapNamespaces") - require.Contains(t, method.Config, "ConsulNamespacePrefix") - require.Equal(t, true, method.Config["MapNamespaces"]) - require.Equal(t, c.AuthMethodExpectedNamespacePrefixConfig, method.Config["ConsulNamespacePrefix"]) + require.Contains(method.Config, "MapNamespaces") + require.Contains(method.Config, "ConsulNamespacePrefix") + require.Equal(true, method.Config["MapNamespaces"]) + require.Equal(c.AuthMethodExpectedNamespacePrefixConfig, method.Config["ConsulNamespacePrefix"]) } else { - require.NotContains(t, method.Config, "MapNamespaces") - require.NotContains(t, method.Config, "ConsulNamespacePrefix") + require.NotContains(method.Config, "MapNamespaces") + require.NotContains(method.Config, "ConsulNamespacePrefix") } // Check the binding rule is as expected. rules, _, err := consul.ACL().BindingRuleList(authMethodName, &api.QueryOptions{ Namespace: c.BindingRuleExpectedNS, }) - require.NoError(t, err) - require.Len(t, rules, 1) + require.NoError(err) + require.Len(rules, 1) }) } } @@ -678,7 +692,7 @@ func TestRun_TokensWithNamespacesEnabled(t *testing.T) { LocalToken: false, }, "partitions token": { - TokenFlags: []string{"-partition=default"}, + TokenFlags: []string{"-enable-partitions", "-partition=default"}, PolicyNames: []string{"partitions-token"}, PolicyDCs: []string{"dc1"}, SecretNames: []string{resourcePrefix + "-partitions-acl-token"}, @@ -689,6 +703,8 @@ func TestRun_TokensWithNamespacesEnabled(t *testing.T) { t.Run(testName, func(t *testing.T) { k8s, testSvr := completeSetup(t) setUpK8sServiceAccount(t, k8s, ns) + defer testSvr.Stop() + require := require.New(t) // Run the command. ui := cli.NewMockUi() @@ -698,41 +714,42 @@ func TestRun_TokensWithNamespacesEnabled(t *testing.T) { } cmd.init() cmdArgs := append([]string{ - "-addresses", strings.Split(testSvr.TestServer.HTTPAddr, ":")[0], - "-http-port", strings.Split(testSvr.TestServer.HTTPAddr, ":")[1], - "-grpc-port", strings.Split(testSvr.TestServer.GRPCAddr, ":")[1], + "-server-address", strings.Split(testSvr.HTTPAddr, ":")[0], + "-server-port", strings.Split(testSvr.HTTPAddr, ":")[1], "-resource-prefix=" + resourcePrefix, "-k8s-namespace=" + ns, + "-enable-partitions", "-partition=default", "-enable-namespaces", + "-consul-api-timeout=5s", }, c.TokenFlags...) responseCode := cmd.Run(cmdArgs) - require.Equal(t, 0, responseCode, ui.ErrorWriter.String()) + require.Equal(0, responseCode, ui.ErrorWriter.String()) // Check that the expected policy was created. bootToken := getBootToken(t, k8s, resourcePrefix, ns) consul, err := api.NewClient(&api.Config{ - Address: testSvr.TestServer.HTTPAddr, + Address: testSvr.HTTPAddr, Token: bootToken, }) - require.NoError(t, err) + require.NoError(err) // Check that the expected policy was created. for i := range c.PolicyNames { policy := policyExists(t, c.PolicyNames[i], consul) - require.Equal(t, c.PolicyDCs, policy.Datacenters) + require.Equal(c.PolicyDCs, policy.Datacenters) // Test that the token was created as a Kubernetes Secret. 
tokenSecret, err := k8s.CoreV1().Secrets(ns).Get(context.Background(), c.SecretNames[i], metav1.GetOptions{}) - require.NoError(t, err) - require.NotNil(t, tokenSecret) + require.NoError(err) + require.NotNil(tokenSecret) token, ok := tokenSecret.Data["token"] - require.True(t, ok) + require.True(ok) // Test that the token has the expected policies in Consul. tokenData, _, err := consul.ACL().TokenReadSelf(&api.QueryOptions{Token: string(token)}) - require.NoError(t, err) - require.Equal(t, c.PolicyNames[i], tokenData.Policies[0].Name) - require.Equal(t, c.LocalToken, tokenData.Local) + require.NoError(err) + require.Equal(c.PolicyNames[i], tokenData.Policies[0].Name) + require.Equal(c.LocalToken, tokenData.Local) } // Test that if the same command is run again, it doesn't error. @@ -744,7 +761,7 @@ func TestRun_TokensWithNamespacesEnabled(t *testing.T) { } cmd.init() responseCode := cmd.Run(cmdArgs) - require.Equal(t, 0, responseCode, ui.ErrorWriter.String()) + require.Equal(0, responseCode, ui.ErrorWriter.String()) }) }) } @@ -765,9 +782,9 @@ func TestRun_GatewayNamespaceParsing(t *testing.T) { TokenFlags: []string{"-ingress-gateway-name=ingress", "-ingress-gateway-name=gateway", "-ingress-gateway-name=another-gateway"}, - PolicyNames: []string{"ingress-policy", - "gateway-policy", - "another-gateway-policy"}, + PolicyNames: []string{resourcePrefix + "-ingress-policy", + resourcePrefix + "-gateway-policy", + resourcePrefix + "-another-gateway-policy"}, ExpectedPolicies: []string{` partition "default" { namespace "default" { @@ -814,9 +831,9 @@ partition "default" { TokenFlags: []string{"-ingress-gateway-name=ingress.", "-ingress-gateway-name=gateway.namespace1", "-ingress-gateway-name=another-gateway.namespace2"}, - PolicyNames: []string{"ingress-policy", - "gateway-policy", - "another-gateway-policy"}, + PolicyNames: []string{resourcePrefix + "-ingress-policy", + resourcePrefix + "-gateway-policy", + resourcePrefix + "-another-gateway-policy"}, ExpectedPolicies: []string{` partition "default" { namespace "default" { @@ -863,9 +880,9 @@ partition "default" { TokenFlags: []string{"-terminating-gateway-name=terminating", "-terminating-gateway-name=gateway", "-terminating-gateway-name=another-gateway"}, - PolicyNames: []string{"terminating-policy", - "gateway-policy", - "another-gateway-policy"}, + PolicyNames: []string{resourcePrefix + "-terminating-policy", + resourcePrefix + "-gateway-policy", + resourcePrefix + "-another-gateway-policy"}, ExpectedPolicies: []string{` partition "default" { namespace "default" { @@ -903,9 +920,9 @@ partition "default" { TokenFlags: []string{"-terminating-gateway-name=terminating.", "-terminating-gateway-name=gateway.namespace1", "-terminating-gateway-name=another-gateway.namespace2"}, - PolicyNames: []string{"terminating-policy", - "gateway-policy", - "another-gateway-policy"}, + PolicyNames: []string{resourcePrefix + "-terminating-policy", + resourcePrefix + "-gateway-policy", + resourcePrefix + "-another-gateway-policy"}, ExpectedPolicies: []string{` partition "default" { namespace "default" { @@ -942,7 +959,9 @@ partition "default" { for _, c := range cases { t.Run(c.TestName, func(t *testing.T) { k8s, testSvr := completeSetup(t) + defer testSvr.Stop() setUpK8sServiceAccount(t, k8s, ns) + require := require.New(t) // Run the command. 
ui := cli.NewMockUi() @@ -953,31 +972,32 @@ partition "default" { cmd.init() cmdArgs := append([]string{ "-k8s-namespace=" + ns, - "-addresses", strings.Split(testSvr.TestServer.HTTPAddr, ":")[0], - "-http-port", strings.Split(testSvr.TestServer.HTTPAddr, ":")[1], - "-grpc-port", strings.Split(testSvr.TestServer.GRPCAddr, ":")[1], + "-server-address", strings.Split(testSvr.HTTPAddr, ":")[0], + "-server-port", strings.Split(testSvr.HTTPAddr, ":")[1], "-resource-prefix=" + resourcePrefix, "-enable-namespaces=true", + "-enable-partitions", "-partition=default", + "-consul-api-timeout=5s", }, c.TokenFlags...) responseCode := cmd.Run(cmdArgs) - require.Equal(t, 0, responseCode, ui.ErrorWriter.String()) + require.Equal(0, responseCode, ui.ErrorWriter.String()) // Check that the expected policy was created. bootToken := getBootToken(t, k8s, resourcePrefix, ns) consul, err := api.NewClient(&api.Config{ - Address: testSvr.TestServer.HTTPAddr, + Address: testSvr.HTTPAddr, Token: bootToken, }) - require.NoError(t, err) + require.NoError(err) for i := range c.PolicyNames { policy := policyExists(t, c.PolicyNames[i], consul) fullPolicy, _, err := consul.ACL().PolicyRead(policy.ID, nil) - require.NoError(t, err) - require.Equal(t, c.ExpectedPolicies[i], fullPolicy.Rules) + require.NoError(err) + require.Equal(c.ExpectedPolicies[i], fullPolicy.Rules) } // Test that if the same command is run again, it doesn't error. @@ -989,7 +1009,7 @@ partition "default" { } cmd.init() responseCode := cmd.Run(cmdArgs) - require.Equal(t, 0, responseCode, ui.ErrorWriter.String()) + require.Equal(0, responseCode, ui.ErrorWriter.String()) }) }) } @@ -1011,7 +1031,7 @@ func TestRun_NamespaceEnabled_ValidateLoginToken_PrimaryDatacenter(t *testing.T) { ComponentName: "connect-injector", TokenFlags: []string{"-connect-inject"}, - Roles: []string{resourcePrefix + "-connect-inject-acl-role"}, + Roles: []string{resourcePrefix + "-connect-injector-acl-role"}, Namespace: ns, GlobalToken: false, }, @@ -1029,6 +1049,7 @@ func TestRun_NamespaceEnabled_ValidateLoginToken_PrimaryDatacenter(t *testing.T) serviceAccountName := fmt.Sprintf("%s-%s", resourcePrefix, c.ComponentName) k8s, testSvr := completeSetup(t) + defer testSvr.Stop() _, jwtToken := setUpK8sServiceAccount(t, k8s, c.Namespace) k8sMockServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { @@ -1057,16 +1078,16 @@ func TestRun_NamespaceEnabled_ValidateLoginToken_PrimaryDatacenter(t *testing.T) "-enable-namespaces", "-consul-inject-destination-namespace", c.Namespace, "-auth-method-host=" + k8sMockServer.URL, - "-addresses", strings.Split(testSvr.TestServer.HTTPAddr, ":")[0], - "-http-port", strings.Split(testSvr.TestServer.HTTPAddr, ":")[1], - "-grpc-port", strings.Split(testSvr.TestServer.GRPCAddr, ":")[1], + "-server-address", strings.Split(testSvr.HTTPAddr, ":")[0], + "-server-port", strings.Split(testSvr.HTTPAddr, ":")[1], + "-consul-api-timeout=5s", }, c.TokenFlags...) cmd.init() responseCode := cmd.Run(cmdArgs) require.Equal(t, 0, responseCode, ui.ErrorWriter.String()) client, err := api.NewClient(&api.Config{ - Address: testSvr.TestServer.HTTPAddr, + Address: testSvr.HTTPAddr, }) require.NoError(t, err) @@ -1102,7 +1123,7 @@ func TestRun_NamespaceEnabled_ValidateLoginToken_SecondaryDatacenter(t *testing. 
{ ComponentName: "connect-injector", TokenFlags: []string{"-connect-inject"}, - Roles: []string{resourcePrefix + "-connect-inject-acl-role-dc2"}, + Roles: []string{resourcePrefix + "-connect-injector-acl-role-dc2"}, Namespace: ns, GlobalToken: true, }, @@ -1121,7 +1142,8 @@ func TestRun_NamespaceEnabled_ValidateLoginToken_SecondaryDatacenter(t *testing. authMethodName := fmt.Sprintf("%s-%s-%s", resourcePrefix, componentAuthMethod, "dc2") serviceAccountName := fmt.Sprintf("%s-%s", resourcePrefix, c.ComponentName) - k8s, _, consulHTTPAddr, consulGRPCAddr := mockReplicatedSetup(t, bootToken) + k8s, _, consulHTTPAddr, cleanup := mockReplicatedSetup(t, bootToken) + defer cleanup() _, jwtToken := setUpK8sServiceAccount(t, k8s, c.Namespace) k8sMockServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { @@ -1152,9 +1174,9 @@ func TestRun_NamespaceEnabled_ValidateLoginToken_SecondaryDatacenter(t *testing. "-consul-inject-destination-namespace", c.Namespace, "-acl-replication-token-file", tokenFile, "-auth-method-host=" + k8sMockServer.URL, - "-addresses", strings.Split(consulHTTPAddr, ":")[0], - "-http-port", strings.Split(consulHTTPAddr, ":")[1], - "-grpc-port", strings.Split(consulGRPCAddr, ":")[1], + "-server-address", strings.Split(consulHTTPAddr, ":")[0], + "-server-port", strings.Split(consulHTTPAddr, ":")[1], + "-consul-api-timeout=5s", }, c.TokenFlags...) cmd.init() responseCode := cmd.Run(cmdArgs) @@ -1189,12 +1211,13 @@ func TestRun_PartitionTokenDefaultPartition_WithProvidedSecretID(t *testing.T) { t.Parallel() k8s, testSvr := completeSetup(t) + defer testSvr.Stop() setUpK8sServiceAccount(t, k8s, ns) partitionToken := "123e4567-e89b-12d3-a456-426614174000" partitionTokenFile, err := os.CreateTemp("", "partitiontoken") require.NoError(t, err) - defer os.RemoveAll(partitionTokenFile.Name()) + defer os.Remove(partitionTokenFile.Name()) partitionTokenFile.WriteString(partitionToken) // Run the command. @@ -1207,12 +1230,13 @@ func TestRun_PartitionTokenDefaultPartition_WithProvidedSecretID(t *testing.T) { cmdArgs := []string{ "-timeout=1m", "-k8s-namespace=" + ns, - "-addresses", strings.Split(testSvr.TestServer.HTTPAddr, ":")[0], - "-http-port", strings.Split(testSvr.TestServer.HTTPAddr, ":")[1], - "-grpc-port", strings.Split(testSvr.TestServer.GRPCAddr, ":")[1], + "-server-address", strings.Split(testSvr.HTTPAddr, ":")[0], + "-server-port", strings.Split(testSvr.HTTPAddr, ":")[1], "-resource-prefix=" + resourcePrefix, + "-enable-partitions", "-partition=default", "-partition-token-file", partitionTokenFile.Name(), + "-consul-api-timeout=5s", } responseCode := cmd.Run(cmdArgs) @@ -1220,7 +1244,7 @@ func TestRun_PartitionTokenDefaultPartition_WithProvidedSecretID(t *testing.T) { // Check that this token is created. consul, err := api.NewClient(&api.Config{ - Address: testSvr.TestServer.HTTPAddr, + Address: testSvr.HTTPAddr, Token: partitionToken, }) require.NoError(t, err) @@ -1253,30 +1277,35 @@ func TestRun_PartitionTokenDefaultPartition_WithProvidedSecretID(t *testing.T) { // a client in the provided partitionName. The bootToken is the token used as the bootstrap token // for both the client and the server. The helper creates a server, then creates a partition with // the provided partitionName and then creates a client in said partition. 
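For context on the rewritten helper below: the mock connection-manager wrapper is replaced with direct use of testutil.NewTestServerConfigT from github.com/hashicorp/consul/sdk/testutil. A minimal sketch of the server half, using t.Cleanup where the helper instead returns an explicit stop function (the helper name here is illustrative, not from the patch):

package example

import (
	"testing"

	"github.com/hashicorp/consul/sdk/testutil"
)

// newACLServer starts a Consul test server with ACLs bootstrapped to the
// given token, the same configuration the rewritten helper below applies.
func newACLServer(t *testing.T, bootToken string) *testutil.TestServer {
	server, err := testutil.NewTestServerConfigT(t, func(c *testutil.TestServerConfig) {
		c.ACL.Enabled = true
		c.ACL.Tokens.InitialManagement = bootToken
	})
	if err != nil {
		t.Fatal(err)
	}
	server.WaitForLeader(t)
	t.Cleanup(func() { server.Stop() }) // the patch returns stop funcs instead
	return server
}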
-func partitionedSetup(t *testing.T, bootToken string, partitionName string) *testutil.TestServer { - server := test.TestServerWithMockConnMgrWatcher(t, func(c *testutil.TestServerConfig) { +func partitionedSetup(t *testing.T, bootToken string, partitionName string) (*testutil.TestServer, func()) { + server, err := testutil.NewTestServerConfigT(t, func(c *testutil.TestServerConfig) { c.ACL.Enabled = true c.ACL.Tokens.InitialManagement = bootToken }) + require.NoError(t, err) + server.WaitForLeader(t) - server.Cfg.APIClientConfig.Token = bootToken - serverAPIClient, err := consul.NewClient(server.Cfg.APIClientConfig, 5*time.Second) + serverAPIClient, err := consul.NewClient(&api.Config{ + Address: server.HTTPAddr, + Token: bootToken, + }, 5*time.Second) require.NoError(t, err) _, _, err = serverAPIClient.Partitions().Create(context.Background(), &api.Partition{Name: partitionName}, &api.WriteOptions{}) require.NoError(t, err) - // TODO (ashwin: agentless) remove this sleep. - // Currently we need this sleep in order to ensure the Consul client that is a partition of the Consul server - // can successfully join the server. 3 seconds was the minimum sleep that worked in this situation. - time.Sleep(3 * time.Second) - _ = test.TestServerWithMockConnMgrWatcher(t, func(c *testutil.TestServerConfig) { + + partitionedClient, err := testutil.NewTestServerConfigT(t, func(c *testutil.TestServerConfig) { c.Server = false c.Bootstrap = false c.Partition = partitionName - c.RetryJoin = []string{server.TestServer.LANAddr} + c.RetryJoin = []string{server.LANAddr} c.ACL.Enabled = true - c.ACL.Tokens.InitialManagement = bootToken + c.ACL.Tokens.Agent = bootToken }) + require.NoError(t, err) - return server.TestServer + return server, func() { + server.Stop() + partitionedClient.Stop() + } } diff --git a/control-plane/subcommand/server-acl-init/command_test.go b/control-plane/subcommand/server-acl-init/command_test.go index 842c5a69eb..83fa50b3b7 100644 --- a/control-plane/subcommand/server-acl-init/command_test.go +++ b/control-plane/subcommand/server-acl-init/command_test.go @@ -16,14 +16,17 @@ import ( "time" "github.com/hashicorp/consul-k8s/control-plane/helper/cert" + "github.com/hashicorp/consul-k8s/control-plane/helper/go-discover/mocks" "github.com/hashicorp/consul-k8s/control-plane/helper/test" "github.com/hashicorp/consul-k8s/control-plane/subcommand/common" "github.com/hashicorp/consul/api" "github.com/hashicorp/consul/sdk/freeport" "github.com/hashicorp/consul/sdk/testutil" "github.com/hashicorp/consul/sdk/testutil/retry" + "github.com/hashicorp/go-discover" "github.com/hashicorp/go-hclog" "github.com/mitchellh/cli" + "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" v1 "k8s.io/api/core/v1" k8serrors "k8s.io/apimachinery/pkg/api/errors" @@ -47,29 +50,38 @@ func TestRun_FlagValidation(t *testing.T) { }{ { Flags: []string{}, - ExpErr: "-addresses must be set", + ExpErr: "-server-address must be set at least once", }, { - Flags: []string{"-addresses=localhost"}, + Flags: []string{"-server-address=localhost"}, ExpErr: "-resource-prefix must be set", }, { Flags: []string{ "-acl-replication-token-file=/notexist", - "-addresses=localhost", + "-server-address=localhost", "-resource-prefix=prefix"}, + ExpErr: "-consul-api-timeout must be set to a value greater than 0", + }, + { + Flags: []string{ + "-acl-replication-token-file=/notexist", + "-server-address=localhost", + "-resource-prefix=prefix", + "-consul-api-timeout=5s"}, ExpErr: "unable to read token from file \"/notexist\": 
open /notexist: no such file or directory", }, { Flags: []string{ "-bootstrap-token-file=/notexist", - "-addresses=localhost", - "-resource-prefix=prefix"}, + "-server-address=localhost", + "-resource-prefix=prefix", + "-consul-api-timeout=5s"}, ExpErr: "unable to read token from file \"/notexist\": open /notexist: no such file or directory", }, { Flags: []string{ - "-addresses=localhost", + "-server-address=localhost", "-resource-prefix=prefix", "-sync-consul-node-name=Speci@l_Chars", }, @@ -78,7 +90,7 @@ func TestRun_FlagValidation(t *testing.T) { }, { Flags: []string{ - "-addresses=localhost", + "-server-address=localhost", "-resource-prefix=prefix", "-sync-consul-node-name=5r9OPGfSRXUdGzNjBdAwmhCBrzHDNYs4XjZVR4wp7lSLIzqwS0ta51nBLIN0TMPV-too-long", }, @@ -106,7 +118,8 @@ func TestRun_FlagValidation(t *testing.T) { func TestRun_Defaults(t *testing.T) { t.Parallel() - k8s, testClient := completeSetup(t) + k8s, testSvr := completeSetup(t) + defer testSvr.Stop() setUpK8sServiceAccount(t, k8s, ns) // Run the command. @@ -118,10 +131,10 @@ func TestRun_Defaults(t *testing.T) { args := []string{ "-timeout=1m", "-k8s-namespace=" + ns, - "-addresses", strings.Split(testClient.TestServer.HTTPAddr, ":")[0], - "-http-port", strings.Split(testClient.TestServer.HTTPAddr, ":")[1], - "-grpc-port", strings.Split(testClient.TestServer.GRPCAddr, ":")[1], + "-server-address", strings.Split(testSvr.HTTPAddr, ":")[0], + "-server-port", strings.Split(testSvr.HTTPAddr, ":")[1], "-resource-prefix=" + resourcePrefix, + "-consul-api-timeout", "5s", } responseCode := cmd.Run(args) require.Equal(t, 0, responseCode, ui.ErrorWriter.String()) @@ -130,8 +143,10 @@ func TestRun_Defaults(t *testing.T) { bootToken := getBootToken(t, k8s, resourcePrefix, ns) // Check that it has the right policies. - testClient.Cfg.APIClientConfig.Token = bootToken - consul, err := api.NewClient(testClient.Cfg.APIClientConfig) + consul, err := api.NewClient(&api.Config{ + Address: testSvr.HTTPAddr, + Token: bootToken, + }) require.NoError(t, err) tokenData, _, err := consul.ACL().TokenReadSelf(nil) require.NoError(t, err) @@ -181,8 +196,9 @@ func TestRun_TokensPrimaryDC(t *testing.T) { } for _, c := range cases { t.Run(c.TestName, func(t *testing.T) { - k8s, testClient := completeSetup(t) + k8s, testSvr := completeSetup(t) setUpK8sServiceAccount(t, k8s, ns) + defer testSvr.Stop() // Run the command. ui := cli.NewMockUi() @@ -194,10 +210,10 @@ func TestRun_TokensPrimaryDC(t *testing.T) { cmdArgs := append([]string{ "-timeout=1m", "-k8s-namespace=" + ns, - "-addresses", strings.Split(testClient.TestServer.HTTPAddr, ":")[0], - "-http-port", strings.Split(testClient.TestServer.HTTPAddr, ":")[1], - "-grpc-port", strings.Split(testClient.TestServer.GRPCAddr, ":")[1], + "-server-address", strings.Split(testSvr.HTTPAddr, ":")[0], + "-server-port", strings.Split(testSvr.HTTPAddr, ":")[1], "-resource-prefix=" + resourcePrefix, + "-consul-api-timeout", "5s", }, c.TokenFlags...) responseCode := cmd.Run(cmdArgs) @@ -205,9 +221,10 @@ func TestRun_TokensPrimaryDC(t *testing.T) { // Check that the expected policy was created. 
bootToken := getBootToken(t, k8s, resourcePrefix, ns) - testClient.Cfg.APIClientConfig.Token = bootToken - consul, err := api.NewClient(testClient.Cfg.APIClientConfig) - require.NoError(t, err) + consul, err := api.NewClient(&api.Config{ + Address: testSvr.HTTPAddr, + Token: bootToken, + }) require.NoError(t, err) for i := range c.PolicyNames { @@ -246,13 +263,14 @@ func TestRun_TokensPrimaryDC(t *testing.T) { func TestRun_ReplicationTokenPrimaryDC_WithProvidedSecretID(t *testing.T) { t.Parallel() - k8s, testClient := completeSetup(t) + k8s, testSvr := completeSetup(t) + defer testSvr.Stop() setUpK8sServiceAccount(t, k8s, ns) replicationToken := "123e4567-e89b-12d3-a456-426614174000" replicationTokenFile, err := os.CreateTemp("", "replicationtoken") require.NoError(t, err) - defer os.RemoveAll(replicationTokenFile.Name()) + defer os.Remove(replicationTokenFile.Name()) replicationTokenFile.WriteString(replicationToken) // Run the command. @@ -265,12 +283,12 @@ func TestRun_ReplicationTokenPrimaryDC_WithProvidedSecretID(t *testing.T) { cmdArgs := []string{ "-timeout=1m", "-k8s-namespace=" + ns, - "-addresses", strings.Split(testClient.TestServer.HTTPAddr, ":")[0], - "-http-port", strings.Split(testClient.TestServer.HTTPAddr, ":")[1], - "-grpc-port", strings.Split(testClient.TestServer.GRPCAddr, ":")[1], + "-server-address", strings.Split(testSvr.HTTPAddr, ":")[0], + "-server-port", strings.Split(testSvr.HTTPAddr, ":")[1], "-resource-prefix=" + resourcePrefix, "-create-acl-replication-token", "-acl-replication-token-file", replicationTokenFile.Name(), + "-consul-api-timeout", "5s", } responseCode := cmd.Run(cmdArgs) @@ -278,7 +296,7 @@ func TestRun_ReplicationTokenPrimaryDC_WithProvidedSecretID(t *testing.T) { // Check that this token is created. consul, err := api.NewClient(&api.Config{ - Address: testClient.TestServer.HTTPAddr, + Address: testSvr.HTTPAddr, Token: replicationToken, }) require.NoError(t, err) @@ -333,8 +351,9 @@ func TestRun_TokensReplicatedDC(t *testing.T) { bootToken := "aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee" tokenFile := common.WriteTempFile(t, bootToken) - k8s, consul, secondaryAddr, secondaryGRPCAddr := mockReplicatedSetup(t, bootToken) + k8s, consul, secondaryAddr, cleanup := mockReplicatedSetup(t, bootToken) setUpK8sServiceAccount(t, k8s, ns) + defer cleanup() // Run the command. ui := cli.NewMockUi() @@ -348,10 +367,10 @@ func TestRun_TokensReplicatedDC(t *testing.T) { "-timeout=1m", "-k8s-namespace=" + ns, "-acl-replication-token-file", tokenFile, - "-addresses", strings.Split(secondaryAddr, ":")[0], - "-http-port", strings.Split(secondaryAddr, ":")[1], - "-grpc-port", strings.Split(secondaryGRPCAddr, ":")[1], + "-server-address", strings.Split(secondaryAddr, ":")[0], + "-server-port", strings.Split(secondaryAddr, ":")[1], "-resource-prefix=" + resourcePrefix, + "-consul-api-timeout", "5s", }, c.TokenFlags...) responseCode := cmd.Run(cmdArgs) @@ -411,6 +430,7 @@ func TestRun_TokensWithProvidedBootstrapToken(t *testing.T) { k8s, testAgent := completeBootstrappedSetup(t, bootToken) setUpK8sServiceAccount(t, k8s, ns) + defer testAgent.Stop() // Run the command. 
ui := cli.NewMockUi() @@ -422,17 +442,17 @@ func TestRun_TokensWithProvidedBootstrapToken(t *testing.T) { "-timeout=1m", "-k8s-namespace", ns, "-bootstrap-token-file", tokenFile, - "-addresses", strings.Split(testAgent.TestServer.HTTPAddr, ":")[0], - "-http-port", strings.Split(testAgent.TestServer.HTTPAddr, ":")[1], - "-grpc-port", strings.Split(testAgent.TestServer.GRPCAddr, ":")[1], + "-server-address", strings.Split(testAgent.HTTPAddr, ":")[0], + "-server-port", strings.Split(testAgent.HTTPAddr, ":")[1], "-resource-prefix", resourcePrefix, + "-consul-api-timeout", "5s", }, c.TokenFlags...) responseCode := cmd.Run(cmdArgs) require.Equal(t, 0, responseCode, ui.ErrorWriter.String()) consul, err := api.NewClient(&api.Config{ - Address: testAgent.TestServer.HTTPAddr, + Address: testAgent.HTTPAddr, Token: bootToken, }) require.NoError(t, err) @@ -495,12 +515,13 @@ func TestRun_AnonymousTokenPolicy(t *testing.T) { flags := c.Flags var k8s *fake.Clientset var consulHTTPAddr string - var consulGRPCAddr string var consul *api.Client if c.SecondaryDC { + var cleanup func() bootToken := "aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee" - k8s, consul, consulHTTPAddr, consulGRPCAddr = mockReplicatedSetup(t, bootToken) + k8s, consul, consulHTTPAddr, cleanup = mockReplicatedSetup(t, bootToken) + defer cleanup() tmp, err := os.CreateTemp("", "") require.NoError(t, err) @@ -508,10 +529,10 @@ func TestRun_AnonymousTokenPolicy(t *testing.T) { require.NoError(t, err) flags = append(flags, "-acl-replication-token-file", tmp.Name()) } else { - var testClient *test.TestServerClient - k8s, testClient = completeSetup(t) - consulHTTPAddr = testClient.TestServer.HTTPAddr - consulGRPCAddr = testClient.TestServer.GRPCAddr + var testSvr *testutil.TestServer + k8s, testSvr = completeSetup(t) + defer testSvr.Stop() + consulHTTPAddr = testSvr.HTTPAddr } setUpK8sServiceAccount(t, k8s, ns) @@ -526,9 +547,9 @@ func TestRun_AnonymousTokenPolicy(t *testing.T) { "-timeout=1m", "-resource-prefix=" + resourcePrefix, "-k8s-namespace=" + ns, - "-addresses", strings.Split(consulHTTPAddr, ":")[0], - "-http-port", strings.Split(consulHTTPAddr, ":")[1], - "-grpc-port", strings.Split(consulGRPCAddr, ":")[1], + "-server-address", strings.Split(consulHTTPAddr, ":")[0], + "-server-port", strings.Split(consulHTTPAddr, ":")[1], + "-consul-api-timeout", "5s", }, flags...) responseCode := cmd.Run(cmdArgs) require.Equal(t, 0, responseCode, ui.ErrorWriter.String()) @@ -601,7 +622,8 @@ func TestRun_ConnectInjectAuthMethod(t *testing.T) { for testName, c := range cases { t.Run(testName, func(t *testing.T) { - k8s, testClient := completeSetup(t) + k8s, testSvr := completeSetup(t) + defer testSvr.Stop() caCert, jwtToken := setUpK8sServiceAccount(t, k8s, ns) // Run the command. @@ -616,10 +638,10 @@ func TestRun_ConnectInjectAuthMethod(t *testing.T) { "-timeout=1m", "-resource-prefix=" + resourcePrefix, "-k8s-namespace=" + ns, - "-addresses", strings.Split(testClient.TestServer.HTTPAddr, ":")[0], - "-http-port", strings.Split(testClient.TestServer.HTTPAddr, ":")[1], - "-grpc-port", strings.Split(testClient.TestServer.GRPCAddr, ":")[1], + "-server-address", strings.Split(testSvr.HTTPAddr, ":")[0], + "-server-port", strings.Split(testSvr.HTTPAddr, ":")[1], "-acl-binding-rule-selector=" + bindingRuleSelector, + "-consul-api-timeout", "5s", } cmdArgs = append(cmdArgs, c.flags...) responseCode := cmd.Run(cmdArgs) @@ -627,8 +649,10 @@ func TestRun_ConnectInjectAuthMethod(t *testing.T) { // Check that the auth method was created. 
bootToken := getBootToken(t, k8s, resourcePrefix, ns) - consul := testClient.APIClient - + consul, err := api.NewClient(&api.Config{ + Address: testSvr.HTTPAddr, + }) + require.NoError(t, err) authMethodName := resourcePrefix + "-k8s-auth-method" authMethod, _, err := consul.ACL().AuthMethodRead(authMethodName, &api.QueryOptions{Token: bootToken}) @@ -668,7 +692,8 @@ func TestRun_ConnectInjectAuthMethod(t *testing.T) { func TestRun_ConnectInjectAuthMethodUpdates(t *testing.T) { t.Parallel() - k8s, testClient := completeSetup(t) + k8s, testSvr := completeSetup(t) + defer testSvr.Stop() caCert, jwtToken := setUpK8sServiceAccount(t, k8s, ns) ui := cli.NewMockUi() @@ -684,18 +709,20 @@ func TestRun_ConnectInjectAuthMethodUpdates(t *testing.T) { "-timeout=1m", "-resource-prefix=" + resourcePrefix, "-k8s-namespace=" + ns, - "-addresses", strings.Split(testClient.TestServer.HTTPAddr, ":")[0], - "-http-port", strings.Split(testClient.TestServer.HTTPAddr, ":")[1], - "-grpc-port", strings.Split(testClient.TestServer.GRPCAddr, ":")[1], + "-server-address", strings.Split(testSvr.HTTPAddr, ":")[0], + "-server-port", strings.Split(testSvr.HTTPAddr, ":")[1], "-connect-inject", "-acl-binding-rule-selector=" + bindingRuleSelector, + "-consul-api-timeout", "5s", }) require.Equal(t, 0, responseCode, ui.ErrorWriter.String()) // Check that the auth method was created. bootToken := getBootToken(t, k8s, resourcePrefix, ns) - consul := testClient.APIClient - + consul, err := api.NewClient(&api.Config{ + Address: testSvr.HTTPAddr, + }) + require.NoError(t, err) authMethodName := resourcePrefix + "-k8s-auth-method" authMethod, _, err := consul.ACL().AuthMethodRead(authMethodName, &api.QueryOptions{Token: bootToken}) @@ -726,11 +753,12 @@ func TestRun_ConnectInjectAuthMethodUpdates(t *testing.T) { "-timeout=1m", "-resource-prefix=" + resourcePrefix, "-k8s-namespace=" + ns, - "-addresses", strings.Split(testClient.TestServer.HTTPAddr, ":")[0], - "-http-port", strings.Split(testClient.TestServer.HTTPAddr, ":")[1], + "-server-address", strings.Split(testSvr.HTTPAddr, ":")[0], + "-server-port", strings.Split(testSvr.HTTPAddr, ":")[1], "-acl-binding-rule-selector=" + bindingRuleSelector, "-connect-inject", "-auth-method-host=" + kubernetesHost, + "-consul-api-timeout", "5s", }) require.Equal(t, 0, responseCode, ui.ErrorWriter.String()) @@ -749,11 +777,12 @@ func TestRun_ConnectInjectAuthMethodUpdates(t *testing.T) { // Test that ACL binding rules are updated if the rule selector changes. 
func TestRun_BindingRuleUpdates(t *testing.T) { - k8s, testClient := completeSetup(t) + k8s, testSvr := completeSetup(t) setUpK8sServiceAccount(t, k8s, ns) + defer testSvr.Stop() consul, err := api.NewClient(&api.Config{ - Address: testClient.TestServer.HTTPAddr, + Address: testSvr.HTTPAddr, }) require.NoError(t, err) @@ -761,10 +790,10 @@ func TestRun_BindingRuleUpdates(t *testing.T) { commonArgs := []string{ "-resource-prefix=" + resourcePrefix, "-k8s-namespace=" + ns, - "-addresses", strings.Split(testClient.TestServer.HTTPAddr, ":")[0], - "-http-port", strings.Split(testClient.TestServer.HTTPAddr, ":")[1], - "-grpc-port", strings.Split(testClient.TestServer.GRPCAddr, ":")[1], + "-server-address", strings.Split(testSvr.HTTPAddr, ":")[0], + "-server-port", strings.Split(testSvr.HTTPAddr, ":")[1], "-connect-inject", + "-consul-api-timeout", "5s", } firstRunArgs := append(commonArgs, "-acl-binding-rule-selector=serviceaccount.name!=default", @@ -828,17 +857,18 @@ func TestRun_BindingRuleUpdates(t *testing.T) { // Test that the catalog sync policy is updated if the Consul node name changes. func TestRun_SyncPolicyUpdates(t *testing.T) { t.Parallel() - k8s, testClient := completeSetup(t) + k8s, testSvr := completeSetup(t) + defer testSvr.Stop() setUpK8sServiceAccount(t, k8s, ns) ui := cli.NewMockUi() commonArgs := []string{ "-resource-prefix=" + resourcePrefix, "-k8s-namespace=" + ns, - "-addresses", strings.Split(testClient.TestServer.HTTPAddr, ":")[0], - "-http-port", strings.Split(testClient.TestServer.HTTPAddr, ":")[1], - "-grpc-port", strings.Split(testClient.TestServer.GRPCAddr, ":")[1], + "-server-address", strings.Split(testSvr.HTTPAddr, ":")[0], + "-server-port", strings.Split(testSvr.HTTPAddr, ":")[1], "-sync-catalog", + "-consul-api-timeout", "5s", } firstRunArgs := append(commonArgs, "-sync-consul-node-name=k8s-sync", @@ -859,7 +889,7 @@ func TestRun_SyncPolicyUpdates(t *testing.T) { // Create consul client bootToken := getBootToken(t, k8s, resourcePrefix, ns) consul, err := api.NewClient(&api.Config{ - Address: testClient.TestServer.HTTPAddr, + Address: testSvr.HTTPAddr, Token: bootToken, }) require.NoError(t, err) @@ -917,9 +947,10 @@ func TestRun_ErrorsOnDuplicateACLPolicy(t *testing.T) { tokenFile := common.WriteTempFile(t, bootToken) k8s, testAgent := completeBootstrappedSetup(t, bootToken) setUpK8sServiceAccount(t, k8s, ns) + defer testAgent.Stop() consul, err := api.NewClient(&api.Config{ - Address: testAgent.TestServer.HTTPAddr, + Address: testAgent.HTTPAddr, Token: bootToken, }) require.NoError(t, err) @@ -944,10 +975,10 @@ func TestRun_ErrorsOnDuplicateACLPolicy(t *testing.T) { "-bootstrap-token-file", tokenFile, "-resource-prefix=" + resourcePrefix, "-k8s-namespace=" + ns, - "-addresses", strings.Split(testAgent.TestServer.HTTPAddr, ":")[0], - "-http-port", strings.Split(testAgent.TestServer.HTTPAddr, ":")[1], - "-grpc-port", strings.Split(testAgent.TestServer.GRPCAddr, ":")[1], + "-server-address", strings.Split(testAgent.HTTPAddr, ":")[0], + "-server-port", strings.Split(testAgent.HTTPAddr, ":")[1], "-sync-catalog", + "-consul-api-timeout", "5s", } responseCode := cmd.Run(cmdArgs) @@ -969,7 +1000,7 @@ func TestRun_DelayedServers(t *testing.T) { t.Parallel() k8s := fake.NewSimpleClientset() setUpK8sServiceAccount(t, k8s, ns) - randomPorts := freeport.GetN(t, 7) + randomPorts := freeport.GetN(t, 6) ui := cli.NewMockUi() cmd := Command{ @@ -986,16 +1017,16 @@ func TestRun_DelayedServers(t *testing.T) { "-timeout=1m", "-resource-prefix=" + resourcePrefix, 
"-k8s-namespace=" + ns, - "-addresses=127.0.0.1", - "-http-port=" + strconv.Itoa(randomPorts[1]), - "-grpc-port=" + strconv.Itoa(randomPorts[2]), + "-server-address=127.0.0.1", + "-server-port=" + strconv.Itoa(randomPorts[1]), + "-consul-api-timeout", "5s", }) close(done) }() // Asynchronously start the test server after a delay. testServerReady := make(chan bool) - var srv *test.TestServerClient + var srv *testutil.TestServer go func() { // Start the servers after a delay between 100 and 500ms. // It's randomized to ensure we're not relying on specific timing. @@ -1003,17 +1034,16 @@ func TestRun_DelayedServers(t *testing.T) { time.Sleep(time.Duration(delay) * time.Millisecond) var err error - srv = test.TestServerWithMockConnMgrWatcher(t, func(c *testutil.TestServerConfig) { + srv, err = testutil.NewTestServerConfigT(t, func(c *testutil.TestServerConfig) { c.ACL.Enabled = true c.Ports = &testutil.TestPortConfig{ DNS: randomPorts[0], HTTP: randomPorts[1], - GRPC: randomPorts[2], - HTTPS: randomPorts[3], - SerfLan: randomPorts[4], - SerfWan: randomPorts[5], - Server: randomPorts[6], + HTTPS: randomPorts[2], + SerfLan: randomPorts[3], + SerfWan: randomPorts[4], + Server: randomPorts[5], } }) require.NoError(t, err) @@ -1023,7 +1053,7 @@ func TestRun_DelayedServers(t *testing.T) { // Wait for server to come up select { case <-testServerReady: - defer srv.TestServer.Stop() + defer srv.Stop() case <-time.After(5 * time.Second): require.FailNow(t, "test server took longer than 5s to come up") } @@ -1041,7 +1071,7 @@ func TestRun_DelayedServers(t *testing.T) { // Check that it has the right policies. consul, err := api.NewClient(&api.Config{ - Address: srv.TestServer.HTTPAddr, + Address: srv.HTTPAddr, Token: bootToken, }) require.NoError(t, err) @@ -1116,15 +1146,12 @@ func TestRun_NoLeader(t *testing.T) { serverURL, err := url.Parse(consulServer.URL) require.NoError(t, err) - port, err := strconv.Atoi(serverURL.Port()) - require.NoError(t, err) // Run the command. ui := cli.NewMockUi() cmd := Command{ UI: ui, clientset: k8s, - watcher: test.MockConnMgrForIPAndPort(serverURL.Hostname(), port), } done := make(chan bool) @@ -1134,8 +1161,9 @@ func TestRun_NoLeader(t *testing.T) { "-timeout=1m", "-resource-prefix=" + resourcePrefix, "-k8s-namespace=" + ns, - "-addresses=" + serverURL.Hostname(), - "-http-port=" + serverURL.Port(), + "-server-address=" + serverURL.Hostname(), + "-server-port=" + serverURL.Port(), + "-consul-api-timeout", "5s", }) close(done) }() @@ -1143,8 +1171,8 @@ func TestRun_NoLeader(t *testing.T) { select { case <-done: require.Equal(t, 0, responseCode, ui.ErrorWriter.String()) - case <-time.After(15 * time.Second): - require.FailNow(t, "command did not complete within 15s") + case <-time.After(5 * time.Second): + require.FailNow(t, "command did not complete within 5s") } // Test that the bootstrap kube secret is created. @@ -1372,22 +1400,20 @@ func TestRun_ClientPolicyAndBindingRuleRetry(t *testing.T) { serverURL, err := url.Parse(consulServer.URL) require.NoError(t, err) - port, err := strconv.Atoi(serverURL.Port()) - require.NoError(t, err) // Run the command. 
ui := cli.NewMockUi() cmd := Command{ UI: ui, clientset: k8s, - watcher: test.MockConnMgrForIPAndPort(serverURL.Hostname(), port), } responseCode := cmd.Run([]string{ "-timeout=1m", "-resource-prefix=" + resourcePrefix, "-k8s-namespace=" + ns, - "-addresses=" + serverURL.Hostname(), - "-http-port=" + serverURL.Port(), + "-server-address=" + serverURL.Hostname(), + "-server-port=" + serverURL.Port(), + "-consul-api-timeout", "5s", }) require.Equal(t, 0, responseCode, ui.ErrorWriter.String()) @@ -1505,16 +1531,15 @@ func TestRun_AlreadyBootstrapped(t *testing.T) { serverURL, err := url.Parse(consulServer.URL) require.NoError(t, err) - port, err := strconv.Atoi(serverURL.Port()) - require.NoError(t, err) setUpK8sServiceAccount(t, k8s, ns) cmdArgs := []string{ "-timeout=500ms", "-resource-prefix=" + resourcePrefix, "-k8s-namespace=" + ns, - "-addresses=" + serverURL.Hostname(), - "-http-port=" + serverURL.Port(), + "-server-address=" + serverURL.Hostname(), + "-server-port=" + serverURL.Port(), + "-consul-api-timeout", "5s", } // Create the bootstrap secret. @@ -1536,7 +1561,7 @@ func TestRun_AlreadyBootstrapped(t *testing.T) { // Write token to a file. bootTokenFile, err := os.CreateTemp("", "") require.NoError(t, err) - defer os.RemoveAll(bootTokenFile.Name()) + defer os.Remove(bootTokenFile.Name()) _, err = bootTokenFile.WriteString("old-token") require.NoError(t, err) @@ -1550,7 +1575,6 @@ func TestRun_AlreadyBootstrapped(t *testing.T) { cmd := Command{ UI: ui, clientset: k8s, - watcher: test.MockConnMgrForIPAndPort(serverURL.Hostname(), port), } responseCode := cmd.Run(cmdArgs) @@ -1637,14 +1661,15 @@ func TestRun_AlreadyBootstrapped_ServerTokenExists(t *testing.T) { bootToken := "aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee" k8s, testAgent := completeBootstrappedSetup(t, bootToken) setUpK8sServiceAccount(t, k8s, ns) + defer testAgent.Stop() cmdArgs := []string{ "-timeout=1m", "-k8s-namespace", ns, - "-addresses", strings.Split(testAgent.TestServer.HTTPAddr, ":")[0], - "-http-port", strings.Split(testAgent.TestServer.HTTPAddr, ":")[1], - "-grpc-port", strings.Split(testAgent.TestServer.GRPCAddr, ":")[1], + "-server-address", strings.Split(testAgent.HTTPAddr, ":")[0], + "-server-port", strings.Split(testAgent.HTTPAddr, ":")[1], "-resource-prefix", resourcePrefix, + "-consul-api-timeout", "5s", } if tokenInK8sSecret { @@ -1661,7 +1686,7 @@ func TestRun_AlreadyBootstrapped_ServerTokenExists(t *testing.T) { // Write token to a file. bootTokenFile, err := os.CreateTemp("", "") require.NoError(t, err) - defer os.RemoveAll(bootTokenFile.Name()) + defer os.Remove(bootTokenFile.Name()) _, err = bootTokenFile.WriteString(bootToken) require.NoError(t, err) @@ -1671,7 +1696,7 @@ func TestRun_AlreadyBootstrapped_ServerTokenExists(t *testing.T) { } consulClient, err := api.NewClient(&api.Config{ - Address: testAgent.TestServer.HTTPAddr, + Address: testAgent.HTTPAddr, Token: bootToken, }) require.NoError(t, err) @@ -1681,7 +1706,6 @@ func TestRun_AlreadyBootstrapped_ServerTokenExists(t *testing.T) { clientset: k8s, } - cmd.init() // Create the server policy and token _before_ we run the command. 
agentPolicyRules, err := cmd.agentRules() require.NoError(t, err) @@ -1692,7 +1716,7 @@ func TestRun_AlreadyBootstrapped_ServerTokenExists(t *testing.T) { }, nil) require.NoError(t, err) _, _, err = consulClient.ACL().TokenCreate(&api.ACLToken{ - Description: fmt.Sprintf("Server Token for %s", strings.Split(testAgent.TestServer.HTTPAddr, ":")[0]), + Description: fmt.Sprintf("Server Token for %s", strings.Split(testAgent.HTTPAddr, ":")[0]), Policies: []*api.ACLTokenPolicyLink{ { Name: policy.Name, @@ -1756,25 +1780,23 @@ func TestRun_SkipBootstrapping_WhenServersAreDisabled(t *testing.T) { serverURL, err := url.Parse(consulServer.URL) require.NoError(t, err) - port, err := strconv.Atoi(serverURL.Port()) - require.NoError(t, err) // Run the command. ui := cli.NewMockUi() cmd := Command{ UI: ui, clientset: k8s, - watcher: test.MockConnMgrForIPAndPort(serverURL.Hostname(), port), } responseCode := cmd.Run([]string{ "-timeout=500ms", "-resource-prefix=" + resourcePrefix, "-k8s-namespace=" + ns, - "-addresses=" + serverURL.Hostname(), - "-http-port=" + serverURL.Port(), + "-server-address=" + serverURL.Hostname(), + "-server-port=" + serverURL.Port(), "-bootstrap-token-file=" + tokenFile, "-set-server-tokens=false", "-client=false", // disable client token, so there are fewer calls + "-consul-api-timeout", "5s", }) require.Equal(t, 0, responseCode, ui.ErrorWriter.String()) @@ -1796,30 +1818,69 @@ func TestRun_SkipBootstrapping_WhenServersAreDisabled(t *testing.T) { // Test that we exit after timeout. func TestRun_Timeout(t *testing.T) { t.Parallel() - k8s, testClient := completeSetup(t) + k8s := fake.NewSimpleClientset() + ui := cli.NewMockUi() + cmd := Command{ + UI: ui, + clientset: k8s, + } + + responseCode := cmd.Run([]string{ + "-timeout=500ms", + "-resource-prefix=" + resourcePrefix, + "-k8s-namespace=" + ns, + "-server-address=foo", + "-consul-api-timeout", "5s", + }) + require.Equal(t, 1, responseCode, ui.ErrorWriter.String()) +} + +// Test that the bootstrapping process can make calls to Consul API over HTTPS +// when the consul agent is configured with HTTPS. +func TestRun_HTTPS(t *testing.T) { + t.Parallel() + k8s := fake.NewSimpleClientset() setUpK8sServiceAccount(t, k8s, ns) - _, err := api.NewClient(&api.Config{ - Address: testClient.TestServer.HTTPAddr, + caFile, certFile, keyFile := test.GenerateServerCerts(t) + + srv, err := testutil.NewTestServerConfigT(t, func(c *testutil.TestServerConfig) { + c.ACL.Enabled = true + + c.CAFile = caFile + c.CertFile = certFile + c.KeyFile = keyFile }) require.NoError(t, err) - ui := cli.NewMockUi() + defer srv.Stop() + // Run the command. 
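// [Editor's note — not part of the patch] TestRun_HTTPS here drives the
// command against a TLS-enabled test server. A minimal sketch, assuming
// github.com/hashicorp/consul/api, of the client-side configuration the
// -use-https/-consul-ca-cert/-consul-tls-server-name flags translate to;
// the test body resumes below.

client, err := api.NewClient(&api.Config{
	Address: srv.HTTPSAddr,
	Scheme:  "https",
	TLSConfig: api.TLSConfig{
		Address: "server.dc1.consul", // must match a SAN in the server cert
		CAFile:  caFile,              // CA that signed the server cert
	},
})
require.NoError(t, err)
_, err = client.Status().Leader() // any call confirms the TLS handshake works
require.NoError(t, err)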
+ ui := cli.NewMockUi() cmd := Command{ UI: ui, clientset: k8s, - watcher: test.MockConnMgrForIPAndPort("localhost", 12345), } responseCode := cmd.Run([]string{ - "-timeout=500ms", + "-timeout=1m", "-resource-prefix=" + resourcePrefix, "-k8s-namespace=" + ns, - "-addresses", strings.Split(testClient.TestServer.HTTPAddr, ":")[0], - "-http-port", strings.Split(testClient.TestServer.HTTPAddr, ":")[1], - "-grpc-port", strings.Split(testClient.TestServer.GRPCAddr, ":")[1], + "-use-https", + "-consul-tls-server-name", "server.dc1.consul", + "-consul-ca-cert", caFile, + "-server-address=" + strings.Split(srv.HTTPSAddr, ":")[0], + "-server-port=" + strings.Split(srv.HTTPSAddr, ":")[1], + "-consul-api-timeout", "5s", }) - require.Equal(t, 1, responseCode, ui.ErrorWriter.String()) + require.Equal(t, 0, responseCode, ui.ErrorWriter.String()) + + // Test that the bootstrap token is created to make sure the bootstrapping succeeded. + // The presence of the bootstrap token tells us that the API calls to Consul have been successful. + tokenSecret, err := k8s.CoreV1().Secrets(ns).Get(context.Background(), resourcePrefix+"-bootstrap-acl-token", metav1.GetOptions{}) + require.NoError(t, err) + require.NotNil(t, tokenSecret) + _, ok := tokenSecret.Data["token"] + require.True(t, ok) } // Test that the ACL replication token created from the primary DC can be used @@ -1827,7 +1888,8 @@ func TestRun_Timeout(t *testing.T) { func TestRun_ACLReplicationTokenValid(t *testing.T) { t.Parallel() - secondaryK8s, secondaryConsulClient, secondaryAddr, secondaryGRPCAddr, aclReplicationToken := completeReplicatedSetup(t) + secondaryK8s, secondaryConsulClient, secondaryAddr, aclReplicationToken, clean := completeReplicatedSetup(t) + defer clean() setUpK8sServiceAccount(t, secondaryK8s, ns) // completeReplicatedSetup ran the command in our primary dc so now we @@ -1843,14 +1905,14 @@ func TestRun_ACLReplicationTokenValid(t *testing.T) { "-federation", "-timeout=1m", "-k8s-namespace=" + ns, - "-addresses", strings.Split(secondaryAddr, ":")[0], - "-http-port", strings.Split(secondaryAddr, ":")[1], - "-grpc-port", strings.Split(secondaryGRPCAddr, ":")[1], + "-server-address", strings.Split(secondaryAddr, ":")[0], + "-server-port", strings.Split(secondaryAddr, ":")[1], "-resource-prefix=" + resourcePrefix, "-acl-replication-token-file", tokenFile, "-auth-method-host=" + "https://my-kube.com", "-client", "-mesh-gateway", + "-consul-api-timeout", "5s", } responseCode := secondaryCmd.Run(secondaryCmdArgs) require.Equal(t, 0, responseCode, secondaryUI.ErrorWriter.String()) @@ -1885,8 +1947,9 @@ func TestRun_AnonPolicy_IgnoredWithReplication(t *testing.T) { t.Run(flag, func(t *testing.T) { bootToken := "aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee" tokenFile := common.WriteTempFile(t, bootToken) - k8s, consul, serverAddr, serverGRPCAddr := mockReplicatedSetup(t, bootToken) + k8s, consul, serverAddr, cleanup := mockReplicatedSetup(t, bootToken) setUpK8sServiceAccount(t, k8s, ns) + defer cleanup() // Run the command. 
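// [Editor's note — not part of the patch] Tests in this file hand tokens to
// the command through temp files (common.WriteTempFile above presumably wraps
// the same steps). A minimal sketch of the manual variant; note os.Remove is
// the right cleanup for a single file, whereas the os.RemoveAll calls fixed
// elsewhere in this patch are meant for directory trees. The test resumes
// below.

tokenFile, err := os.CreateTemp("", "bootstraptoken")
require.NoError(t, err)
defer os.Remove(tokenFile.Name()) // one file, not a tree
_, err = tokenFile.WriteString(bootToken)
require.NoError(t, err)
require.NoError(t, tokenFile.Close())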
ui := cli.NewMockUi() @@ -1899,10 +1962,10 @@ func TestRun_AnonPolicy_IgnoredWithReplication(t *testing.T) { "-timeout=1m", "-k8s-namespace=" + ns, "-acl-replication-token-file", tokenFile, - "-addresses", strings.Split(serverAddr, ":")[0], - "-http-port", strings.Split(serverAddr, ":")[1], - "-grpc-port", strings.Split(serverGRPCAddr, ":")[1], + "-server-address", strings.Split(serverAddr, ":")[0], + "-server-port", strings.Split(serverAddr, ":")[1], "-resource-prefix=" + resourcePrefix, + "-consul-api-timeout", "5s", }, flag) responseCode := cmd.Run(cmdArgs) require.Equal(t, 0, responseCode, ui.ErrorWriter.String()) @@ -1919,6 +1982,63 @@ func TestRun_AnonPolicy_IgnoredWithReplication(t *testing.T) { } } +// Test that when the -server-address contains a cloud-auto join string, +// we are still able to bootstrap ACLs. +func TestRun_CloudAutoJoin(t *testing.T) { + t.Parallel() + + k8s, testSvr := completeSetup(t) + defer testSvr.Stop() + setUpK8sServiceAccount(t, k8s, ns) + + // create a mock provider + // that always returns the server address + // provided through the cloud-auto join string + provider := new(mocks.MockProvider) + // create stubs for our MockProvider so that it returns + // the address of the test agent + provider.On("Addrs", mock.Anything, mock.Anything).Return([]string{"127.0.0.1"}, nil) + + // Run the command. + ui := cli.NewMockUi() + cmd := Command{ + UI: ui, + clientset: k8s, + providers: map[string]discover.Provider{"mock": provider}, + } + args := []string{ + "-timeout=1m", + "-k8s-namespace=" + ns, + "-resource-prefix=" + resourcePrefix, + "-server-address", "provider=mock", + "-server-port", strings.Split(testSvr.HTTPAddr, ":")[1], + "-consul-api-timeout", "5s", + } + responseCode := cmd.Run(args) + require.Equal(t, 0, responseCode, ui.ErrorWriter.String()) + + // check that the provider has been called + provider.AssertNumberOfCalls(t, "Addrs", 1) + + // Test that the bootstrap kube secret is created. + bootToken := getBootToken(t, k8s, resourcePrefix, ns) + + // Check that it has the right policies. + consul, err := api.NewClient(&api.Config{ + Address: testSvr.HTTPAddr, + Token: bootToken, + }) + require.NoError(t, err) + tokenData, _, err := consul.ACL().TokenReadSelf(nil) + require.NoError(t, err) + require.Equal(t, "global-management", tokenData.Policies[0].Name) + + // Check that the agent policy was created. + agentPolicy := policyExists(t, "agent-token", consul) + // Should be a global policy. + require.Len(t, agentPolicy.Datacenters, 0) +} + func TestRun_GatewayErrors(t *testing.T) { t.Parallel() @@ -1947,7 +2067,8 @@ func TestRun_GatewayErrors(t *testing.T) { for testName, c := range cases { t.Run(testName, func(tt *testing.T) { - k8s, testClient := completeSetup(tt) + k8s, testSvr := completeSetup(tt) + defer testSvr.Stop() setUpK8sServiceAccount(t, k8s, ns) require := require.New(tt) @@ -1961,9 +2082,9 @@ func TestRun_GatewayErrors(t *testing.T) { "-timeout=500ms", "-resource-prefix=" + resourcePrefix, "-k8s-namespace=" + ns, - "-addresses", strings.Split(testClient.TestServer.HTTPAddr, ":")[0], - "-http-port", strings.Split(testClient.TestServer.HTTPAddr, ":")[1], - "-grpc-port", strings.Split(testClient.TestServer.GRPCAddr, ":")[1], + "-server-address", strings.Split(testSvr.HTTPAddr, ":")[0], + "-server-port", strings.Split(testSvr.HTTPAddr, ":")[1], + "-consul-api-timeout", "5s", } cmdArgs = append(cmdArgs, c.flags...) 
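// [Editor's note — not part of the patch] TestRun_CloudAutoJoin above stubs
// go-discover with a generated testify mock. For reference, a minimal
// hand-rolled stand-in satisfying the same discover.Provider interface,
// assuming github.com/hashicorp/go-discover and the stdlib "log" package;
// fixedProvider is a hypothetical name. The gateway test resumes below.

// fixedProvider always discovers the same canned addresses.
type fixedProvider struct{ addrs []string }

// Addrs ignores the "provider=..." args and returns the canned addresses.
func (p *fixedProvider) Addrs(args map[string]string, l *log.Logger) ([]string, error) {
	return p.addrs, nil
}

func (p *fixedProvider) Help() string { return "fixed: returns static addresses" }

// Wired in exactly like the mock above:
//   providers: map[string]discover.Provider{"mock": &fixedProvider{addrs: []string{"127.0.0.1"}}}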
responseCode := cmd.Run(cmdArgs) @@ -1996,7 +2117,7 @@ func TestRun_PoliciesAndBindingRulesForACLLogin_PrimaryDatacenter(t *testing.T) TestName: "Connect Inject", TokenFlags: []string{"-connect-inject"}, PolicyNames: []string{"connect-inject-policy"}, - Roles: []string{resourcePrefix + "-connect-inject-acl-role"}, + Roles: []string{resourcePrefix + "-connect-injector-acl-role"}, }, { TestName: "Sync Catalog", @@ -2033,9 +2154,9 @@ func TestRun_PoliciesAndBindingRulesForACLLogin_PrimaryDatacenter(t *testing.T) TokenFlags: []string{"-terminating-gateway-name=terminating", "-terminating-gateway-name=gateway", "-terminating-gateway-name=another-gateway"}, - PolicyNames: []string{"terminating-policy", - "gateway-policy", - "another-gateway-policy"}, + PolicyNames: []string{resourcePrefix + "-terminating-policy", + resourcePrefix + "-gateway-policy", + resourcePrefix + "-another-gateway-policy"}, Roles: []string{resourcePrefix + "-terminating-acl-role", resourcePrefix + "-gateway-acl-role", resourcePrefix + "-another-gateway-acl-role"}, @@ -2045,9 +2166,9 @@ func TestRun_PoliciesAndBindingRulesForACLLogin_PrimaryDatacenter(t *testing.T) TokenFlags: []string{"-ingress-gateway-name=ingress", "-ingress-gateway-name=gateway", "-ingress-gateway-name=another-gateway"}, - PolicyNames: []string{"ingress-policy", - "gateway-policy", - "another-gateway-policy"}, + PolicyNames: []string{resourcePrefix + "-ingress-policy", + resourcePrefix + "-gateway-policy", + resourcePrefix + "-another-gateway-policy"}, Roles: []string{resourcePrefix + "-ingress-acl-role", resourcePrefix + "-gateway-acl-role", resourcePrefix + "-another-gateway-acl-role"}, @@ -2055,7 +2176,8 @@ func TestRun_PoliciesAndBindingRulesForACLLogin_PrimaryDatacenter(t *testing.T) } for _, c := range cases { t.Run(c.TestName, func(t *testing.T) { - k8s, testClient := completeSetup(t) + k8s, testSvr := completeSetup(t) + defer testSvr.Stop() setUpK8sServiceAccount(t, k8s, ns) // Run the command. @@ -2068,9 +2190,9 @@ func TestRun_PoliciesAndBindingRulesForACLLogin_PrimaryDatacenter(t *testing.T) "-timeout=500ms", "-resource-prefix=" + resourcePrefix, "-k8s-namespace=" + ns, - "-addresses", strings.Split(testClient.TestServer.HTTPAddr, ":")[0], - "-http-port", strings.Split(testClient.TestServer.HTTPAddr, ":")[1], - "-grpc-port", strings.Split(testClient.TestServer.GRPCAddr, ":")[1], + "-server-address", strings.Split(testSvr.HTTPAddr, ":")[0], + "-server-port", strings.Split(testSvr.HTTPAddr, ":")[1], + "-consul-api-timeout", "5s", }, c.TokenFlags...) 
cmd.init() responseCode := cmd.Run(cmdArgs) @@ -2078,7 +2200,7 @@ func TestRun_PoliciesAndBindingRulesForACLLogin_PrimaryDatacenter(t *testing.T) bootToken := getBootToken(t, k8s, resourcePrefix, ns) consul, err := api.NewClient(&api.Config{ - Address: testClient.TestServer.HTTPAddr, + Address: testSvr.HTTPAddr, Token: bootToken, }) require.NoError(t, err) @@ -2154,7 +2276,7 @@ func TestRun_PoliciesAndBindingRulesACLLogin_SecondaryDatacenter(t *testing.T) { TestName: "Connect Inject", TokenFlags: []string{"-connect-inject"}, PolicyNames: []string{"connect-inject-policy-" + secondaryDatacenter}, - Roles: []string{resourcePrefix + "-connect-inject-acl-role-" + secondaryDatacenter}, + Roles: []string{resourcePrefix + "-connect-injector-acl-role-" + secondaryDatacenter}, GlobalAuthMethod: false, }, { @@ -2197,9 +2319,9 @@ func TestRun_PoliciesAndBindingRulesACLLogin_SecondaryDatacenter(t *testing.T) { TokenFlags: []string{"-terminating-gateway-name=terminating", "-terminating-gateway-name=gateway", "-terminating-gateway-name=another-gateway"}, - PolicyNames: []string{"terminating-policy-" + secondaryDatacenter, - "gateway-policy-" + secondaryDatacenter, - "another-gateway-policy-" + secondaryDatacenter}, + PolicyNames: []string{resourcePrefix + "-terminating-policy-" + secondaryDatacenter, + resourcePrefix + "-gateway-policy-" + secondaryDatacenter, + resourcePrefix + "-another-gateway-policy-" + secondaryDatacenter}, Roles: []string{resourcePrefix + "-terminating-acl-role-" + secondaryDatacenter, resourcePrefix + "-gateway-acl-role-" + secondaryDatacenter, resourcePrefix + "-another-gateway-acl-role-" + secondaryDatacenter}, @@ -2210,9 +2332,9 @@ func TestRun_PoliciesAndBindingRulesACLLogin_SecondaryDatacenter(t *testing.T) { TokenFlags: []string{"-ingress-gateway-name=ingress", "-ingress-gateway-name=gateway", "-ingress-gateway-name=another-gateway"}, - PolicyNames: []string{"ingress-policy-" + secondaryDatacenter, - "gateway-policy-" + secondaryDatacenter, - "another-gateway-policy-" + secondaryDatacenter}, + PolicyNames: []string{resourcePrefix + "-ingress-policy-" + secondaryDatacenter, + resourcePrefix + "-gateway-policy-" + secondaryDatacenter, + resourcePrefix + "-another-gateway-policy-" + secondaryDatacenter}, Roles: []string{resourcePrefix + "-ingress-acl-role-" + secondaryDatacenter, resourcePrefix + "-gateway-acl-role-" + secondaryDatacenter, resourcePrefix + "-another-gateway-acl-role-" + secondaryDatacenter}, @@ -2223,8 +2345,9 @@ func TestRun_PoliciesAndBindingRulesACLLogin_SecondaryDatacenter(t *testing.T) { t.Run(c.TestName, func(t *testing.T) { bootToken := "aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee" tokenFile := common.WriteTempFile(t, bootToken) - k8s, consul, consulHTTPAddr, consulGRPCAddr := mockReplicatedSetup(t, bootToken) + k8s, consul, consulHTTPAddr, cleanup := mockReplicatedSetup(t, bootToken) setUpK8sServiceAccount(t, k8s, ns) + defer cleanup() // Run the command. ui := cli.NewMockUi() @@ -2239,9 +2362,9 @@ func TestRun_PoliciesAndBindingRulesACLLogin_SecondaryDatacenter(t *testing.T) { "-k8s-namespace=" + ns, "-auth-method-host=" + "https://my-kube.com", "-acl-replication-token-file", tokenFile, - "-addresses", strings.Split(consulHTTPAddr, ":")[0], - "-http-port", strings.Split(consulHTTPAddr, ":")[1], - "-grpc-port", strings.Split(consulGRPCAddr, ":")[1], + "-server-address", strings.Split(consulHTTPAddr, ":")[0], + "-server-port", strings.Split(consulHTTPAddr, ":")[1], + "-consul-api-timeout", "5s", }, c.TokenFlags...) 
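// [Editor's note — not part of the patch] The case tables above changed
// because gateway policy names now carry the Helm resource prefix, with an
// extra "-<dc>" suffix in secondary datacenters. A minimal sketch of that
// naming rule as a hypothetical helper (the command's real logic lives in
// withPrefix and createACLPolicyRoleAndBindingRule); the test run resumes
// below.

// gatewayPolicyName mirrors the names the tests now assert:
// "<prefix>-<gateway>-policy" in the primary DC and
// "<prefix>-<gateway>-policy-<dc>" in secondaries.
func gatewayPolicyName(prefix, gateway, dc string, secondary bool) string {
	name := fmt.Sprintf("%s-%s-policy", prefix, gateway)
	if secondary {
		name += "-" + dc
	}
	return name
}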
cmd.init() responseCode := cmd.Run(cmdArgs) @@ -2319,7 +2442,7 @@ func TestRun_ValidateLoginToken_PrimaryDatacenter(t *testing.T) { { ComponentName: "connect-injector", TokenFlags: []string{"-connect-inject"}, - Roles: []string{resourcePrefix + "-connect-inject-acl-role"}, + Roles: []string{resourcePrefix + "-connect-injector-acl-role"}, GlobalToken: false, }, { @@ -2335,11 +2458,10 @@ func TestRun_ValidateLoginToken_PrimaryDatacenter(t *testing.T) { GlobalToken: false, }, { - ComponentName: "snapshot-agent", - TokenFlags: []string{"-snapshot-agent"}, - Roles: []string{resourcePrefix + "-snapshot-agent-acl-role"}, - GlobalToken: false, - ServiceAccountName: resourcePrefix + "-server", + ComponentName: "snapshot-agent", + TokenFlags: []string{"-snapshot-agent"}, + Roles: []string{resourcePrefix + "-snapshot-agent-acl-role"}, + GlobalToken: false, }, { ComponentName: "mesh-gateway", @@ -2382,7 +2504,8 @@ func TestRun_ValidateLoginToken_PrimaryDatacenter(t *testing.T) { serviceAccountName = c.ServiceAccountName } - k8s, testClient := completeSetup(t) + k8s, testSvr := completeSetup(t) + defer testSvr.Stop() _, jwtToken := setUpK8sServiceAccount(t, k8s, ns) k8sMockServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { @@ -2408,16 +2531,16 @@ func TestRun_ValidateLoginToken_PrimaryDatacenter(t *testing.T) { "-resource-prefix=" + resourcePrefix, "-k8s-namespace=" + ns, "-auth-method-host=" + k8sMockServer.URL, - "-addresses", strings.Split(testClient.TestServer.HTTPAddr, ":")[0], - "-http-port", strings.Split(testClient.TestServer.HTTPAddr, ":")[1], - "-grpc-port", strings.Split(testClient.TestServer.GRPCAddr, ":")[1], + "-server-address", strings.Split(testSvr.HTTPAddr, ":")[0], + "-server-port", strings.Split(testSvr.HTTPAddr, ":")[1], + "-consul-api-timeout", "5s", }, c.TokenFlags...) 
cmd.init() responseCode := cmd.Run(cmdArgs) require.Equal(t, 0, responseCode, ui.ErrorWriter.String()) client, err := api.NewClient(&api.Config{ - Address: testClient.TestServer.HTTPAddr, + Address: testSvr.HTTPAddr, }) require.NoError(t, err) @@ -2461,7 +2584,7 @@ func TestRun_ValidateLoginToken_SecondaryDatacenter(t *testing.T) { { ComponentName: "connect-injector", TokenFlags: []string{"-connect-inject"}, - Roles: []string{resourcePrefix + "-connect-inject-acl-role-dc2"}, + Roles: []string{resourcePrefix + "-connect-injector-acl-role-dc2"}, GlobalAuthMethod: false, GlobalToken: false, }, @@ -2480,12 +2603,11 @@ func TestRun_ValidateLoginToken_SecondaryDatacenter(t *testing.T) { GlobalToken: true, }, { - ComponentName: "snapshot-agent", - TokenFlags: []string{"-snapshot-agent"}, - Roles: []string{resourcePrefix + "-snapshot-agent-acl-role-dc2"}, - GlobalAuthMethod: false, - GlobalToken: false, - ServiceAccountName: resourcePrefix + "-server", + ComponentName: "snapshot-agent", + TokenFlags: []string{"-snapshot-agent"}, + Roles: []string{resourcePrefix + "-snapshot-agent-acl-role-dc2"}, + GlobalAuthMethod: false, + GlobalToken: false, }, { ComponentName: "mesh-gateway", @@ -2537,7 +2659,8 @@ func TestRun_ValidateLoginToken_SecondaryDatacenter(t *testing.T) { serviceAccountName = c.ServiceAccountName } - k8s, _, consulHTTPAddr, consulGRPCAddr := mockReplicatedSetup(t, bootToken) + k8s, _, consulHTTPAddr, cleanup := mockReplicatedSetup(t, bootToken) + defer cleanup() _, jwtToken := setUpK8sServiceAccount(t, k8s, ns) k8sMockServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { @@ -2565,9 +2688,9 @@ func TestRun_ValidateLoginToken_SecondaryDatacenter(t *testing.T) { "-k8s-namespace=" + ns, "-acl-replication-token-file", tokenFile, "-auth-method-host=" + k8sMockServer.URL, - "-addresses", strings.Split(consulHTTPAddr, ":")[0], - "-http-port", strings.Split(consulHTTPAddr, ":")[1], - "-grpc-port", strings.Split(consulGRPCAddr, ":")[1], + "-server-address", strings.Split(consulHTTPAddr, ":")[0], + "-server-port", strings.Split(consulHTTPAddr, ":")[1], + "-consul-api-timeout", "5s", }, c.TokenFlags...) cmd.init() responseCode := cmd.Run(cmdArgs) @@ -2605,8 +2728,9 @@ func TestRun_ValidateLoginToken_SecondaryDatacenter(t *testing.T) { func TestRun_PrimaryDatacenter_ComponentAuthMethod(t *testing.T) { t.Parallel() - k8s, testClient := completeSetup(t) + k8s, testSvr := completeSetup(t) setUpK8sServiceAccount(t, k8s, ns) + defer testSvr.Stop() // Run the command. ui := cli.NewMockUi() @@ -2618,10 +2742,10 @@ func TestRun_PrimaryDatacenter_ComponentAuthMethod(t *testing.T) { cmdArgs := []string{ "-timeout=1m", "-k8s-namespace=" + ns, - "-addresses", strings.Split(testClient.TestServer.HTTPAddr, ":")[0], - "-http-port", strings.Split(testClient.TestServer.HTTPAddr, ":")[1], - "-grpc-port", strings.Split(testClient.TestServer.GRPCAddr, ":")[1], + "-server-address", strings.Split(testSvr.HTTPAddr, ":")[0], + "-server-port", strings.Split(testSvr.HTTPAddr, ":")[1], "-resource-prefix=" + resourcePrefix, + "-consul-api-timeout", "5s", } responseCode := cmd.Run(cmdArgs) @@ -2629,9 +2753,10 @@ func TestRun_PrimaryDatacenter_ComponentAuthMethod(t *testing.T) { // Check that the expected policy was created. 
bootToken := getBootToken(t, k8s, resourcePrefix, ns) - consulConfig := testClient.Cfg - consulConfig.APIClientConfig.Token = bootToken - consulClient, err := api.NewClient(consulConfig.APIClientConfig) + consulClient, err := api.NewClient(&api.Config{ + Address: testSvr.HTTPAddr, + Token: bootToken, + }) require.NoError(t, err) authMethod, _, err := consulClient.ACL().AuthMethodRead(resourcePrefix+"-k8s-component-auth-method", &api.QueryOptions{}) require.NoError(t, err) @@ -2645,8 +2770,9 @@ func TestRun_SecondaryDatacenter_ComponentAuthMethod(t *testing.T) { bootToken := "aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee" tokenFile := common.WriteTempFile(t, bootToken) - k8s, consul, consulHTTPAddr, consulGRPCAddr := mockReplicatedSetup(t, bootToken) + k8s, consul, consulHTTPAddr, cleanup := mockReplicatedSetup(t, bootToken) setUpK8sServiceAccount(t, k8s, ns) + defer cleanup() // Run the command. ui := cli.NewMockUi() @@ -2661,10 +2787,10 @@ func TestRun_SecondaryDatacenter_ComponentAuthMethod(t *testing.T) { "-k8s-namespace=" + ns, "-auth-method-host=" + "https://my-kube.com", "-acl-replication-token-file", tokenFile, - "-addresses", strings.Split(consulHTTPAddr, ":")[0], - "-http-port", strings.Split(consulHTTPAddr, ":")[1], - "-grpc-port", strings.Split(consulGRPCAddr, ":")[1], + "-server-address", strings.Split(consulHTTPAddr, ":")[0], + "-server-port", strings.Split(consulHTTPAddr, ":")[1], "-resource-prefix=" + resourcePrefix, + "-consul-api-timeout", "5s", } responseCode := cmd.Run(cmdArgs) @@ -2681,25 +2807,29 @@ func TestRun_SecondaryDatacenter_ComponentAuthMethod(t *testing.T) { } // Set up test consul agent and kubernetes cluster. -func completeSetup(t *testing.T) (*fake.Clientset, *test.TestServerClient) { +func completeSetup(t *testing.T) (*fake.Clientset, *testutil.TestServer) { k8s := fake.NewSimpleClientset() - testClient := test.TestServerWithMockConnMgrWatcher(t, func(c *testutil.TestServerConfig) { + svr, err := testutil.NewTestServerConfigT(t, func(c *testutil.TestServerConfig) { c.ACL.Enabled = true }) + require.NoError(t, err) + svr.WaitForLeader(t) - return k8s, testClient + return k8s, svr } // Set up test consul agent and kubernetes cluster. // The consul agent is bootstrapped with the master token. -func completeBootstrappedSetup(t *testing.T, masterToken string) (*fake.Clientset, *test.TestServerClient) { +func completeBootstrappedSetup(t *testing.T, masterToken string) (*fake.Clientset, *testutil.TestServer) { k8s := fake.NewSimpleClientset() - svr := test.TestServerWithMockConnMgrWatcher(t, func(c *testutil.TestServerConfig) { + svr, err := testutil.NewTestServerConfigT(t, func(c *testutil.TestServerConfig) { c.ACL.Enabled = true c.ACL.Tokens.InitialManagement = masterToken }) + require.NoError(t, err) + svr.WaitForActiveCARoot(t) return k8s, svr } @@ -2711,7 +2841,7 @@ func completeBootstrappedSetup(t *testing.T, masterToken string) (*fake.Clientse // the address of the secondary Consul server, // the replication token generated and a cleanup function // that should be called at the end of the test that cleans up resources. 
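// [Editor's note — not part of the patch] As the doc comment above says, the
// replicated-setup helpers now return a cleanup func in place of the extra
// GRPC address; the signature changes follow. A minimal sketch of the
// pattern, assuming github.com/hashicorp/consul/sdk/testutil; twoServers is
// a hypothetical name.

func twoServers(t *testing.T) (*testutil.TestServer, *testutil.TestServer, func()) {
	primary, err := testutil.NewTestServerConfigT(t, func(c *testutil.TestServerConfig) {})
	require.NoError(t, err)
	secondary, err := testutil.NewTestServerConfigT(t, func(c *testutil.TestServerConfig) {
		c.Datacenter = "dc2"
	})
	require.NoError(t, err)
	// A single closure stops both servers, so callers just `defer cleanup()`.
	cleanup := func() {
		primary.Stop()
		secondary.Stop()
	}
	return primary, secondary, cleanup
}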
-func completeReplicatedSetup(t *testing.T) (*fake.Clientset, *api.Client, string, string, string) { +func completeReplicatedSetup(t *testing.T) (*fake.Clientset, *api.Client, string, string, func()) { return replicatedSetup(t, "") } @@ -2724,9 +2854,9 @@ func completeReplicatedSetup(t *testing.T) (*fake.Clientset, *api.Client, string // the address of the secondary Consul server, and a // cleanup function that should be called at the end of the test that cleans // up resources. -func mockReplicatedSetup(t *testing.T, bootToken string) (*fake.Clientset, *api.Client, string, string) { - k8sClient, consulClient, serverHTTPAddr, serverGRPCAddr, _ := replicatedSetup(t, bootToken) - return k8sClient, consulClient, serverHTTPAddr, serverGRPCAddr +func mockReplicatedSetup(t *testing.T, bootToken string) (*fake.Clientset, *api.Client, string, func()) { + k8sClient, consulClient, serverAddr, _, cleanup := replicatedSetup(t, bootToken) + return k8sClient, consulClient, serverAddr, cleanup } // replicatedSetup is a helper function for completeReplicatedSetup and @@ -2737,16 +2867,20 @@ func mockReplicatedSetup(t *testing.T, bootToken string) (*fake.Clientset, *api. // the address of the secondary Consul server, ACL replication token, and a // cleanup function that should be called at the end of the test that cleans // up resources. -func replicatedSetup(t *testing.T, bootToken string) (*fake.Clientset, *api.Client, string, string, string) { - primarySvr := test.TestServerWithMockConnMgrWatcher(t, func(c *testutil.TestServerConfig) { +func replicatedSetup(t *testing.T, bootToken string) (*fake.Clientset, *api.Client, string, string, func()) { + primarySvr, err := testutil.NewTestServerConfigT(t, func(c *testutil.TestServerConfig) { c.ACL.Enabled = true if bootToken != "" { c.ACL.Tokens.InitialManagement = bootToken } }) + require.NoError(t, err) + primarySvr.WaitForLeader(t) + var aclReplicationToken string if bootToken == "" { primaryK8s := fake.NewSimpleClientset() + require.NoError(t, err) setUpK8sServiceAccount(t, primaryK8s, ns) // Run the command to bootstrap ACLs @@ -2759,11 +2893,11 @@ func replicatedSetup(t *testing.T, bootToken string) (*fake.Clientset, *api.Clie primaryCmdArgs := []string{ "-federation", "-k8s-namespace=" + ns, - "-addresses", strings.Split(primarySvr.TestServer.HTTPAddr, ":")[0], - "-http-port", strings.Split(primarySvr.TestServer.HTTPAddr, ":")[1], - "-grpc-port", strings.Split(primarySvr.TestServer.GRPCAddr, ":")[1], + "-server-address", strings.Split(primarySvr.HTTPAddr, ":")[0], + "-server-port", strings.Split(primarySvr.HTTPAddr, ":")[1], "-resource-prefix=" + resourcePrefix, "-create-acl-replication-token", + "-consul-api-timeout", "5s", } responseCode := primaryCmd.Run(primaryCmdArgs) require.Equal(t, 0, responseCode, primaryUI.ErrorWriter.String()) @@ -2778,7 +2912,7 @@ func replicatedSetup(t *testing.T, bootToken string) (*fake.Clientset, *api.Clie } // Set up the secondary server that will federate with the primary. - secondarySvr := test.TestServerWithMockConnMgrWatcher(t, func(c *testutil.TestServerConfig) { + secondarySvr, err := testutil.NewTestServerConfigT(t, func(c *testutil.TestServerConfig) { c.Datacenter = "dc2" c.ACL.Enabled = true c.ACL.TokenReplication = true @@ -2791,6 +2925,7 @@ func replicatedSetup(t *testing.T, bootToken string) (*fake.Clientset, *api.Clie c.ACL.Tokens.Replication = bootToken } }) + require.NoError(t, err) // Our consul client will use the secondary dc. 
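// [Editor's note — not part of the patch] The hunk below performs the actual
// federation. For orientation, the essence of it, assuming consul/api: join
// the primary agent into the secondary's WAN pool, then block until the
// secondary has elected a leader before pointing any client at it.

err = consul.Agent().Join(secondarySvr.WANAddr, true) // true selects the WAN pool
require.NoError(t, err)
secondarySvr.WaitForLeader(t)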
clientToken := bootToken @@ -2804,29 +2939,32 @@ func replicatedSetup(t *testing.T, bootToken string) (*fake.Clientset, *api.Clie // until ACL replication has started, and ACL replication cannot // be started because we haven't told the secondary where the primary // server is yet. - consulConfig := primarySvr.Cfg - consulConfig.APIClientConfig.Token = bootToken - consulConfig.APIClientConfig.Address = primarySvr.TestServer.HTTPAddr - consul, err := api.NewClient(consulConfig.APIClientConfig) + consul, err := api.NewClient(&api.Config{ + Address: primarySvr.HTTPAddr, + Token: bootToken, + }) require.NoError(t, err) // WAN join primary to the secondary - err = consul.Agent().Join(secondarySvr.TestServer.WANAddr, true) + err = consul.Agent().Join(secondarySvr.WANAddr, true) require.NoError(t, err) - secondarySvr.TestServer.WaitForLeader(t) + secondarySvr.WaitForLeader(t) // Overwrite consul client, pointing it to the secondary DC - consulConfig = secondarySvr.Cfg - consulConfig.APIClientConfig.Token = clientToken - consulConfig.APIClientConfig.Address = secondarySvr.TestServer.HTTPAddr - consul, err = api.NewClient(consulConfig.APIClientConfig) + consul, err = api.NewClient(&api.Config{ + Address: secondarySvr.HTTPAddr, + Token: clientToken, + }) require.NoError(t, err) // Finally, set up our kube cluster. It will use the secondary dc. k8s := fake.NewSimpleClientset() - return k8s, consul, secondarySvr.TestServer.HTTPAddr, secondarySvr.TestServer.GRPCAddr, aclReplicationToken + return k8s, consul, secondarySvr.HTTPAddr, aclReplicationToken, func() { + primarySvr.Stop() + secondarySvr.Stop() + } } // getBootToken gets the bootstrap token from the Kubernetes secret. It will diff --git a/control-plane/subcommand/server-acl-init/create_or_update.go b/control-plane/subcommand/server-acl-init/create_or_update.go index 833b923b90..085372827b 100644 --- a/control-plane/subcommand/server-acl-init/create_or_update.go +++ b/control-plane/subcommand/server-acl-init/create_or_update.go @@ -43,18 +43,18 @@ func (c *Command) createACLPolicyRoleAndBindingRule(componentName, rules, dc, pr ap := &api.ACLRolePolicyLink{ Name: policyName, } - var apl []*api.ACLRolePolicyLink + apl := []*api.ACLRolePolicyLink{} apl = append(apl, ap) // Add the ACLRole and ACLBindingRule. - return c.addRoleAndBindingRule(client, componentName, serviceAccountName, authMethodName, apl, global, primary, primaryDC, dc) + return c.addRoleAndBindingRule(client, serviceAccountName, authMethodName, apl, global, primary, primaryDC, dc) } // addRoleAndBindingRule adds an ACLRole and ACLBindingRule which reference the authMethod. -func (c *Command) addRoleAndBindingRule(client *api.Client, componentName, serviceAccountName, authMethodName string, policies []*api.ACLRolePolicyLink, global, primary bool, primaryDC, dc string) error { +func (c *Command) addRoleAndBindingRule(client *api.Client, serviceAccountName string, authMethodName string, policies []*api.ACLRolePolicyLink, global, primary bool, primaryDC, dc string) error { // This is the ACLRole which will allow the component which uses the serviceaccount // to be able to do a consul login. - aclRoleName := c.withPrefix(fmt.Sprintf("%s-acl-role", componentName)) + aclRoleName := fmt.Sprintf("%s-acl-role", serviceAccountName) if c.flagFederation && !primary { // If performing ACL replication, we must ensure policy names are // globally unique so we append the datacenter name but only in secondary datacenters. 
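// [Editor's note — not part of the patch] The create_or_update.go hunk above
// now derives the ACL role name from the service account name, and its
// trailing comment explains the "-<dc>" suffix for federated secondaries. A
// minimal sketch of the resulting rule as a hypothetical helper:

// aclRoleName yields "<serviceAccount>-acl-role", plus "-<dc>" in federated
// secondary datacenters so replicated role names stay globally unique.
func aclRoleName(serviceAccount, dc string, federation, primary bool) string {
	name := fmt.Sprintf("%s-acl-role", serviceAccount)
	if federation && !primary {
		name += "-" + dc
	}
	return name
}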
diff --git a/control-plane/subcommand/server-acl-init/rules.go b/control-plane/subcommand/server-acl-init/rules.go index 43eca2cc57..8b2dec7a14 100644 --- a/control-plane/subcommand/server-acl-init/rules.go +++ b/control-plane/subcommand/server-acl-init/rules.go @@ -53,9 +53,6 @@ agent_prefix "" { partition_prefix "" { namespace_prefix "" { acl = "write" - service_prefix "" { - policy = "write" - } } }` @@ -178,7 +175,10 @@ func (c *Command) meshGatewayRules() (string, error) { // Mesh gateways can only act as a proxy for services // that its ACL token has access to. So, in the case of // Consul namespaces, it needs access to all namespaces. - meshGatewayRulesTpl := `mesh = "write" + meshGatewayRulesTpl := ` + agent_prefix "" { + policy = "read" + } {{- if .EnableNamespaces }} namespace "default" { {{- end }} @@ -366,7 +366,7 @@ partition "default" { {{- end }} acl = "write" service_prefix "" { - policy = "write" + policy = "read" intentions = "read" } {{- if .EnableNamespaces }} @@ -419,9 +419,9 @@ partition "{{ .PartitionName }}" { func (c *Command) rulesData() rulesData { return rulesData{ - EnablePartitions: c.consulFlags.Partition != "", + EnablePartitions: c.flagEnablePartitions, EnablePeering: c.flagEnablePeering, - PartitionName: c.consulFlags.Partition, + PartitionName: c.flagPartitionName, EnableNamespaces: c.flagEnableNamespaces, SyncConsulDestNS: c.flagConsulSyncDestinationNamespace, SyncEnableNSMirroring: c.flagEnableSyncK8SNSMirroring, diff --git a/control-plane/subcommand/server-acl-init/rules_test.go b/control-plane/subcommand/server-acl-init/rules_test.go index 7c2445bac7..1e736f9a95 100644 --- a/control-plane/subcommand/server-acl-init/rules_test.go +++ b/control-plane/subcommand/server-acl-init/rules_test.go @@ -5,7 +5,6 @@ import ( "strings" "testing" - "github.com/hashicorp/consul-k8s/control-plane/subcommand/flags" "github.com/stretchr/testify/require" ) @@ -62,7 +61,8 @@ partition "part-1" { for _, tt := range cases { t.Run(tt.Name, func(t *testing.T) { cmd := Command{ - consulFlags: &flags.ConsulFlags{Partition: tt.PartitionName}, + flagEnablePartitions: tt.EnablePartitions, + flagPartitionName: tt.PartitionName, flagEnableNamespaces: tt.EnableNamespaces, } @@ -127,7 +127,8 @@ partition_prefix "" { for _, tt := range cases { t.Run(tt.Name, func(t *testing.T) { cmd := Command{ - consulFlags: &flags.ConsulFlags{Partition: tt.PartitionName}, + flagEnablePartitions: tt.EnablePartitions, + flagPartitionName: tt.PartitionName, flagEnableNamespaces: tt.EnableNamespaces, } @@ -180,7 +181,6 @@ namespace_prefix "" { t.Run(tt.Name, func(t *testing.T) { cmd := Command{ flagEnableNamespaces: tt.EnableNamespaces, - consulFlags: &flags.ConsulFlags{}, } meshGatewayRules, err := cmd.apiGatewayControllerRules() @@ -199,7 +199,9 @@ func TestMeshGatewayRules(t *testing.T) { }{ { Name: "Namespaces are disabled", - Expected: `mesh = "write" + Expected: `agent_prefix "" { + policy = "read" + } service "mesh-gateway" { policy = "write" } @@ -213,7 +215,9 @@ func TestMeshGatewayRules(t *testing.T) { { Name: "Namespaces are enabled", EnableNamespaces: true, - Expected: `mesh = "write" + Expected: `agent_prefix "" { + policy = "read" + } namespace "default" { service "mesh-gateway" { policy = "write" @@ -234,7 +238,6 @@ namespace_prefix "" { t.Run(tt.Name, func(t *testing.T) { cmd := Command{ flagEnableNamespaces: tt.EnableNamespaces, - consulFlags: &flags.ConsulFlags{}, } meshGatewayRules, err := cmd.meshGatewayRules() @@ -354,7 +357,8 @@ partition "default" { for _, tt := range cases { 
t.Run(tt.Name, func(t *testing.T) { cmd := Command{ - consulFlags: &flags.ConsulFlags{Partition: tt.PartitionName}, + flagEnablePartitions: tt.EnablePartitions, + flagPartitionName: tt.PartitionName, flagEnableNamespaces: tt.EnableNamespaces, } @@ -460,7 +464,8 @@ partition "default" { for _, tt := range cases { t.Run(tt.Name, func(t *testing.T) { cmd := Command{ - consulFlags: &flags.ConsulFlags{Partition: tt.PartitionName}, + flagEnablePartitions: tt.EnablePartitions, + flagPartitionName: tt.PartitionName, flagEnableNamespaces: tt.EnableNamespaces, } @@ -821,7 +826,8 @@ partition "foo" { for _, tt := range cases { t.Run(tt.Name, func(t *testing.T) { cmd := Command{ - consulFlags: &flags.ConsulFlags{Partition: tt.PartitionName}, + flagEnablePartitions: tt.EnablePartitions, + flagPartitionName: tt.PartitionName, flagEnableNamespaces: tt.EnableNamespaces, flagConsulSyncDestinationNamespace: tt.ConsulSyncDestinationNamespace, flagEnableSyncK8SNSMirroring: tt.EnableSyncK8SNSMirroring, @@ -938,7 +944,8 @@ partition "part-1" { t.Run(caseName, func(t *testing.T) { cmd := Command{ - consulFlags: &flags.ConsulFlags{Partition: tt.PartitionName}, + flagEnablePartitions: tt.EnablePartitions, + flagPartitionName: tt.PartitionName, flagEnableNamespaces: tt.EnableNamespaces, flagEnablePeering: tt.EnablePeering, } @@ -971,7 +978,7 @@ func TestReplicationTokenRules(t *testing.T) { } acl = "write" service_prefix "" { - policy = "write" + policy = "read" intentions = "read" }`, }, @@ -989,7 +996,7 @@ func TestReplicationTokenRules(t *testing.T) { namespace_prefix "" { acl = "write" service_prefix "" { - policy = "write" + policy = "read" intentions = "read" } }`, @@ -1011,7 +1018,7 @@ partition "default" { namespace_prefix "" { acl = "write" service_prefix "" { - policy = "write" + policy = "read" intentions = "read" } } @@ -1022,7 +1029,8 @@ partition "default" { for _, tt := range cases { t.Run(tt.Name, func(t *testing.T) { cmd := Command{ - consulFlags: &flags.ConsulFlags{Partition: tt.PartitionName}, + flagEnablePartitions: tt.EnablePartitions, + flagPartitionName: tt.PartitionName, flagEnableNamespaces: tt.EnableNamespaces, } replicationTokenRules, err := cmd.aclReplicationRules() @@ -1163,7 +1171,8 @@ partition "part-1" { flagConsulInjectDestinationNamespace: tt.DestConsulNS, flagEnableInjectK8SNSMirroring: tt.Mirroring, flagInjectK8SNSMirroringPrefix: tt.MirroringPrefix, - consulFlags: &flags.ConsulFlags{Partition: tt.PartitionName}, + flagEnablePartitions: tt.EnablePartitions, + flagPartitionName: tt.PartitionName, } rules, err := cmd.controllerRules() diff --git a/control-plane/subcommand/server-acl-init/servers.go b/control-plane/subcommand/server-acl-init/servers.go index 2dc8f8ab67..7b4f0ef527 100644 --- a/control-plane/subcommand/server-acl-init/servers.go +++ b/control-plane/subcommand/server-acl-init/servers.go @@ -3,7 +3,6 @@ package serveraclinit import ( "errors" "fmt" - "net" "net/http" "strings" "time" @@ -18,15 +17,15 @@ import ( // bootstrapServers bootstraps ACLs and ensures each server has an ACL token. // If bootstrapToken is not empty then ACLs are already bootstrapped. -func (c *Command) bootstrapServers(serverAddresses []net.IPAddr, bootstrapToken, bootTokenSecretName string) (string, error) { +func (c *Command) bootstrapServers(serverAddresses []string, bootstrapToken, bootTokenSecretName, scheme string) (string, error) { // Pick the first server address to connect to for bootstrapping and set up connection. 
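// [Editor's note — not part of the patch] The first changed line below
// formats "host:port" with fmt.Sprintf. An equivalent one-line sketch using
// the stdlib's net.JoinHostPort (with strconv.Itoa, since flagServerPort is
// an int), which also brackets IPv6 hosts correctly:

firstServerAddr := net.JoinHostPort(serverAddresses[0], strconv.Itoa(c.flagServerPort))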
- firstServerAddr := fmt.Sprintf("%s:%d", serverAddresses[0].IP.String(), c.consulFlags.HTTPPort) + firstServerAddr := fmt.Sprintf("%s:%d", serverAddresses[0], c.flagServerPort) if bootstrapToken == "" { c.log.Info("No bootstrap token from previous installation found, continuing on to bootstrapping") var err error - bootstrapToken, err = c.bootstrapACLs(firstServerAddr, bootTokenSecretName) + bootstrapToken, err = c.bootstrapACLs(firstServerAddr, scheme, bootTokenSecretName) if err != nil { return "", err } @@ -37,8 +36,26 @@ func (c *Command) bootstrapServers(serverAddresses []net.IPAddr, bootstrapToken, // We should only create and set server tokens when servers are running within this cluster. if c.flagSetServerTokens { c.log.Info("Setting Consul server tokens") + + // Override our original client with a new one that has the bootstrap token + // set. + clientConfig := api.DefaultConfig() + clientConfig.Address = firstServerAddr + clientConfig.Scheme = scheme + clientConfig.Token = bootstrapToken + clientConfig.TLSConfig = api.TLSConfig{ + Address: c.flagConsulTLSServerName, + CAFile: c.flagConsulCACert, + } + + consulClient, err := consul.NewClient(clientConfig, + c.flagConsulAPITimeout) + if err != nil { + return "", fmt.Errorf("creating Consul client for address %s: %s", firstServerAddr, err) + } + // Create new tokens for each server and apply them. - if err := c.setServerTokens(serverAddresses, bootstrapToken); err != nil { + if err = c.setServerTokens(consulClient, serverAddresses, bootstrapToken, scheme); err != nil { return "", err } } @@ -47,9 +64,14 @@ func (c *Command) bootstrapServers(serverAddresses []net.IPAddr, bootstrapToken, // bootstrapACLs makes the ACL bootstrap API call and writes the bootstrap token // to a kube secret. -func (c *Command) bootstrapACLs(firstServerAddr, bootTokenSecretName string) (string, error) { - config := c.consulFlags.ConsulClientConfig().APIClientConfig - config.Address = firstServerAddr +func (c *Command) bootstrapACLs(firstServerAddr string, scheme string, bootTokenSecretName string) (string, error) { + clientConfig := api.DefaultConfig() + clientConfig.Address = firstServerAddr + clientConfig.Scheme = scheme + clientConfig.TLSConfig = api.TLSConfig{ + Address: c.flagConsulTLSServerName, + CAFile: c.flagConsulCACert, + } // Exempting this particular use of the http client from using global.consulAPITimeout // which defaults to 5 seconds. In acceptance tests, we saw that the call // to /v1/acl/bootstrap taking 5-7 seconds and when it does, the request times @@ -58,8 +80,11 @@ func (c *Command) bootstrapACLs(firstServerAddr, bootTokenSecretName string) (st // already bootstrapped and would not be able to complete. // Since this is an area where we have to wait and can't retry, we are setting it // to a large number like 5 minutes since previously this had no timeout. - config.HttpClient = &http.Client{Timeout: 5 * time.Minute} - consulClient, err := consul.NewClient(config, c.consulFlags.APITimeout) + clientConfig.HttpClient = &http.Client{ + Timeout: 5 * time.Minute, + } + consulClient, err := consul.NewClient(clientConfig, + c.flagConsulAPITimeout) if err != nil { return "", fmt.Errorf("creating Consul client for address %s: %s", firstServerAddr, err) @@ -118,22 +143,13 @@ func (c *Command) bootstrapACLs(firstServerAddr, bootTokenSecretName string) (st // setServerTokens creates policies and associated ACL token for each server // and then provides the token to the server. 
-func (c *Command) setServerTokens(serverAddresses []net.IPAddr, bootstrapToken string) error { - // server specifically. - clientConfig := c.consulFlags.ConsulClientConfig().APIClientConfig - clientConfig.Address = fmt.Sprintf("%s:%d", serverAddresses[0].IP.String(), c.consulFlags.HTTPPort) - clientConfig.Token = bootstrapToken - serverClient, err := consul.NewClient(clientConfig, - c.consulFlags.APITimeout) - if err != nil { - return err - } - agentPolicy, err := c.setServerPolicy(serverClient) +func (c *Command) setServerTokens(consulClient *api.Client, serverAddresses []string, bootstrapToken, scheme string) error { + agentPolicy, err := c.setServerPolicy(consulClient) if err != nil { return err } - existingTokens, _, err := serverClient.ACL().TokenList(nil) + existingTokens, _, err := consulClient.ACL().TokenList(nil) if err != nil { return err } @@ -144,16 +160,22 @@ func (c *Command) setServerTokens(serverAddresses []net.IPAddr, bootstrapToken s // We create a new client for each server because we need to call each // server specifically. - clientConfig := c.consulFlags.ConsulClientConfig().APIClientConfig - clientConfig.Address = fmt.Sprintf("%s:%d", host.IP.String(), c.consulFlags.HTTPPort) + clientConfig := api.DefaultConfig() + clientConfig.Address = fmt.Sprintf("%s:%d", host, c.flagServerPort) + clientConfig.Scheme = scheme clientConfig.Token = bootstrapToken + clientConfig.TLSConfig = api.TLSConfig{ + Address: c.flagConsulTLSServerName, + CAFile: c.flagConsulCACert, + } + serverClient, err := consul.NewClient(clientConfig, - c.consulFlags.APITimeout) + c.flagConsulAPITimeout) if err != nil { return err } - tokenDescription := fmt.Sprintf("Server Token for %s", host.IP.String()) + tokenDescription := fmt.Sprintf("Server Token for %s", host) // Check if the token was already created. We're matching on the description // since that's the only part that's unique. 
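// [Editor's note — not part of the patch] Per the comment just above, a
// server token's description is its only unique field, so the idempotency
// check (elided from this excerpt) is a linear scan of existing tokens. A
// minimal sketch of the plausible shape, assuming consul/api:

existing, _, err := consulClient.ACL().TokenList(nil)
if err != nil {
	return err
}
alreadyCreated := false
for _, tok := range existing {
	if tok.Description == tokenDescription {
		alreadyCreated = true // a previous run made this token; skip TokenCreate
		break
	}
}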
diff --git a/control-plane/subcommand/service-address/command.go b/control-plane/subcommand/service-address/command.go new file mode 100644 index 0000000000..91bc600191 --- /dev/null +++ b/control-plane/subcommand/service-address/command.go @@ -0,0 +1,224 @@ +package serviceaddress + +import ( + "context" + "errors" + "flag" + "fmt" + "net" + "os" + "sync" + "time" + + "github.com/cenkalti/backoff" + "github.com/hashicorp/consul-k8s/control-plane/subcommand" + "github.com/hashicorp/consul-k8s/control-plane/subcommand/common" + "github.com/hashicorp/consul-k8s/control-plane/subcommand/flags" + k8sflags "github.com/hashicorp/consul-k8s/control-plane/subcommand/flags" + "github.com/hashicorp/go-hclog" + "github.com/mitchellh/cli" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes" +) + +type Command struct { + UI cli.Ui + + flags *flag.FlagSet + k8sFlags *k8sflags.K8SFlags + + flagNamespace string + flagServiceName string + flagOutputFile string + flagResolveHostnames bool + flagLogLevel string + flagLogJSON bool + + retryDuration time.Duration + k8sClient kubernetes.Interface + once sync.Once + help string + + ctx context.Context +} + +func (c *Command) init() { + c.flags = flag.NewFlagSet("", flag.ContinueOnError) + c.flags.StringVar(&c.flagNamespace, "k8s-namespace", "", + "Kubernetes namespace where service is created") + c.flags.StringVar(&c.flagServiceName, "name", "", + "Name of the service") + c.flags.StringVar(&c.flagOutputFile, "output-file", "", + "Path to file to write load balancer address") + c.flags.BoolVar(&c.flagResolveHostnames, "resolve-hostnames", false, + "If true we will resolve any hostnames and use their first IP address") + c.flags.StringVar(&c.flagLogLevel, "log-level", "info", + "Log verbosity level. Supported values (in order of detail) are \"trace\", "+ + "\"debug\", \"info\", \"warn\", and \"error\".") + c.flags.BoolVar(&c.flagLogJSON, "log-json", false, + "Enable or disable JSON output format for logging.") + + c.k8sFlags = &k8sflags.K8SFlags{} + flags.Merge(c.flags, c.k8sFlags.Flags()) + c.help = flags.Usage(help, c.flags) +} + +// Run waits until a Kubernetes service has an ingress address and then writes +// it to an output file. +func (c *Command) Run(args []string) int { + c.once.Do(c.init) + if err := c.validateFlags(args); err != nil { + c.UI.Error(err.Error()) + return 1 + } + + if c.k8sClient == nil { + config, err := subcommand.K8SConfig(c.k8sFlags.KubeConfig()) + if err != nil { + c.UI.Error(fmt.Sprintf("Error retrieving Kubernetes auth: %s", err)) + return 1 + } + c.k8sClient, err = kubernetes.NewForConfig(config) + if err != nil { + c.UI.Error(fmt.Sprintf("Error initializing Kubernetes client: %s", err)) + return 1 + } + } + if c.retryDuration == 0 { + c.retryDuration = 1 * time.Second + } + logger, err := common.Logger(c.flagLogLevel, c.flagLogJSON) + if err != nil { + c.UI.Error(err.Error()) + return 1 + } + + if c.ctx == nil { + c.ctx = context.Background() + } + + // Run until we get an address from the service. 
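// [Editor's note — not part of the patch] The retry loop that follows routes
// fatal cases out through an unretryableErr variable while returning nil to
// stop the backoff. For reference, a minimal sketch of the alternative the
// library offers, backoff.Permanent, which makes Retry return immediately;
// this assumes the vendored github.com/cenkalti/backoff exposes Permanent,
// as current releases do, and k8sClient/namespace/name/ctx stand in for the
// command's fields. The new file continues below.

err := backoff.Retry(func() error {
	svc, err := k8sClient.CoreV1().Services(namespace).Get(ctx, name, metav1.GetOptions{})
	if err != nil {
		return err // transient: retried on the next tick
	}
	if svc.Spec.Type == v1.ServiceTypeNodePort {
		// Permanent wraps the error so backoff.Retry stops and returns it.
		return backoff.Permanent(errors.New("services of type NodePort are not supported"))
	}
	return nil
}, backoff.NewConstantBackOff(time.Second))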
+ var address string + var unretryableErr error + err = backoff.Retry(withErrLogger(logger, func() error { + svc, err := c.k8sClient.CoreV1().Services(c.flagNamespace).Get(c.ctx, c.flagServiceName, metav1.GetOptions{}) + if err != nil { + return fmt.Errorf("getting service %s: %s", c.flagServiceName, err) + } + switch svc.Spec.Type { + case v1.ServiceTypeClusterIP: + address = svc.Spec.ClusterIP + return nil + case v1.ServiceTypeNodePort: + unretryableErr = errors.New("services of type NodePort are not supported") + return nil + case v1.ServiceTypeExternalName: + unretryableErr = errors.New("services of type ExternalName are not supported") + return nil + case v1.ServiceTypeLoadBalancer: + for _, ingr := range svc.Status.LoadBalancer.Ingress { + if ingr.IP != "" { + address = ingr.IP + return nil + } else if ingr.Hostname != "" { + if c.flagResolveHostnames { + address, unretryableErr = resolveHostname(ingr.Hostname) + } else { + address = ingr.Hostname + } + return nil + } + } + return fmt.Errorf("service %s has no ingress IP or hostname", c.flagServiceName) + default: + unretryableErr = fmt.Errorf("unknown service type %q", svc.Spec.Type) + return nil + } + }), backoff.NewConstantBackOff(c.retryDuration)) + + if err != nil || unretryableErr != nil { + c.UI.Error(fmt.Sprintf("Unable to get service address: %v, err: %v", unretryableErr, err)) + return 1 + } + + // Write the address to file. + err = os.WriteFile(c.flagOutputFile, []byte(address), 0600) + if err != nil { + c.UI.Error(fmt.Sprintf("Unable to write address to file: %s", err)) + return 1 + } + + c.UI.Info(fmt.Sprintf("Address %q written to %s successfully", address, c.flagOutputFile)) + return 0 +} + +func (c *Command) validateFlags(args []string) error { + if err := c.flags.Parse(args); err != nil { + return err + } + if len(c.flags.Args()) > 0 { + return errors.New("should have no non-flag arguments") + } + if c.flagNamespace == "" { + return errors.New("-k8s-namespace must be set") + } + if c.flagServiceName == "" { + return errors.New("-name must be set") + } + if c.flagOutputFile == "" { + return errors.New("-output-file must be set") + } + return nil +} + +// resolveHostname returns the first IPv4 address for host. +func resolveHostname(host string) (string, error) { + ips, err := net.LookupIP(host) + if err != nil { + return "", fmt.Errorf("unable to resolve hostname: %s", err) + } + if len(ips) < 1 { + return "", fmt.Errorf("hostname %q had no resolvable IPs", host) + } + + for _, ip := range ips { + v4 := ip.To4() + if v4 == nil { + continue + } + return ip.String(), nil + } + return "", fmt.Errorf("hostname %q had no IPv4 addresses", host) +} + +// withErrLogger runs op and logs if op returns an error. +// It returns the result of op. +func withErrLogger(log hclog.Logger, op func() error) func() error { + return func() error { + err := op() + if err != nil { + log.Error(err.Error()) + } + return err + } +} + +func (c *Command) Synopsis() string { return synopsis } +func (c *Command) Help() string { + c.once.Do(c.init) + return c.help +} + +const synopsis = "Output Kubernetes Service address to file" +const help = ` +Usage: consul-k8s-control-plane service-address [options] + + Waits until the Kubernetes service specified by -name in namespace + -k8s-namespace is created, then writes its address to -output-file.
+ The address written depends on the service type: + ClusterIP - Cluster IP + NodePort - Not supported + LoadBalancer - Load balancer's IP or hostname + ExternalName - Not supported +` diff --git a/control-plane/subcommand/service-address/command_test.go b/control-plane/subcommand/service-address/command_test.go new file mode 100644 index 0000000000..24e83551b6 --- /dev/null +++ b/control-plane/subcommand/service-address/command_test.go @@ -0,0 +1,397 @@ +package serviceaddress + +import ( + "context" + "fmt" + "os" + "path/filepath" + "testing" + "time" + + "github.com/mitchellh/cli" + "github.com/stretchr/testify/require" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/client-go/kubernetes/fake" +) + +// Test that flags are validated. +func TestRun_FlagValidation(t *testing.T) { + cases := []struct { + Flags []string + ExpErr string + }{ + { + Flags: []string{}, + ExpErr: "-k8s-namespace must be set", + }, + { + Flags: []string{"-k8s-namespace=default"}, + ExpErr: "-name must be set", + }, + { + Flags: []string{"-k8s-namespace=default", "-name=name"}, + ExpErr: "-output-file must be set", + }, + } + for _, c := range cases { + t.Run(c.ExpErr, func(t *testing.T) { + ui := cli.NewMockUi() + cmd := Command{ + UI: ui, + } + responseCode := cmd.Run(c.Flags) + require.Equal(t, 1, responseCode, ui.ErrorWriter.String()) + require.Contains(t, ui.ErrorWriter.String(), c.ExpErr) + }) + } +} + +// Test that if the file can't be written to, we return an error. +func TestRun_UnableToWriteToFile(t *testing.T) { + t.Parallel() + require := require.New(t) + + k8sNS := "default" + svcName := "service-name" + expAddress := "1.2.3.4" + + // Create the service. + k8s := fake.NewSimpleClientset() + _, err := k8s.CoreV1().Services(k8sNS).Create(context.Background(), kubeLoadBalancerSvc(svcName, expAddress, ""), metav1.CreateOptions{}) + require.NoError(err) + + // Run command with an unwritable file. + ui := cli.NewMockUi() + cmd := Command{ + UI: ui, + k8sClient: k8s, + } + responseCode := cmd.Run([]string{ + "-k8s-namespace", k8sNS, + "-name", svcName, + "-output-file", "/this/filepath/does/not/exist", + }) + require.Equal(1, responseCode, ui.ErrorWriter.String()) + require.Contains(ui.ErrorWriter.String(), + "Unable to write address to file: open /this/filepath/does/not/exist: no such file or directory") +} + +func TestRun_UnresolvableHostname(t *testing.T) { + t.Parallel() + require := require.New(t) + + k8sNS := "default" + svcName := "service-name" + + // Create the service. + k8s := fake.NewSimpleClientset() + _, err := k8s.CoreV1().Services(k8sNS).Create(context.Background(), kubeLoadBalancerSvc(svcName, "", "unresolvable"), metav1.CreateOptions{}) + require.NoError(err) + + // Run command. + ui := cli.NewMockUi() + cmd := Command{ + UI: ui, + k8sClient: k8s, + } + tmpDir, err := os.MkdirTemp("", "") + require.NoError(err) + defer os.RemoveAll(tmpDir) + outputFile := filepath.Join(tmpDir, "address.txt") + + responseCode := cmd.Run([]string{ + "-k8s-namespace", k8sNS, + "-name", svcName, + "-output-file", outputFile, + "-resolve-hostnames=true", + }) + require.Equal(1, responseCode) + require.Contains(ui.ErrorWriter.String(), "Unable to get service address: unable to resolve hostname:") +} + +// Test running with different service types.
+func TestRun_ServiceTypes(t *testing.T) { + t.Parallel() + + // All services will have the name "service-name" + cases := map[string]struct { + Service *v1.Service + ServiceModificationF func(*v1.Service) + ResolveHostnames bool + ExpErr string + ExpAddress string + }{ + "ClusterIP": { + Service: kubeClusterIPSvc("service-name"), + ExpAddress: "5.6.7.8", + }, + "NodePort": { + Service: kubeNodePortSvc("service-name"), + ExpErr: "services of type NodePort are not supported", + }, + "LoadBalancer IP": { + Service: kubeLoadBalancerSvc("service-name", "1.2.3.4", ""), + ExpAddress: "1.2.3.4", + }, + "LoadBalancer hostname": { + Service: kubeLoadBalancerSvc("service-name", "", "localhost"), + ExpAddress: "localhost", + }, + "LoadBalancer hostname with resolve-hostnames=true": { + Service: kubeLoadBalancerSvc("service-name", "", "localhost"), + ResolveHostnames: true, + ExpAddress: "127.0.0.1", + }, + "LoadBalancer IP and hostname": { + Service: kubeLoadBalancerSvc("service-name", "1.2.3.4", "example.com"), + ExpAddress: "1.2.3.4", + }, + "LoadBalancer first ingress empty": { + Service: kubeLoadBalancerSvc("service-name", "1.2.3.4", "example.com"), + ServiceModificationF: func(svc *v1.Service) { + svc.Status.LoadBalancer.Ingress = []v1.LoadBalancerIngress{ + {}, + { + IP: "5.6.7.8", + }, + } + }, + ExpAddress: "5.6.7.8", + }, + "ExternalName": { + Service: kubeExternalNameSvc("service-name"), + ExpErr: "services of type ExternalName are not supported", + }, + "invalid name": { + Service: &v1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "service-name", + }, + Spec: v1.ServiceSpec{ + Type: "invalid", + }, + }, + ExpErr: "unknown service type \"invalid\"", + }, + } + + for name, c := range cases { + t.Run(name, func(tt *testing.T) { + require := require.New(tt) + k8sNS := "default" + svcName := "service-name" + + // Create the service. + k8s := fake.NewSimpleClientset() + if c.ServiceModificationF != nil { + c.ServiceModificationF(c.Service) + } + _, err := k8s.CoreV1().Services(k8sNS).Create(context.Background(), c.Service, metav1.CreateOptions{}) + require.NoError(err) + + // Run command. + ui := cli.NewMockUi() + cmd := Command{ + UI: ui, + k8sClient: k8s, + } + tmpDir, err := os.MkdirTemp("", "") + require.NoError(err) + defer os.RemoveAll(tmpDir) + outputFile := filepath.Join(tmpDir, "address.txt") + + args := []string{ + "-k8s-namespace", k8sNS, + "-name", svcName, + "-output-file", outputFile, + } + if c.ResolveHostnames { + args = append(args, "-resolve-hostnames=true") + } + responseCode := cmd.Run(args) + if c.ExpErr != "" { + require.Equal(1, responseCode) + require.Contains(ui.ErrorWriter.String(), c.ExpErr) + } else { + require.Equal(0, responseCode, ui.ErrorWriter.String()) + actAddressBytes, err := os.ReadFile(outputFile) + require.NoError(err) + require.Equal(c.ExpAddress, string(actAddressBytes)) + } + }) + } +} + +// Test that we write the address to file successfully, even when we have to retry +// looking up the service. This mimics what happens in Kubernetes when a +// service gets an ingress address after a cloud provider provisions a +// load balancer. +func TestRun_FileWrittenAfterRetry(t *testing.T) { + t.Parallel() + cases := map[string]struct { + // InitialService controls whether a service with that name will have + // already been created. The service won't have an address yet. + InitialService bool + // UpdateDelay controls how long we wait before updating the service + // with the UpdateIP address. NOTE: the retry duration for this + // test is set to 10ms. 
+ UpdateDelay time.Duration + }{ + "initial service exists": { + InitialService: true, + UpdateDelay: 50 * time.Millisecond, + }, + "initial service does not exist, immediate update": { + InitialService: false, + UpdateDelay: 0, + }, + "initial service does not exist, 50ms delay": { + InitialService: false, + UpdateDelay: 50 * time.Millisecond, + }, + } + + for name, c := range cases { + t.Run(name, func(t *testing.T) { + k8sNS := "default" + svcName := "service-name" + ip := "1.2.3.4" + k8s := fake.NewSimpleClientset() + + if c.InitialService { + svc := kubeLoadBalancerSvc(svcName, "", "") + // Reset the status to nothing. + svc.Status = v1.ServiceStatus{} + _, err := k8s.CoreV1().Services(k8sNS).Create(context.Background(), svc, metav1.CreateOptions{}) + require.NoError(t, err) + } + + // Create/update the service after delay. + go func() { + time.Sleep(c.UpdateDelay) + svc := kubeLoadBalancerSvc(svcName, ip, "") + var err error + if c.InitialService { + _, err = k8s.CoreV1().Services(k8sNS).Update(context.Background(), svc, metav1.UpdateOptions{}) + } else { + _, err = k8s.CoreV1().Services(k8sNS).Create(context.Background(), svc, metav1.CreateOptions{}) + } + require.NoError(t, err) + }() + + // Run command. + ui := cli.NewMockUi() + cmd := Command{ + UI: ui, + k8sClient: k8s, + retryDuration: 10 * time.Millisecond, + } + tmpDir, err := os.MkdirTemp("", "") + require.NoError(t, err) + defer os.RemoveAll(tmpDir) + outputFile := filepath.Join(tmpDir, "address.txt") + + responseCode := cmd.Run([]string{ + "-k8s-namespace", k8sNS, + "-name", svcName, + "-output-file", outputFile, + }) + require.Equal(t, 0, responseCode, ui.ErrorWriter.String()) + actAddressBytes, err := os.ReadFile(outputFile) + require.NoError(t, err) + require.Equal(t, ip, string(actAddressBytes)) + }) + } +} + +func kubeLoadBalancerSvc(name string, ip string, hostname string) *v1.Service { + return &v1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + }, + Spec: v1.ServiceSpec{ + Type: "LoadBalancer", + ClusterIP: "9.0.1.2", + Ports: []v1.ServicePort{ + { + Name: "http", + Protocol: "TCP", + Port: 80, + TargetPort: intstr.IntOrString{ + IntVal: 8080, + }, + NodePort: 32001, + }, + }, + }, + Status: v1.ServiceStatus{ + LoadBalancer: v1.LoadBalancerStatus{ + Ingress: []v1.LoadBalancerIngress{ + { + IP: ip, + Hostname: hostname, + }, + }, + }, + }, + } +} + +func kubeNodePortSvc(name string) *v1.Service { + return &v1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + }, + Spec: v1.ServiceSpec{ + Type: "NodePort", + ClusterIP: "1.2.3.4", + Ports: []v1.ServicePort{ + { + Name: "http", + Protocol: "TCP", + Port: 80, + TargetPort: intstr.IntOrString{ + IntVal: 8080, + }, + NodePort: 32000, + }, + }, + }, + } +} + +func kubeClusterIPSvc(name string) *v1.Service { + return &v1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + }, + Spec: v1.ServiceSpec{ + Type: "ClusterIP", + ClusterIP: "5.6.7.8", + Ports: []v1.ServicePort{ + { + Name: "http", + Protocol: "TCP", + Port: 80, + TargetPort: intstr.IntOrString{ + IntVal: 8080, + }, + }, + }, + }, + } +} + +func kubeExternalNameSvc(name string) *v1.Service { + return &v1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + }, + Spec: v1.ServiceSpec{ + Type: "ExternalName", + ExternalName: fmt.Sprintf("%s.example.com", name), + }, + } +} diff --git a/control-plane/subcommand/sync-catalog/command.go b/control-plane/subcommand/sync-catalog/command.go index f890b44f34..105ce6619c 100644 --- a/control-plane/subcommand/sync-catalog/command.go +++ 
b/control-plane/subcommand/sync-catalog/command.go @@ -15,12 +15,11 @@ import ( mapset "github.com/deckarep/golang-set" catalogtoconsul "github.com/hashicorp/consul-k8s/control-plane/catalog/to-consul" catalogtok8s "github.com/hashicorp/consul-k8s/control-plane/catalog/to-k8s" - "github.com/hashicorp/consul-k8s/control-plane/consul" "github.com/hashicorp/consul-k8s/control-plane/helper/controller" "github.com/hashicorp/consul-k8s/control-plane/subcommand" "github.com/hashicorp/consul-k8s/control-plane/subcommand/common" "github.com/hashicorp/consul-k8s/control-plane/subcommand/flags" - "github.com/hashicorp/consul-server-connection-manager/discovery" + "github.com/hashicorp/consul/api" "github.com/hashicorp/go-hclog" "github.com/mitchellh/cli" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -34,7 +33,7 @@ type Command struct { UI cli.Ui flags *flag.FlagSet - consul *flags.ConsulFlags + http *flags.HTTPFlags k8s *flags.K8SFlags flagListen string flagToConsul bool @@ -64,17 +63,13 @@ type Command struct { flagK8SNSMirroringPrefix string // Prefix added to Consul namespaces created when mirroring flagCrossNamespaceACLPolicy string // The name of the ACL policy to add to every created namespace if ACLs are enabled - clientset kubernetes.Interface + consulClient *api.Client + clientset kubernetes.Interface - // ready indicates whether this controller is ready to sync services. This will be changed to true once the - // consul-server-connection-manager has finished initial initialization. - ready bool - - once sync.Once - sigCh chan os.Signal - help string - logger hclog.Logger - connMgr consul.ServerConnectionManager + once sync.Once + sigCh chan os.Signal + help string + logger hclog.Logger } func (c *Command) init() { @@ -148,9 +143,9 @@ func (c *Command) init() { "[Enterprise Only] Name of the ACL policy to attach to all created Consul namespaces to allow service "+ "discovery across Consul namespaces. Only necessary if ACLs are enabled.") - c.consul = &flags.ConsulFlags{} + c.http = &flags.HTTPFlags{} c.k8s = &flags.K8SFlags{} - flags.Merge(c.flags, c.consul.Flags()) + flags.Merge(c.flags, c.http.Flags()) flags.Merge(c.flags, c.k8s.Flags()) c.help = flags.Usage(help, c.flags) @@ -195,49 +190,26 @@ func (c *Command) Run(args []string) int { } } - // Set up logging - if c.logger == nil { + // Setup Consul client + if c.consulClient == nil { var err error - c.logger, err = common.Logger(c.flagLogLevel, c.flagLogJSON) + c.consulClient, err = c.http.APIClient() if err != nil { - c.UI.Error(err.Error()) + c.UI.Error(fmt.Sprintf("Error connecting to Consul agent: %s", err)) return 1 } } - // Create Consul API config object. - consulConfig := c.consul.ConsulClientConfig() - - // Create a context to be used by the processes started in this command. - ctx, cancelFunc := signal.NotifyContext(context.Background(), syscall.SIGINT, syscall.SIGTERM) - defer cancelFunc() - - if c.connMgr == nil { - // Start Consul server Connection manager. 
- serverConnMgrCfg, err := c.consul.ConsulServerConnMgrConfig() - if err != nil { - c.UI.Error(fmt.Sprintf("unable to create config for consul-server-connection-manager: %s", err)) - return 1 - } - c.connMgr, err = discovery.NewWatcher(ctx, serverConnMgrCfg, c.logger.Named("consul-server-connection-manager")) + // Set up logging + if c.logger == nil { + var err error + c.logger, err = common.Logger(c.flagLogLevel, c.flagLogJSON) if err != nil { - c.UI.Error(fmt.Sprintf("unable to create Consul server watcher: %s", err)) + c.UI.Error(err.Error()) return 1 } - - go c.connMgr.Run() - defer c.connMgr.Stop() } - // This is a blocking command that is run in order to ensure we only start the - // sync-catalog controllers only after we have access to the Consul server. - _, err := c.connMgr.State() - if err != nil { - c.UI.Error(fmt.Sprintf("unable to start Consul server watcher: %s", err)) - return 1 - } - c.ready = true - // Convert allow/deny lists to sets allowSet := flags.ToSet(c.flagAllowK8sNamespacesList) denySet := flags.ToSet(c.flagDenyK8sNamespacesList) @@ -255,17 +227,31 @@ func (c *Command) Run(args []string) int { // Start the K8S-to-Consul syncer var toConsulCh chan struct{} if c.flagToConsul { + // If namespaces are enabled we need to use a new Consul API endpoint + // to list node services. This endpoint is only available in Consul + // 1.7+. To preserve backwards compatibility, when namespaces are not + // enabled we use a client that queries the older API endpoint. + var svcsClient catalogtoconsul.ConsulNodeServicesClient + if c.flagEnableNamespaces { + svcsClient = &catalogtoconsul.NamespacesNodeServicesClient{ + Client: c.consulClient, + } + } else { + svcsClient = &catalogtoconsul.PreNamespacesNodeServicesClient{ + Client: c.consulClient, + } + } // Build the Consul sync and start it syncer := &catalogtoconsul.ConsulSyncer{ - ConsulClientConfig: consulConfig, - ConsulServerConnMgr: c.connMgr, - Log: c.logger.Named("to-consul/sink"), - EnableNamespaces: c.flagEnableNamespaces, - CrossNamespaceACLPolicy: c.flagCrossNamespaceACLPolicy, - SyncPeriod: c.flagConsulWritePeriod, - ServicePollPeriod: c.flagConsulWritePeriod * 2, - ConsulK8STag: c.flagConsulK8STag, - ConsulNodeName: c.flagConsulNodeName, + Client: c.consulClient, + Log: c.logger.Named("to-consul/sink"), + EnableNamespaces: c.flagEnableNamespaces, + CrossNamespaceACLPolicy: c.flagCrossNamespaceACLPolicy, + SyncPeriod: c.flagConsulWritePeriod, + ServicePollPeriod: c.flagConsulWritePeriod * 2, + ConsulK8STag: c.flagConsulK8STag, + ConsulNodeName: c.flagConsulNodeName, + ConsulNodeServicesClient: svcsClient, } go syncer.Run(ctx) @@ -312,13 +298,12 @@ func (c *Command) Run(args []string) int { } source := &catalogtok8s.Source{ - ConsulClientConfig: consulConfig, - ConsulServerConnMgr: c.connMgr, - Domain: c.flagConsulDomain, - Sink: sink, - Prefix: c.flagK8SServicePrefix, - Log: c.logger.Named("to-k8s/source"), - ConsulK8STag: c.flagConsulK8STag, + Client: c.consulClient, + Domain: c.flagConsulDomain, + Sink: sink, + Prefix: c.flagK8SServicePrefix, + Log: c.logger.Named("to-k8s/source"), + ConsulK8STag: c.flagConsulK8STag, } go source.Run(ctx) @@ -378,9 +363,12 @@ func (c *Command) Run(args []string) int { } } -func (c *Command) handleReady(rw http.ResponseWriter, _ *http.Request) { - if !c.ready { - c.UI.Error("[GET /health/ready] sync catalog controller is not yet ready") +func (c *Command) handleReady(rw http.ResponseWriter, req *http.Request) { + // The main readiness check is whether sync can talk to + // the consul
cluster, in this case querying for the leader + _, err := c.consulClient.Status().Leader() + if err != nil { + c.UI.Error(fmt.Sprintf("[GET /health/ready] Error getting leader status: %s", err)) rw.WriteHeader(500) return } diff --git a/control-plane/subcommand/sync-catalog/command_ent_test.go b/control-plane/subcommand/sync-catalog/command_ent_test.go index fac330c557..4e5ba14e93 100644 --- a/control-plane/subcommand/sync-catalog/command_ent_test.go +++ b/control-plane/subcommand/sync-catalog/command_ent_test.go @@ -5,12 +5,10 @@ package synccatalog import ( "context" "fmt" - "strconv" "strings" "testing" "time" - "github.com/hashicorp/consul-k8s/control-plane/helper/test" "github.com/hashicorp/consul/api" "github.com/hashicorp/consul/sdk/testutil" "github.com/hashicorp/consul/sdk/testutil/retry" @@ -45,15 +43,20 @@ func TestRun_ToConsulSingleDestinationNamespace(t *testing.T) { for _, c := range cases { t.Run(c.Name, func(tt *testing.T) { - k8s, testClient := completeSetup(tt) - consulClient := testClient.APIClient + k8s, testServer := completeSetupEnterprise(tt) + defer testServer.Stop() // Run the command. ui := cli.NewMockUi() + consulClient, err := api.NewClient(&api.Config{ + Address: testServer.HTTPAddr, + }) + require.NoError(tt, err) + cmd := Command{ - UI: ui, - clientset: k8s, - connMgr: testClient.Watcher, + UI: ui, + clientset: k8s, + consulClient: consulClient, logger: hclog.New(&hclog.LoggerOptions{ Name: tt.Name(), Level: hclog.Debug, @@ -61,7 +64,7 @@ func TestRun_ToConsulSingleDestinationNamespace(t *testing.T) { } // Create two services in k8s in default and foo namespaces. - _, err := k8s.CoreV1().Services(metav1.NamespaceDefault).Create(context.Background(), lbService("default", "1.1.1.1"), metav1.CreateOptions{}) + _, err = k8s.CoreV1().Services(metav1.NamespaceDefault).Create(context.Background(), lbService("default", "1.1.1.1"), metav1.CreateOptions{}) require.NoError(tt, err) _, err = k8s.CoreV1().Namespaces().Create( context.Background(), @@ -76,8 +79,6 @@ func TestRun_ToConsulSingleDestinationNamespace(t *testing.T) { require.NoError(tt, err) exitChan := runCommandAsynchronously(&cmd, []string{ - "-addresses", "127.0.0.1", - "-http-port", strconv.Itoa(testClient.Cfg.HTTPPort), "-consul-write-interval", "500ms", "-add-k8s-namespace-suffix", "-log-level=debug", @@ -180,16 +181,20 @@ func TestRun_ToConsulMirroringNamespaces(t *testing.T) { for name, c := range cases { t.Run(name, func(tt *testing.T) { - k8s, testClient := completeSetup(tt) - - consulClient := testClient.APIClient + k8s, testServer := completeSetupEnterprise(tt) + defer testServer.Stop() // Run the command. ui := cli.NewMockUi() + consulClient, err := api.NewClient(&api.Config{ + Address: testServer.HTTPAddr, + }) + require.NoError(tt, err) + cmd := Command{ - UI: ui, - clientset: k8s, - connMgr: testClient.Watcher, + UI: ui, + clientset: k8s, + consulClient: consulClient, logger: hclog.New(&hclog.LoggerOptions{ Name: tt.Name(), Level: hclog.Debug, @@ -197,7 +202,7 @@ func TestRun_ToConsulMirroringNamespaces(t *testing.T) { } // Create two services in k8s in default and foo namespaces. 
- _, err := k8s.CoreV1().Services(metav1.NamespaceDefault).Create(context.Background(), lbService("default", "1.1.1.1"), metav1.CreateOptions{}) + _, err = k8s.CoreV1().Services(metav1.NamespaceDefault).Create(context.Background(), lbService("default", "1.1.1.1"), metav1.CreateOptions{}) require.NoError(tt, err) _, err = k8s.CoreV1().Namespaces().Create( context.Background(), @@ -212,8 +217,6 @@ func TestRun_ToConsulMirroringNamespaces(t *testing.T) { require.NoError(tt, err) args := append([]string{ - "-addresses", "127.0.0.1", - "-http-port", strconv.Itoa(testClient.Cfg.HTTPPort), "-consul-write-interval", "500ms", "-add-k8s-namespace-suffix", "-log-level=debug", @@ -452,13 +455,15 @@ func TestRun_ToConsulChangingNamespaceFlags(t *testing.T) { for name, c := range cases { t.Run(name, func(tt *testing.T) { - k8s, testClient := completeSetup(tt) - consulClient := testClient.APIClient - + k8s, testServer := completeSetupEnterprise(tt) + defer testServer.Stop() ui := cli.NewMockUi() + consulClient, err := api.NewClient(&api.Config{ + Address: testServer.HTTPAddr, + }) + require.NoError(tt, err) + commonArgs := []string{ - "-addresses", "127.0.0.1", - "-http-port", strconv.Itoa(testClient.Cfg.HTTPPort), "-consul-write-interval", "500ms", "-log-level=debug", "-allow-k8s-namespace=*", @@ -466,7 +471,7 @@ func TestRun_ToConsulChangingNamespaceFlags(t *testing.T) { // Create two services in k8s in default and foo namespaces. { - _, err := k8s.CoreV1().Services(metav1.NamespaceDefault).Create(context.Background(), lbService("default", "1.1.1.1"), metav1.CreateOptions{}) + _, err = k8s.CoreV1().Services(metav1.NamespaceDefault).Create(context.Background(), lbService("default", "1.1.1.1"), metav1.CreateOptions{}) require.NoError(tt, err) _, err = k8s.CoreV1().Namespaces().Create( context.Background(), @@ -484,9 +489,9 @@ func TestRun_ToConsulChangingNamespaceFlags(t *testing.T) { // Run the first command. { firstCmd := Command{ - UI: ui, - clientset: k8s, - connMgr: testClient.Watcher, + UI: ui, + clientset: k8s, + consulClient: consulClient, logger: hclog.New(&hclog.LoggerOptions{ Name: tt.Name() + "-firstrun", Level: hclog.Debug, @@ -514,9 +519,9 @@ func TestRun_ToConsulChangingNamespaceFlags(t *testing.T) { // Run the second command. { secondCmd := Command{ - UI: ui, - clientset: k8s, - connMgr: testClient.Watcher, + UI: ui, + clientset: k8s, + consulClient: consulClient, logger: hclog.New(&hclog.LoggerOptions{ Name: tt.Name() + "-secondrun", Level: hclog.Debug, @@ -559,7 +564,7 @@ func TestRun_ToConsulChangingNamespaceFlags(t *testing.T) { } // Tests that the cross-namespace ACL policy is correctly -// attached to all created namespaces. Specific tests for // services and their destinations are covered in other tests.
func TestRun_ToConsulNamespacesACLs(t *testing.T) { cases := []struct { @@ -617,17 +622,37 @@ func TestRun_ToConsulNamespacesACLs(t *testing.T) { require.NoError(tt, err) // Set up consul server - bootToken := "74044c72-03c8-42b0-b57f-728bb22ca7fb" - testClient := test.TestServerWithMockConnMgrWatcher(t, func(c *testutil.TestServerConfig) { - c.ACL.Enabled = true - c.ACL.Tokens.InitialManagement = bootToken + a, err := testutil.NewTestServerConfigT(tt, func(client *testutil.TestServerConfig) { + client.ACL.Enabled = true }) + require.NoError(tt, err) + defer a.Stop() + + // Set up a client for bootstrapping + bootClient, err := api.NewClient(&api.Config{ + Address: a.HTTPAddr, + }) + require.NoError(tt, err) + + // Bootstrap the server and get the bootstrap token + var bootstrapResp *api.ACLToken + timer := &retry.Timer{Timeout: 10 * time.Second, Wait: 500 * time.Millisecond} + retry.RunWith(timer, tt, func(r *retry.R) { + bootstrapResp, _, err = bootClient.ACL().Bootstrap() + require.NoError(r, err) + }) + bootstrapToken := bootstrapResp.SecretID + require.NotEmpty(tt, bootstrapToken) // Set up consul client - client := testClient.APIClient + client, err := api.NewClient(&api.Config{ + Address: a.HTTPAddr, + Token: bootstrapToken, + }) + require.NoError(tt, err) // Create cross namespace policy - // This would have been created by the server-acl-init in the + // This would have been created by the acl bootstrapper in the // default namespace to be attached to all created namespaces. crossNamespaceRules := `namespace_prefix "" { service_prefix "" { @@ -650,9 +675,9 @@ func TestRun_ToConsulNamespacesACLs(t *testing.T) { // Set up the sync command ui := cli.NewMockUi() cmd := Command{ - UI: ui, - clientset: k8s, - connMgr: testClient.Watcher, + UI: ui, + clientset: k8s, + consulClient: client, logger: hclog.New(&hclog.LoggerOptions{ Name: tt.Name(), Level: hclog.Debug, @@ -661,9 +686,6 @@ func TestRun_ToConsulNamespacesACLs(t *testing.T) { // Set flags and run the command commonArgs := []string{ - "-addresses", "127.0.0.1", - "-http-port", strconv.Itoa(testClient.Cfg.HTTPPort), - "-token", bootToken, "-consul-write-interval", "500ms", "-log-level=debug", "-allow-k8s-namespace=*", @@ -674,7 +696,7 @@ func TestRun_ToConsulNamespacesACLs(t *testing.T) { defer stopCommand(tt, &cmd, exitChan) // Check the namespaces are created correctly - timer := &retry.Timer{Timeout: 10 * time.Second, Wait: 500 * time.Millisecond} + timer = &retry.Timer{Timeout: 10 * time.Second, Wait: 500 * time.Millisecond} retry.RunWith(timer, tt, func(r *retry.R) { // Check that we have the right number of namespaces namespaces, _, err := client.Namespaces().List(&api.QueryOptions{}) @@ -707,7 +729,16 @@ func TestRun_ToConsulNamespacesACLs(t *testing.T) { } } + }) }) } } + +// Set up test consul agent and fake kubernetes cluster client +func completeSetupEnterprise(t *testing.T) (*fake.Clientset, *testutil.TestServer) { + k8s := fake.NewSimpleClientset() + svr, err := testutil.NewTestServerConfigT(t, nil) + require.NoError(t, err) + return k8s, svr +} diff --git a/control-plane/subcommand/sync-catalog/command_test.go b/control-plane/subcommand/sync-catalog/command_test.go index 8228986d00..c4e892c834 100644 --- a/control-plane/subcommand/sync-catalog/command_test.go +++ b/control-plane/subcommand/sync-catalog/command_test.go @@ -3,12 +3,12 @@ package synccatalog import ( "context" "os" - "strconv" "syscall" "testing" "time" - "github.com/hashicorp/consul-k8s/control-plane/helper/test" + "github.com/hashicorp/consul/api" + 
"github.com/hashicorp/consul/sdk/testutil" "github.com/hashicorp/consul/sdk/testutil/retry" "github.com/hashicorp/go-hclog" "github.com/mitchellh/cli" @@ -55,7 +55,8 @@ func TestRun_FlagValidation(t *testing.T) { func TestRun_Defaults_SyncsConsulServiceToK8s(t *testing.T) { t.Parallel() - k8s, testClient := completeSetup(t) + k8s, testServer := completeSetup(t) + defer testServer.Stop() // Run the command. ui := cli.NewMockUi() @@ -66,12 +67,11 @@ func TestRun_Defaults_SyncsConsulServiceToK8s(t *testing.T) { Name: t.Name(), Level: hclog.Debug, }), - connMgr: testClient.Watcher, } exitChan := runCommandAsynchronously(&cmd, []string{ - "-addresses", "127.0.0.1", - "-http-port", strconv.Itoa(testClient.Cfg.HTTPPort), + "-http-addr", testServer.HTTPAddr, + "-consul-api-timeout", "5s", }) defer stopCommand(t, &cmd, exitChan) @@ -92,7 +92,8 @@ func TestRun_ExitCleanlyOnSignals(t *testing.T) { func testSignalHandling(sig os.Signal) func(*testing.T) { return func(t *testing.T) { - k8s, testClient := completeSetup(t) + k8s, testServer := completeSetup(t) + defer testServer.Stop() // Run the command. ui := cli.NewMockUi() @@ -103,12 +104,11 @@ func testSignalHandling(sig os.Signal) func(*testing.T) { Name: t.Name(), Level: hclog.Debug, }), - connMgr: testClient.Watcher, } exitChan := runCommandAsynchronously(&cmd, []string{ - "-addresses", "127.0.0.1", - "-http-port", strconv.Itoa(testClient.Cfg.HTTPPort), + "-http-addr", testServer.HTTPAddr, + "-consul-api-timeout", "5s", }) cmd.sendSignal(sig) @@ -132,29 +132,32 @@ func testSignalHandling(sig os.Signal) func(*testing.T) { func TestRun_ToConsulWithAddK8SNamespaceSuffix(t *testing.T) { t.Parallel() - k8s, testClient := completeSetup(t) - consulClient := testClient.APIClient + k8s, testServer := completeSetup(t) + defer testServer.Stop() + + consulClient, err := api.NewClient(&api.Config{ + Address: testServer.HTTPAddr, + }) + require.NoError(t, err) // Run the command. ui := cli.NewMockUi() cmd := Command{ - UI: ui, - clientset: k8s, + UI: ui, + clientset: k8s, + consulClient: consulClient, logger: hclog.New(&hclog.LoggerOptions{ Name: t.Name(), Level: hclog.Debug, }), flagAllowK8sNamespacesList: []string{"*"}, - connMgr: testClient.Watcher, } // create a service in k8s - _, err := k8s.CoreV1().Services(metav1.NamespaceDefault).Create(context.Background(), lbService("foo", "1.1.1.1"), metav1.CreateOptions{}) + _, err = k8s.CoreV1().Services(metav1.NamespaceDefault).Create(context.Background(), lbService("foo", "1.1.1.1"), metav1.CreateOptions{}) require.NoError(t, err) exitChan := runCommandAsynchronously(&cmd, []string{ - "-addresses", "127.0.0.1", - "-http-port", strconv.Itoa(testClient.Cfg.HTTPPort), // change the write interval, so we can see changes in Consul quicker "-consul-write-interval", "100ms", "-add-k8s-namespace-suffix", @@ -174,30 +177,32 @@ func TestRun_ToConsulWithAddK8SNamespaceSuffix(t *testing.T) { func TestCommand_Run_ToConsulChangeAddK8SNamespaceSuffixToTrue(t *testing.T) { t.Parallel() - k8s, testClient := completeSetup(t) + k8s, testServer := completeSetup(t) + defer testServer.Stop() - consulClient := testClient.APIClient + consulClient, err := api.NewClient(&api.Config{ + Address: testServer.HTTPAddr, + }) + require.NoError(t, err) // Run the command. 
ui := cli.NewMockUi() cmd := Command{ - UI: ui, - clientset: k8s, + UI: ui, + clientset: k8s, + consulClient: consulClient, logger: hclog.New(&hclog.LoggerOptions{ Name: t.Name(), Level: hclog.Debug, }), flagAllowK8sNamespacesList: []string{"*"}, - connMgr: testClient.Watcher, } // create a service in k8s - _, err := k8s.CoreV1().Services(metav1.NamespaceDefault).Create(context.Background(), lbService("foo", "1.1.1.1"), metav1.CreateOptions{}) + _, err = k8s.CoreV1().Services(metav1.NamespaceDefault).Create(context.Background(), lbService("foo", "1.1.1.1"), metav1.CreateOptions{}) require.NoError(t, err) exitChan := runCommandAsynchronously(&cmd, []string{ - "-addresses", "127.0.0.1", - "-http-port", strconv.Itoa(testClient.Cfg.HTTPPort), "-consul-write-interval", "100ms", }) @@ -212,8 +217,6 @@ func TestCommand_Run_ToConsulChangeAddK8SNamespaceSuffixToTrue(t *testing.T) { // restart sync with -add-k8s-namespace-suffix exitChan = runCommandAsynchronously(&cmd, []string{ - "-addresses", "127.0.0.1", - "-http-port", strconv.Itoa(testClient.Cfg.HTTPPort), "-consul-write-interval", "100ms", "-add-k8s-namespace-suffix", }) @@ -234,33 +237,35 @@ func TestCommand_Run_ToConsulChangeAddK8SNamespaceSuffixToTrue(t *testing.T) { func TestCommand_Run_ToConsulTwoServicesSameNameDifferentNamespace(t *testing.T) { t.Parallel() - k8s, testClient := completeSetup(t) + k8s, testServer := completeSetup(t) + defer testServer.Stop() - consulClient := testClient.APIClient + consulClient, err := api.NewClient(&api.Config{ + Address: testServer.HTTPAddr, + }) + require.NoError(t, err) // Run the command. ui := cli.NewMockUi() cmd := Command{ - UI: ui, - clientset: k8s, + UI: ui, + clientset: k8s, + consulClient: consulClient, logger: hclog.New(&hclog.LoggerOptions{ Name: t.Name(), Level: hclog.Debug, }), flagAllowK8sNamespacesList: []string{"*"}, - connMgr: testClient.Watcher, } // create two services in k8s - _, err := k8s.CoreV1().Services("bar").Create(context.Background(), lbService("foo", "1.1.1.1"), metav1.CreateOptions{}) + _, err = k8s.CoreV1().Services("bar").Create(context.Background(), lbService("foo", "1.1.1.1"), metav1.CreateOptions{}) require.NoError(t, err) _, err = k8s.CoreV1().Services("baz").Create(context.Background(), lbService("foo", "2.2.2.2"), metav1.CreateOptions{}) require.NoError(t, err) exitChan := runCommandAsynchronously(&cmd, []string{ - "-addresses", "127.0.0.1", - "-http-port", strconv.Itoa(testClient.Cfg.HTTPPort), "-consul-write-interval", "100ms", "-add-k8s-namespace-suffix", }) @@ -326,13 +331,17 @@ func TestRun_ToConsulAllowDenyLists(t *testing.T) { for name, c := range cases { t.Run(name, func(tt *testing.T) { - k8s, testClient := completeSetup(tt) + k8s, testServer := completeSetup(tt) + defer testServer.Stop() - consulClient := testClient.APIClient + consulClient, err := api.NewClient(&api.Config{ + Address: testServer.HTTPAddr, + }) + require.NoError(t, err) // Create two services in k8s in default and foo namespaces. 
{ - _, err := k8s.CoreV1().Services(metav1.NamespaceDefault).Create(context.Background(), lbService("default", "1.1.1.1"), metav1.CreateOptions{}) + _, err = k8s.CoreV1().Services(metav1.NamespaceDefault).Create(context.Background(), lbService("default", "1.1.1.1"), metav1.CreateOptions{}) require.NoError(tt, err) _, err = k8s.CoreV1().Namespaces().Create( context.Background(), @@ -348,8 +357,6 @@ func TestRun_ToConsulAllowDenyLists(t *testing.T) { } flags := []string{ - "-addresses", "127.0.0.1", - "-http-port", strconv.Itoa(testClient.Cfg.HTTPPort), "-consul-write-interval", "100ms", "-log-level=debug", } @@ -363,13 +370,13 @@ func TestRun_ToConsulAllowDenyLists(t *testing.T) { // Run the command ui := cli.NewMockUi() cmd := Command{ - UI: ui, - clientset: k8s, + UI: ui, + clientset: k8s, + consulClient: consulClient, logger: hclog.New(&hclog.LoggerOptions{ Name: tt.Name(), Level: hclog.Debug, }), - connMgr: testClient.Watcher, } exitChan := runCommandAsynchronously(&cmd, flags) defer stopCommand(tt, &cmd, exitChan) @@ -473,15 +480,17 @@ func TestRun_ToConsulChangingFlags(t *testing.T) { for name, c := range cases { t.Run(name, func(tt *testing.T) { - k8s, testClient := completeSetup(tt) + k8s, testServer := completeSetup(tt) + defer testServer.Stop() - consulClient := testClient.APIClient + consulClient, err := api.NewClient(&api.Config{ + Address: testServer.HTTPAddr, + }) + require.NoError(t, err) ui := cli.NewMockUi() commonArgs := []string{ - "-addresses", "127.0.0.1", - "-http-port", strconv.Itoa(testClient.Cfg.HTTPPort), "-consul-write-interval", "100ms", "-log-level=debug", } @@ -506,13 +515,13 @@ func TestRun_ToConsulChangingFlags(t *testing.T) { // Run the first command. { firstCmd := Command{ - UI: ui, - clientset: k8s, + UI: ui, + clientset: k8s, + consulClient: consulClient, logger: hclog.New(&hclog.LoggerOptions{ Name: tt.Name() + "-firstrun", Level: hclog.Debug, }), - connMgr: testClient.Watcher, } exitChan := runCommandAsynchronously(&firstCmd, append(commonArgs, c.FirstRunFlags...)) @@ -532,13 +541,13 @@ func TestRun_ToConsulChangingFlags(t *testing.T) { // Run the second command. { secondCmd := Command{ - UI: ui, - clientset: k8s, + UI: ui, + clientset: k8s, + consulClient: consulClient, logger: hclog.New(&hclog.LoggerOptions{ Name: tt.Name() + "-secondrun", Level: hclog.Debug, }), - connMgr: testClient.Watcher, } exitChan := runCommandAsynchronously(&secondCmd, append(commonArgs, c.SecondRunFlags...)) defer stopCommand(tt, &secondCmd, exitChan) @@ -568,12 +577,13 @@ func TestRun_ToConsulChangingFlags(t *testing.T) { } // Set up test consul agent and fake kubernetes cluster client. -func completeSetup(t *testing.T) (*fake.Clientset, *test.TestServerClient) { +func completeSetup(t *testing.T) (*fake.Clientset, *testutil.TestServer) { k8s := fake.NewSimpleClientset() - testClient := test.TestServerWithMockConnMgrWatcher(t, nil) + svr, err := testutil.NewTestServerConfigT(t, nil) + require.NoError(t, err) - return k8s, testClient + return k8s, svr } // This function starts the command asynchronously and returns a non-blocking chan. 
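A note on the sync-catalog changes above: the readiness endpoint now reports ready by querying the Consul cluster for its leader instead of consulting a local ready flag. The following is a minimal sketch of that probe pattern; the handler wiring, route, and port are illustrative assumptions, not code from this patch.

    package main

    import (
        "fmt"
        "log"
        "net/http"

        "github.com/hashicorp/consul/api"
    )

    // readyHandler reports ready only when the cluster has a leader, i.e.
    // the process can actually talk to the Consul servers.
    func readyHandler(client *api.Client) http.HandlerFunc {
        return func(rw http.ResponseWriter, _ *http.Request) {
            if _, err := client.Status().Leader(); err != nil {
                http.Error(rw, fmt.Sprintf("error getting leader status: %s", err), http.StatusInternalServerError)
                return
            }
            rw.WriteHeader(http.StatusOK)
        }
    }

    func main() {
        client, err := api.NewClient(api.DefaultConfig())
        if err != nil {
            log.Fatal(err)
        }
        http.HandleFunc("/health/ready", readyHandler(client))
        log.Fatal(http.ListenAndServe(":8080", nil)) // port is illustrative
    }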
diff --git a/control-plane/subcommand/webhook-cert-manager/command_test.go b/control-plane/subcommand/webhook-cert-manager/command_test.go index 7e302d5261..1841abf802 100644 --- a/control-plane/subcommand/webhook-cert-manager/command_test.go +++ b/control-plane/subcommand/webhook-cert-manager/command_test.go @@ -89,7 +89,7 @@ func testSignalHandling(sig os.Signal) func(*testing.T) { file, err := os.CreateTemp("", "config.json") require.NoError(t, err) - defer os.RemoveAll(file.Name()) + defer os.Remove(file.Name()) _, err = file.Write([]byte(configFile)) require.NoError(t, err) @@ -212,7 +212,7 @@ func TestRun_SecretDoesNotExist(t *testing.T) { file, err := os.CreateTemp("", "config.json") require.NoError(t, err) - defer os.RemoveAll(file.Name()) + defer os.Remove(file.Name()) _, err = file.Write([]byte(configFile)) require.NoError(t, err) @@ -340,7 +340,7 @@ func TestRun_SecretExists(t *testing.T) { file, err := os.CreateTemp("", "config.json") require.NoError(t, err) - defer os.RemoveAll(file.Name()) + defer os.Remove(file.Name()) _, err = file.Write([]byte(configFile)) require.NoError(t, err) @@ -440,7 +440,7 @@ func TestRun_SecretUpdates(t *testing.T) { file, err := os.CreateTemp("", "config.json") require.NoError(t, err) - defer os.RemoveAll(file.Name()) + defer os.Remove(file.Name()) _, err = file.Write([]byte(configFileUpdates)) require.NoError(t, err) @@ -630,7 +630,7 @@ func TestCertWatcher(t *testing.T) { file, err := os.CreateTemp("", "config.json") require.NoError(t, err) - defer os.RemoveAll(file.Name()) + defer os.Remove(file.Name()) _, err = file.Write([]byte(configFileUpdates)) require.NoError(t, err) diff --git a/control-plane/version/version.go b/control-plane/version/version.go index 35c4d195a9..082ffeb1f3 100644 --- a/control-plane/version/version.go +++ b/control-plane/version/version.go @@ -14,12 +14,12 @@ var ( // // Version must conform to the format expected by // github.com/hashicorp/go-version for tests to work. - Version = "1.0.0" + Version = "0.49.0" // A pre-release marker for the version. If this is "" (empty string) // then it means that it is a final release. Otherwise, this is a pre-release // such as "dev" (in development), "beta", "rc1", etc. - VersionPrerelease = "dev" + VersionPrerelease = "" ) // GetHumanVersion composes the parts of the version in a way that's suitable diff --git a/hack/aws-acceptance-test-cleanup/go.mod b/hack/aws-acceptance-test-cleanup/go.mod index 13e8f48909..a266c1a7e8 100644 --- a/hack/aws-acceptance-test-cleanup/go.mod +++ b/hack/aws-acceptance-test-cleanup/go.mod @@ -1,6 +1,6 @@ module github.com/hashicorp/consul-helm/hack/aws-acceptance-test-cleanup -go 1.19 +go 1.18 require ( github.com/aws/aws-sdk-go v1.38.63 diff --git a/hack/copy-crds-to-chart/go.mod b/hack/copy-crds-to-chart/go.mod index 73b1f10306..5456f28ce2 100644 --- a/hack/copy-crds-to-chart/go.mod +++ b/hack/copy-crds-to-chart/go.mod @@ -1,3 +1,3 @@ module github.com/hashicorp/consul-k8s/hack/copy-crds-to-chart -go 1.19 +go 1.18 diff --git a/hack/helm-reference-gen/go.mod b/hack/helm-reference-gen/go.mod index 7e41675f18..8595831de1 100644 --- a/hack/helm-reference-gen/go.mod +++ b/hack/helm-reference-gen/go.mod @@ -1,6 +1,6 @@ module github.com/hashicorp/consul-k8s/hack/helm-reference-gen -go 1.19 +go 1.18 require ( github.com/stretchr/testify v1.6.1
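Finally, the version.go hunk above pins Version to "0.49.0" and clears VersionPrerelease, which marks a final release. GetHumanVersion itself is not shown in this diff, so the sketch below only illustrates the usual compose-from-parts pattern; the helper name and exact formatting are assumptions, not the repository's implementation.

    package main

    import "fmt"

    // humanVersion illustrates composing a display version from a base
    // version and an optional pre-release marker; an empty marker means
    // a final release. The real GetHumanVersion may format differently.
    func humanVersion(version, prerelease string) string {
        if prerelease != "" {
            return fmt.Sprintf("v%s-%s", version, prerelease)
        }
        return "v" + version
    }

    func main() {
        fmt.Println(humanVersion("0.49.0", ""))   // v0.49.0 (final release)
        fmt.Println(humanVersion("1.0.0", "dev")) // v1.0.0-dev (pre-release)
    }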