From ada3938115ac74afe1370287506a87ce2c00bc58 Mon Sep 17 00:00:00 2001 From: Ronald Date: Thu, 6 Jul 2023 07:27:30 -0400 Subject: [PATCH 01/43] Add first integration test for jwt auth with intention (#18005) --- agent/xds/listeners.go | 5 +- test/integration/consul-container/go.mod | 2 + test/integration/consul-container/go.sum | 6 + .../consul-container/libs/utils/helpers.go | 104 +++++++++ .../test/jwtauth/jwt_auth_test.go | 215 ++++++++++++++++++ 5 files changed, 329 insertions(+), 3 deletions(-) create mode 100644 test/integration/consul-container/test/jwtauth/jwt_auth_test.go diff --git a/agent/xds/listeners.go b/agent/xds/listeners.go index 4278c3a8b6cc..6e67cd1c564e 100644 --- a/agent/xds/listeners.go +++ b/agent/xds/listeners.go @@ -1381,12 +1381,11 @@ func (s *ResourceGenerator) makeInboundListener(cfgSnap *proxycfg.ConfigSnapshot if err != nil { return nil, err } - - filterOpts.httpAuthzFilters = []*envoy_http_v3.HttpFilter{rbacFilter} - + filterOpts.httpAuthzFilters = []*envoy_http_v3.HttpFilter{} if jwtFilter != nil { filterOpts.httpAuthzFilters = append(filterOpts.httpAuthzFilters, jwtFilter) } + filterOpts.httpAuthzFilters = append(filterOpts.httpAuthzFilters, rbacFilter) meshConfig := cfgSnap.MeshConfig() includeXFCC := meshConfig == nil || meshConfig.HTTP == nil || !meshConfig.HTTP.SanitizeXForwardedClientCert diff --git a/test/integration/consul-container/go.mod b/test/integration/consul-container/go.mod index 46a4d21c9b9f..9eba92ca55fb 100644 --- a/test/integration/consul-container/go.mod +++ b/test/integration/consul-container/go.mod @@ -7,6 +7,7 @@ require ( github.com/avast/retry-go v3.0.0+incompatible github.com/docker/docker v23.0.6+incompatible github.com/docker/go-connections v0.4.0 + github.com/go-jose/go-jose/v3 v3.0.0 github.com/hashicorp/consul v0.0.0-00010101000000-000000000000 github.com/hashicorp/consul/api v1.22.0-rc1 github.com/hashicorp/consul/envoyextensions v0.3.0-rc1 @@ -83,6 +84,7 @@ require ( github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 // indirect github.com/sirupsen/logrus v1.9.0 // indirect github.com/stretchr/objx v0.5.0 // indirect + golang.org/x/crypto v0.1.0 // indirect golang.org/x/exp v0.0.0-20230321023759-10a507213a29 // indirect golang.org/x/net v0.10.0 // indirect golang.org/x/sync v0.2.0 // indirect diff --git a/test/integration/consul-container/go.sum b/test/integration/consul-container/go.sum index b7ae09743d5b..02a74ddbe68e 100644 --- a/test/integration/consul-container/go.sum +++ b/test/integration/consul-container/go.sum @@ -79,6 +79,8 @@ github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= github.com/fatih/color v1.14.1 h1:qfhVLaG5s+nCROl1zJsZRxFeYrHLqWroPOQ8BWiNb4w= github.com/fatih/color v1.14.1/go.mod h1:2oHN61fhTpgcxD3TSWCgKDiH1+x4OiDVVGH8WlgGZGg= +github.com/go-jose/go-jose/v3 v3.0.0 h1:s6rrhirfEP/CGIoc6p+PZAeogN2SxKav6Wp7+dyMWVo= +github.com/go-jose/go-jose/v3 v3.0.0/go.mod h1:RNkWWRld676jZEYoV3+XK8L2ZnNSvIsxFMht0mSX+u8= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= @@ -101,6 +103,7 @@ github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9 github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.1/go.mod 
h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= @@ -286,6 +289,7 @@ github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpE github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= @@ -303,10 +307,12 @@ github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9dec github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190911031432-227b76d455e7/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.1.0 h1:MDRAIl0xIo9Io2xV565hzXHw3zVseKrJKodhohM5CjU= +golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20230321023759-10a507213a29 h1:ooxPy7fPvB4kwsA2h+iBNHkAbp/4JxTSwCmvdjEYmug= golang.org/x/exp v0.0.0-20230321023759-10a507213a29/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc= diff --git a/test/integration/consul-container/libs/utils/helpers.go b/test/integration/consul-container/libs/utils/helpers.go index e18293a96ac0..5f75e3e4b3f7 100644 --- a/test/integration/consul-container/libs/utils/helpers.go +++ b/test/integration/consul-container/libs/utils/helpers.go @@ -4,6 +4,15 @@ package utils import ( + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" + "crypto/x509" + "encoding/pem" + "fmt" + + "github.com/go-jose/go-jose/v3" + "github.com/go-jose/go-jose/v3/jwt" "github.com/hashicorp/consul/api" ) @@ -18,3 +27,98 @@ func ApplyDefaultProxySettings(c *api.Client) (bool, error) { ok, _, err := c.ConfigEntries().Set(req, &api.WriteOptions{}) return ok, err } + +// Generates a private and public key pair that is for signing +// JWT. 
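+// It returns PEM-encoded strings: an ECDSA P-256 public key in a
+// "PUBLIC KEY" block and the matching private key in an "EC PRIVATE KEY" block.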
+func GenerateKey() (pub, priv string, err error) { + privateKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + + if err != nil { + return "", "", fmt.Errorf("error generating private key: %w", err) + } + + { + derBytes, err := x509.MarshalECPrivateKey(privateKey) + if err != nil { + return "", "", fmt.Errorf("error marshaling private key: %w", err) + } + priv = string(pem.EncodeToMemory(&pem.Block{ + Type: "EC PRIVATE KEY", + Bytes: derBytes, + })) + } + { + derBytes, err := x509.MarshalPKIXPublicKey(privateKey.Public()) + if err != nil { + return "", "", fmt.Errorf("error marshaling public key: %w", err) + } + pub = string(pem.EncodeToMemory(&pem.Block{ + Type: "PUBLIC KEY", + Bytes: derBytes, + })) + } + + return pub, priv, nil +} + +// SignJWT will bundle the provided claims into a signed JWT. The provided key +// is assumed to be ECDSA. +// +// If no private key is provided, it will generate a private key. These can +// be retrieved via the SigningKeys() method. +func SignJWT(privKey string, claims jwt.Claims, privateClaims interface{}) (string, error) { + var err error + if privKey == "" { + _, privKey, err = GenerateKey() + if err != nil { + return "", err + } + } + var key *ecdsa.PrivateKey + block, _ := pem.Decode([]byte(privKey)) + if block != nil { + key, err = x509.ParseECPrivateKey(block.Bytes) + if err != nil { + return "", err + } + } + + sig, err := jose.NewSigner( + jose.SigningKey{Algorithm: jose.ES256, Key: key}, + (&jose.SignerOptions{}).WithType("JWT"), + ) + if err != nil { + return "", err + } + + raw, err := jwt.Signed(sig). + Claims(claims). + Claims(privateClaims). + CompactSerialize() + if err != nil { + return "", err + } + + return raw, nil +} + +// newJWKS converts a pem-encoded public key into JWKS data suitable for a +// verification endpoint response +func NewJWKS(pubKey string) (*jose.JSONWebKeySet, error) { + block, _ := pem.Decode([]byte(pubKey)) + if block == nil || block.Type != "PUBLIC KEY" { + return nil, fmt.Errorf("unable to decode public key") + } + + pub, err := x509.ParsePKIXPublicKey(block.Bytes) + if err != nil { + return nil, err + } + return &jose.JSONWebKeySet{ + Keys: []jose.JSONWebKey{ + { + Key: pub, + }, + }, + }, nil +} diff --git a/test/integration/consul-container/test/jwtauth/jwt_auth_test.go b/test/integration/consul-container/test/jwtauth/jwt_auth_test.go new file mode 100644 index 000000000000..37c846d0a6d4 --- /dev/null +++ b/test/integration/consul-container/test/jwtauth/jwt_auth_test.go @@ -0,0 +1,215 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package jwtauth + +import ( + "encoding/base64" + "encoding/json" + "fmt" + "net/http" + + "github.com/hashicorp/consul/api" + "github.com/hashicorp/consul/sdk/testutil/retry" + "github.com/stretchr/testify/require" + + "github.com/go-jose/go-jose/v3/jwt" + libassert "github.com/hashicorp/consul/test/integration/consul-container/libs/assert" + libcluster "github.com/hashicorp/consul/test/integration/consul-container/libs/cluster" + libservice "github.com/hashicorp/consul/test/integration/consul-container/libs/service" + libtopology "github.com/hashicorp/consul/test/integration/consul-container/libs/topology" + libutils "github.com/hashicorp/consul/test/integration/consul-container/libs/utils" + "github.com/hashicorp/go-cleanhttp" + "testing" + "time" +) + +// TestJWTAuthConnectService summary +// This test ensures that when we have an intention referencing a JWT, requests +// without JWT authorization headers are denied. 
And requests with the correct JWT +// Authorization header are successful +// +// Steps: +// - Creates a single agent cluster +// - Creates a static-server and sidecar containers +// - Registers the created static-server and sidecar with consul +// - Create a static-client and sidecar containers +// - Registers the static-client and sidecar with consul +// - Ensure client sidecar is running as expected +// - Make a request without the JWT Authorization header and expects 401 StatusUnauthorized +// - Make a request with the JWT Authorization header and expects a 200 +func TestJWTAuthConnectService(t *testing.T) { + t.Parallel() + + cluster, _, _ := libtopology.NewCluster(t, &libtopology.ClusterConfig{ + NumServers: 1, + NumClients: 1, + ApplyDefaultProxySettings: true, + BuildOpts: &libcluster.BuildOptions{ + Datacenter: "dc1", + InjectAutoEncryption: true, + InjectGossipEncryption: true, + }, + }) + + clientService := createServices(t, cluster) + _, clientPort := clientService.GetAddr() + _, clientAdminPort := clientService.GetAdminAddr() + + libassert.AssertUpstreamEndpointStatus(t, clientAdminPort, "static-server.default", "HEALTHY", 1) + libassert.AssertContainerState(t, clientService, "running") + libassert.AssertFortioName(t, fmt.Sprintf("http://localhost:%d", clientPort), "static-server", "") + + claims := jwt.Claims{ + Subject: "r3qXcK2bix9eFECzsU3Sbmh0K16fatW6@clients", + Audience: jwt.Audience{"https://consul.test"}, + Issuer: "https://legit.issuer.internal/", + NotBefore: jwt.NewNumericDate(time.Now().Add(-5 * time.Second)), + Expiry: jwt.NewNumericDate(time.Now().Add(60 * time.Minute)), + } + + jwks, jwt := makeJWKSAndJWT(t, claims) + + // configure proxy-defaults, jwt-provider and intention + configureProxyDefaults(t, cluster) + configureJWTProvider(t, cluster, jwks, claims) + configureIntentions(t, cluster) + + baseURL := fmt.Sprintf("http://localhost:%d", clientPort) + // fails without jwt headers + doRequest(t, baseURL, http.StatusUnauthorized, "") + // succeeds with jwt + doRequest(t, baseURL, http.StatusOK, jwt) +} + +func createServices(t *testing.T, cluster *libcluster.Cluster) libservice.Service { + node := cluster.Agents[0] + client := node.GetClient() + // Create a service and proxy instance + serviceOpts := &libservice.ServiceOpts{ + Name: libservice.StaticServerServiceName, + ID: "static-server", + HTTPPort: 8080, + GRPCPort: 8079, + } + + // Create a service and proxy instance + _, _, err := libservice.CreateAndRegisterStaticServerAndSidecar(node, serviceOpts) + require.NoError(t, err) + + libassert.CatalogServiceExists(t, client, "static-server-sidecar-proxy", nil) + libassert.CatalogServiceExists(t, client, libservice.StaticServerServiceName, nil) + + // Create a client proxy instance with the server as an upstream + clientConnectProxy, err := libservice.CreateAndRegisterStaticClientSidecar(node, "", false, false) + require.NoError(t, err) + + libassert.CatalogServiceExists(t, client, "static-client-sidecar-proxy", nil) + + return clientConnectProxy +} + +// creates a JWKS and JWT that will be used for validation +func makeJWKSAndJWT(t *testing.T, claims jwt.Claims) (string, string) { + pub, priv, err := libutils.GenerateKey() + require.NoError(t, err) + + jwks, err := libutils.NewJWKS(pub) + require.NoError(t, err) + + jwksJson, err := json.Marshal(jwks) + require.NoError(t, err) + + type orgs struct { + Primary string `json:"primary"` + } + privateCl := struct { + FirstName string `json:"first_name"` + Org orgs `json:"org"` + Groups []string `json:"groups"` + 
}{ + FirstName: "jeff2", + Org: orgs{"engineering"}, + Groups: []string{"foo", "bar"}, + } + + jwt, err := libutils.SignJWT(priv, claims, privateCl) + require.NoError(t, err) + return string(jwksJson), jwt +} + +// configures the protocol to http as this is needed for jwt-auth +func configureProxyDefaults(t *testing.T, cluster *libcluster.Cluster) { + client := cluster.Agents[0].GetClient() + + ok, _, err := client.ConfigEntries().Set(&api.ProxyConfigEntry{ + Kind: api.ProxyDefaults, + Name: api.ProxyConfigGlobal, + Config: map[string]interface{}{ + "protocol": "http", + }, + }, nil) + require.NoError(t, err) + require.True(t, ok) +} + +// creates a JWT local provider +func configureJWTProvider(t *testing.T, cluster *libcluster.Cluster, jwks string, claims jwt.Claims) { + client := cluster.Agents[0].GetClient() + + ok, _, err := client.ConfigEntries().Set(&api.JWTProviderConfigEntry{ + Kind: api.JWTProvider, + Name: "test-jwt", + JSONWebKeySet: &api.JSONWebKeySet{ + Local: &api.LocalJWKS{ + JWKS: base64.StdEncoding.EncodeToString([]byte(jwks)), + }, + }, + Issuer: claims.Issuer, + Audiences: claims.Audience, + }, nil) + require.NoError(t, err) + require.True(t, ok) +} + +// creates an intention referencing the jwt provider +func configureIntentions(t *testing.T, cluster *libcluster.Cluster) { + client := cluster.Agents[0].GetClient() + + ok, _, err := client.ConfigEntries().Set(&api.ServiceIntentionsConfigEntry{ + Kind: "service-intentions", + Name: libservice.StaticServerServiceName, + Sources: []*api.SourceIntention{ + { + Name: libservice.StaticClientServiceName, + Action: api.IntentionActionAllow, + }, + }, + JWT: &api.IntentionJWTRequirement{ + Providers: []*api.IntentionJWTProvider{ + { + Name: "test-jwt", + VerifyClaims: []*api.IntentionJWTClaimVerification{}, + }, + }, + }, + }, nil) + require.NoError(t, err) + require.True(t, ok) +} + +func doRequest(t *testing.T, url string, expStatus int, jwt string) { + retry.RunWith(&retry.Timer{Timeout: 5 * time.Second, Wait: time.Second}, t, func(r *retry.R) { + + client := cleanhttp.DefaultClient() + + req, err := http.NewRequest("GET", url, nil) + require.NoError(r, err) + if jwt != "" { + req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", jwt)) + } + resp, err := client.Do(req) + require.NoError(r, err) + require.Equal(r, expStatus, resp.StatusCode) + }) +} From f7d399f7fc39283e75f885cb22772cbab25db3b2 Mon Sep 17 00:00:00 2001 From: trujillo-adam <47586768+trujillo-adam@users.noreply.github.com> Date: Thu, 6 Jul 2023 09:31:45 -0700 Subject: [PATCH 02/43] fix stand-in text for name field (#18030) --- .../connect/config-entries/control-plane-request-limit.mdx | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/website/content/docs/connect/config-entries/control-plane-request-limit.mdx b/website/content/docs/connect/config-entries/control-plane-request-limit.mdx index c9d05e2da3b0..21b06f6533c1 100644 --- a/website/content/docs/connect/config-entries/control-plane-request-limit.mdx +++ b/website/content/docs/connect/config-entries/control-plane-request-limit.mdx @@ -42,7 +42,7 @@ When every field is defined, a control plane request limit configuration entry h ```hcl kind = "control-plane-request-limit" mode = "permissive" -name = "" +name = "" read_rate = 100 write_rate = 100 kv = { @@ -64,7 +64,7 @@ catalog = { { "kind": "control-plane-request-limit", "mode": "permissive", - "name": "", + "name": "", "read_rate": 100, "write_rate": 100, "kv": { @@ -85,7 +85,7 @@ catalog = { ```yaml kind: control-plane-request-limit 
mode: permissive -name: +name: read_rate: 100 write_rate: 100 kv: From 820cdbb226bd32f11b31524af1578939b4d01f12 Mon Sep 17 00:00:00 2001 From: trujillo-adam <47586768+trujillo-adam@users.noreply.github.com> Date: Thu, 6 Jul 2023 10:37:38 -0700 Subject: [PATCH 03/43] removed sameness conf entry from failover nav (#18033) --- website/data/docs-nav-data.json | 4 ---- 1 file changed, 4 deletions(-) diff --git a/website/data/docs-nav-data.json b/website/data/docs-nav-data.json index 16feb0d11d97..7149dfebb299 100644 --- a/website/data/docs-nav-data.json +++ b/website/data/docs-nav-data.json @@ -780,10 +780,6 @@ { "title": "Configuration", "routes": [ - { - "title": "Sameness groups", - "href": "/consul/docs/connect/config-entries/service-resolver" - }, { "title": "Service resolver", "href": "/consul/docs/connect/config-entries/service-resolver" From 85f2ae024c66b67f9d8011e320fd9b3693df6b2f Mon Sep 17 00:00:00 2001 From: David Yu Date: Thu, 6 Jul 2023 12:46:48 -0700 Subject: [PATCH 04/43] docs - add service sync annotations and k8s service weight annotation (#18032) * Docs for https://github.com/hashicorp/consul-k8s/pull/2293 * remove versions for enterprise features since they are old --------- Co-authored-by: Tu Nguyen --- .../docs/k8s/annotations-and-labels.mdx | 63 ++++++++++++++++--- website/content/docs/k8s/service-sync.mdx | 8 +-- 2 files changed, 57 insertions(+), 14 deletions(-) diff --git a/website/content/docs/k8s/annotations-and-labels.mdx b/website/content/docs/k8s/annotations-and-labels.mdx index 547a82a6ccb3..56d0aa6006f1 100644 --- a/website/content/docs/k8s/annotations-and-labels.mdx +++ b/website/content/docs/k8s/annotations-and-labels.mdx @@ -9,16 +9,21 @@ description: >- ## Overview -Consul on Kubernetes provides a few options for customizing how connect-inject behavior should be configured. +Consul on Kubernetes provides a few options for customizing how connect-inject or service sync behavior should be configured. This allows the user to configure natively configure Consul on select Kubernetes resources (i.e. pods, services). -- [Annotations](#annotations) -- [Labels](#labels) +- [Consul Service Mesh](#consul-service-mesh) + - [Annotations](#annotations) + - [Labels](#labels) +- [Service Sync](#service-sync) + - [Annotations](#annotations-1) The noun _connect_ is used throughout this documentation to refer to the connect subsystem that provides Consul's service mesh capabilities. -## Annotations +## Consul Service Mesh + +### Annotations The following Kubernetes resource annotations could be used on a pod to control connect-inject behavior: @@ -76,7 +81,7 @@ The following Kubernetes resource annotations could be used on a pod to control local port to listen for those connections. When transparent proxy is enabled, this annotation is optional. This annotation can be either _labeled_ or _unlabeled_. We recommend the labeled format because it has a more consistent syntax and can be used to reference cluster peers as upstreams. - - **Labeled** (requires Consul on Kubernetes v0.45.0+): + - **Labeled**: The labeled annotation format allows you to reference any service as an upstream. You can specify a Consul Enterprise namespace. You can also specify an admin partition in the same datacenter, a cluster peer, or a WAN-federated datacenter. 
@@ -133,7 +138,7 @@ The following Kubernetes resource annotations could be used on a pod to control "consul.hashicorp.com/connect-service-upstreams":"[service-name]:[port]:[optional datacenter]" ``` - - Namespace (requires Consul Enterprise 1.7+): Upstream services may be running in a different namespace. Place + - Namespace: Upstream services may be running in a different namespace. Place the upstream namespace after the service name. For additional details about configuring the injector, refer to [Consul Enterprise namespaces](#consul-enterprise-namespaces) . ```yaml @@ -144,7 +149,7 @@ The following Kubernetes resource annotations could be used on a pod to control If the namespace is not specified, the annotation defaults to the namespace of the source service. Consul Enterprise v1.7 and older interprets the value placed in the namespace position as part of the service name. - - Admin partitions (requires Consul Enterprise 1.11+): Upstream services may be running in a different + - Admin partitions: Upstream services may be running in a different partition. When specifying a partition, you must also specify a namespace. Place the partition name after the namespace. If you specify the name of the datacenter, it must be the local datacenter. Communicating across partitions using this method is only supported within a datacenter. For cross partition communication across datacenters, [establish a cluster peering connection](/consul/docs/k8s/connect/cluster-peering/usage/establish-peering) and set the upstream with a labeled annotation format. @@ -265,7 +270,7 @@ The following Kubernetes resource annotations could be used on a pod to control "consul.hashicorp.com/consul-sidecar-user-volume-mount": "[{\"name\": \"secrets-store-mount\", \"mountPath\": \"/mnt/secrets-store\"}]" ``` -## Labels +### Labels Resource labels could be used on a Kubernetes service to control connect-inject behavior. @@ -276,3 +281,45 @@ Resource labels could be used on a Kubernetes service to control connect-inject registration to ignore all services except for the one which should be used for routing requests using Consul. +## Service Sync + +### Annotations + +The following Kubernetes resource annotations could be used on a pod to [Service Sync](https://developer.hashicorp.com/consul/docs/k8s/service-sync) behavior: + +- `consul.hashicorp.com/service-sync`: If this is set to `true`, then the Kubernetes service is explicitly configured to be synced to Consul. + + ```yaml + annotations: + 'consul.hashicorp.com/service-sync': 'true' + ``` + +- `consul.hashicorp.com/service-port`: Configures the port to register to the Consul Catalog for the Kubernetes service. The annotation value may be a name of a port (recommended) or an exact port value. Refer to [service ports](https://developer.hashicorp.com/consul/docs/k8s/service-sync#service-ports) for more information. + + ```yaml + annotations: + 'consul.hashicorp.com/service-port': 'http' + ``` + +- `consul.hashicorp.com/service-tags`: A comma separated list of strings (without whitespace) to use for registering tags to the service registered to Consul. These custom tags automatically include the `k8s` tag which can't be disabled. + + ```yaml + annotations: + 'consul.hashicorp.com/service-tags': 'primary,foo' + ``` + +- `consul.hashicorp.com/service-meta-KEY`: A map for specifying service metadata for Consul services. The "KEY" below can be set to any key. This allows you to set multiple meta values. 
+ + ```yaml + annotations: + 'consul.hashicorp.com/service-meta-KEY': 'value' + ``` + +- `consul.hashicorp.com/service-weight:` - Configures ability to support weighted loadbalancing by service annotation for Catalog Sync. The integer provided will be applied as a weight for the `passing` state for the health of the service. Refer to [weights](/consul/docs/services/configuration/services-configuration-reference#weights) in service configuration for more information on how this is leveraged for services in the Consul catalog. + + ```yaml + annotations: + consul.hashicorp.com/service-weight: 10 + ``` + + diff --git a/website/content/docs/k8s/service-sync.mdx b/website/content/docs/k8s/service-sync.mdx index db3e2bc9d833..54ebbdd54d2c 100644 --- a/website/content/docs/k8s/service-sync.mdx +++ b/website/content/docs/k8s/service-sync.mdx @@ -12,7 +12,7 @@ services are available to Consul agents and services in Consul can be available as first-class Kubernetes services. This functionality is provided by the [consul-k8s project](https://github.com/hashicorp/consul-k8s) and can be automatically installed and configured using the -[Consul Helm chart](/consul/docs/k8s/installation/install). +[Consul K8s Helm chart](/consul/docs/k8s/installation/install). ![screenshot of a Kubernetes service in the UI](/img/k8s-service.png) @@ -31,11 +31,7 @@ service discovery, including hosted services like databases. ~> Enabling both Service Mesh and Service Sync on the same Kubernetes services is not supported, as Service Mesh also registers Kubernetes service instances to Consul. Ensure that Service Sync is only enabled for namespaces and services that are not injected with the Consul sidecar for Service Mesh as described in [Sync Enable/Disable](/consul/docs/k8s/service-sync#sync-enable-disable). -The service sync uses an external long-running process in the -[consul-k8s project](https://github.com/hashicorp/consul-k8s). This process -can run either inside or outside of a Kubernetes cluster. However, running this process within -the Kubernetes cluster is generally easier since it is automated using the -[Helm chart](/consul/docs/k8s/helm). +The service sync feature deploys a long-running process which can run either inside or outside of a Kubernetes cluster. However, running this process within the Kubernetes cluster is generally easier since it is automated using the [Helm chart](/consul/docs/k8s/helm). The Consul server cluster can run either in or out of a Kubernetes cluster. 
The Consul server cluster does not need to be running on the same machine From b9a6a744d5e309832d52ff38bc89c561f1aac201 Mon Sep 17 00:00:00 2001 From: David Yu Date: Fri, 7 Jul 2023 09:22:03 -0700 Subject: [PATCH 05/43] docs - add jobs use case for service mesh k8s (#18037) * docs - add jobs use case for service mesh k8s * add code blocks --- website/content/docs/k8s/connect/index.mdx | 218 ++++++++++++++++++--- 1 file changed, 196 insertions(+), 22 deletions(-) diff --git a/website/content/docs/k8s/connect/index.mdx b/website/content/docs/k8s/connect/index.mdx index afa625a0ba8d..884119cd92a5 100644 --- a/website/content/docs/k8s/connect/index.mdx +++ b/website/content/docs/k8s/connect/index.mdx @@ -19,10 +19,33 @@ Consul service mesh is enabled by default when you install Consul on Kubernetes If `connectInject.default` is set to `false` or you want to explicitly enable service mesh sidecar proxy injection for a specific deployment, add the `consul.hashicorp.com/connect-inject` annotation to the pod specification template and set it to `true` when connecting services to the mesh. -### Example +### Service names + +When the service is onboarded, the name registered in Consul is set to the name of the Kubernetes Service associated with the Pod. You can specify a custom name for the service in the [`consul.hashicorp.com/connect-service` annotation](/consul/docs/k8s/annotations-and-labels#consul-hashicorp-com-connect-service), but if ACLs are enabled, then the name of the service registered in Consul must match the Pod's `ServiceAccount` name. + +### Transparent proxy mode + +By default, the Consul service mesh runs in transparent proxy mode. This mode forces inbound and outbound traffic through the sidecar proxy even though the service binds to all interfaces. Transparent proxy infers the location of upstream services using Consul service intentions, and also allows you to use Kubernetes DNS as you normally would for your workloads. + +When transparent proxy mode is enabled, all service-to-service traffic is required to use mTLS. While onboarding new services to service mesh, your network may have mixed mTLS and non-mTLS traffic, which can result in broken service-to-service communication. You can temporarily enable permissive mTLS mode during the onboarding process so that existing mesh services can accept traffic from services that are not yet fully onboarded. Permissive mTLS enables sidecar proxies to access both mTLS and non-mTLS traffic. Refer to [Onboard mesh services in transparent proxy mode](/consul/docs/k8s/connect/onboarding-tproxy-mode) for additional information. + +### Kubernetes service mesh workload scenarios + +-> **Note:** A Kubernetes Service is **required** to register services on the Consul Service Mesh as Consul monitors the lifecyle of a Kubernetes service and its service instances using the service object. In addition the Kubernetes service is used to register and de-register the service from the Catalog. + +Below are multiple scenarios for registering workloads on Kubernetes onto Consul Service Mesh. Each scenario provides an example Kubernetes manifest to help quickly understand how to use Consul Service Mesh on a specific Kubernetes workload type. 
+ +- [Kubernetes Pods running as a deployment](#kubernetes-pods-running-as-a-deployment) +- [Connecting to mesh-enabled Services](#connecting-to-mesh-enabled-services) +- [Kubernetes Jobs](#kubernetes-jobs) +- [Kubernetes Pods with Multiple ports](#kubernetes-pods-with-multiple-ports) + +#### Kubernetes Pods running as a deployment The following example shows a Kubernetes configuration that specifically enables service mesh connections for the `static-server` service. Consul starts and registers a sidecar proxy that listens on port 20000 by default and proxies valid inbound connections to port 8080. + + ```yaml apiVersion: v1 kind: Service @@ -72,27 +95,18 @@ spec: serviceAccountName: static-server ``` -To establish a connection to the Pod using service mesh, a client must use another mesh proxy. The client mesh proxy will use Consul service discovery to find all available upstream proxies and their public ports. - -### Service names - -When the service is onboarded, the name registered in Consul is set to the name of the Kubernetes Service associated with the Pod. You can specify a custom name for the service in the [`consul.hashicorp.com/connect-service` annotation](/consul/docs/k8s/annotations-and-labels#consul-hashicorp-com-connect-service), but if ACLs are enabled, then the name of the service registered in Consul must match the Pod's `ServiceAccount` name. - -### Transparent proxy mode - -By default, the Consul service mesh runs in transparent proxy mode. This mode forces inbound and outbound traffic through the sidecar proxy even though the service binds to all interfaces. Transparent proxy infers the location of upstream services using Consul service intentions, and also allows you to use Kubernetes DNS as you normally would for your workloads. + -When transparent proxy mode is enabled, all service-to-service traffic is required to use mTLS. While onboarding new services to service mesh, your network may have mixed mTLS and non-mTLS traffic, which can result in broken service-to-service communication. You can temporarily enable permissive mTLS mode during the onboarding process so that existing mesh services can accept traffic from services that are not yet fully onboarded. Permissive mTLS enables sidecar proxies to access both mTLS and non-mTLS traffic. Refer to [Onboard mesh services in transparent proxy mode](/consul/docs/k8s/connect/onboarding-tproxy-mode) for additional information. +To establish a connection to the Pod using service mesh, a client must use another mesh proxy. The client mesh proxy will use Consul service discovery to find all available upstream proxies and their public ports. -### Connecting to Mesh-Enabled Services +#### Connecting to Mesh-Enabled Services The example Deployment specification below configures a Deployment that is capable of establishing connections to our previous example "static-server" service. The connection to this static text service happens over an authorized and encrypted connection via service mesh. --> **Note:** As of consul-k8s `v0.26.0` and Consul Helm `v0.32.0`, having a Kubernetes -Service is **required** to run services on the Consul Service Mesh. + ```yaml apiVersion: v1 @@ -138,6 +152,8 @@ spec: serviceAccountName: static-client ``` + + By default when ACLs are enabled or when ACLs default policy is `allow`, Consul will automatically configure proxies with all upstreams from the same datacenter. 
When ACLs are enabled with default `deny` policy, @@ -172,7 +188,95 @@ $ kubectl exec deploy/static-client -- curl --silent http://static-server/ command terminated with exit code 52 ``` -### Kubernetes Pods with Multiple ports +#### Kubernetes Jobs + +Kubernetes Jobs run pods that successfully terminate and only make outbound requests to services on the mesh. In order to register a Kubernetes job on the mesh, you must provide an integer value for the `consul.hashicorp.com/sidecar-proxy-lifecycle-shutdown-grace-period-seconds` annotation, and issue a request the `http://127.0.0.1:20600/graceful_shutdown` API endpoint for `consul-dataplane` to gracefully shut down the `consul-dataplane` sidecar after the job is complete. , + +Below is an example Kubernetes manifest that deploys a job correctly. + + + +```yaml +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: test-job + namespace: default +--- +apiVersion: v1 +kind: Service +metadata: + name: test-job + namespace: default +spec: + selector: + app: test-job + ports: + - port: 80 +--- +apiVersion: batch/v1 +kind: Job +metadata: + name: test-job + namespace: default + labels: + app: test-job +spec: + template: + metadata: + annotations: + 'consul.hashicorp.com/connect-inject': 'true' + 'consul.hashicorp.com/sidecar-proxy-lifecycle-shutdown-grace-period-seconds': '5' + labels: + app: test-job + spec: + containers: + - name: test-job + image: alpine/curl:3.14 + ports: + - containerPort: 80 + command: + - /bin/sh + - -c + - | + echo "Started test job" + sleep 10 + echo "Killing proxy" + curl --max-time 2 -s -f -XPOST http://127.0.0.1:20600/graceful_shutdown + sleep 10 + echo "Ended test job" + serviceAccountName: test-job + restartPolicy: Never +``` + + + +Upon completing the job you should be able to verify that all containers are shut down within the pod. + +```shell-session +$ kubectl get pods +NAME READY STATUS RESTARTS AGE +test-job-49st7 0/2 Completed 0 3m55s +``` + +```shell-session +$ kubectl get job +NAME COMPLETIONS DURATION AGE +test-job 1/1 30s 4m31s +``` + +In addition, based on the logs emitted by the pod you can verify that the proxy was indeed shut down prior to job completing. + +```shell-session +$ kubectl logs test-job-49st7 -c test-job +Started test job +Killing proxy +Ended test job +``` + +#### Kubernetes Pods with Multiple ports + To configure a pod with multiple ports to be a part of the service mesh and receive and send service mesh traffic, you will need to add configuration so that a Consul service can be registered per port. This is because services in Consul currently support a single port per service instance. @@ -184,6 +288,9 @@ First, decide on the names for the two Consul services that will correspond to t chooses the names `web` for `8080` and `web-admin` for `9090`. Create two service accounts for `web` and `web-admin`: + + + ```yaml apiVersion: v1 kind: ServiceAccount @@ -195,7 +302,14 @@ kind: ServiceAccount metadata: name: web-admin ``` + + + + Create two Service objects for `web` and `web-admin`: + + + ```yaml apiVersion: v1 kind: Service @@ -221,12 +335,17 @@ spec: port: 80 targetPort: 9090 ``` + + + `web` will target `containerPort` `8080` and select pods labeled `app: web`. `web-admin` will target `containerPort` `9090` and will also select the same pods. 
~> Kubernetes 1.24+ only In Kubernetes 1.24+ you need to [create a Kubernetes secret](https://kubernetes.io/docs/concepts/configuration/secret/#service-account-token-secrets) for each multi-port service that references the ServiceAccount, and the Kubernetes secret must have the same name as the ServiceAccount: + + ```yaml apiVersion: v1 kind: Secret @@ -245,12 +364,15 @@ metadata: type: kubernetes.io/service-account-token ``` + + Create a Deployment with any chosen name, and use the following annotations: ```yaml -consul.hashicorp.com/connect-inject: true -consul.hashicorp.com/transparent-proxy: false -consul.hashicorp.com/connect-service: web,web-admin -consul.hashicorp.com/connect-service-port: 8080,9090 +annotations: + 'consul.hashicorp.com/connect-inject': 'true' + 'consul.hashicorp.com/transparent-proxy': 'false' + 'consul.hashicorp.com/connect-service': 'web,web-admin' + 'consul.hashicorp.com/connect-service-port': '8080,9090' ``` Note that the order the ports are listed in the same order as the service names, i.e. the first service name `web` corresponds to the first port, `8080`, and the second service name `web-admin` corresponds to the second port, `9090`. @@ -260,7 +382,11 @@ The service account on the pod spec for the deployment should be set to the firs serviceAccountName: web ``` -For reference, the full deployment example could look something like the following: +For reference, a full deployment example is provided below with the correct annotations provided. In addition, the previous yaml manifests can also be combined into +a single manifest for easier deployment. + + + ```yaml apiVersion: apps/v1 kind: Deployment @@ -302,13 +428,61 @@ spec: serviceAccountName: web ``` + + After deploying the `web` application, you can test service mesh connections by deploying the `static-client` application with the configuration in the [previous section](#connecting-to-mesh-enabled-services) and add the -following annotation to the pod template on `static-client`: +`consul.hashicorp.com/connect-service-upstreams: 'web:1234,web-admin:2234'` annotation to the pod template on `static-client`: + + + ```yaml -consul.hashicorp.com/connect-service-upstreams: "web:1234,web-admin:2234" +apiVersion: v1 +kind: Service +metadata: + # This name will be the service name in Consul. + name: static-client +spec: + selector: + app: static-client + ports: + - port: 80 +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: static-client +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: static-client +spec: + replicas: 1 + selector: + matchLabels: + app: static-client + template: + metadata: + name: static-client + labels: + app: static-client + annotations: + 'consul.hashicorp.com/connect-inject': 'true' + 'consul.hashicorp.com/connect-service-upstreams': 'web:1234,web-admin:2234' + spec: + containers: + - name: static-client + image: curlimages/curl:latest + # Just spin & wait forever, we'll use `kubectl exec` to demo + command: ['/bin/sh', '-c', '--'] + args: ['while true; do sleep 30; done;'] + # If ACLs are enabled, the serviceAccountName must match the Consul service name. 
+ serviceAccountName: static-client ``` + + If you exec on to a static-client pod, using a command like: ```shell-session $ kubectl exec -it static-client-5bd667fbd6-kk6xs -- /bin/sh From b0a2e33e0a6c4ec411266dfcef38a3a648fefb50 Mon Sep 17 00:00:00 2001 From: David Yu Date: Fri, 7 Jul 2023 10:03:28 -0700 Subject: [PATCH 06/43] address feedback (#18045) --- website/content/docs/k8s/connect/index.mdx | 23 +++++++++++----------- 1 file changed, 11 insertions(+), 12 deletions(-) diff --git a/website/content/docs/k8s/connect/index.mdx b/website/content/docs/k8s/connect/index.mdx index 884119cd92a5..57096a4a29a8 100644 --- a/website/content/docs/k8s/connect/index.mdx +++ b/website/content/docs/k8s/connect/index.mdx @@ -21,24 +21,24 @@ If `connectInject.default` is set to `false` or you want to explicitly enable se ### Service names -When the service is onboarded, the name registered in Consul is set to the name of the Kubernetes Service associated with the Pod. You can specify a custom name for the service in the [`consul.hashicorp.com/connect-service` annotation](/consul/docs/k8s/annotations-and-labels#consul-hashicorp-com-connect-service), but if ACLs are enabled, then the name of the service registered in Consul must match the Pod's `ServiceAccount` name. +When the service is onboarded, the name registered in Consul is set to the name of the Kubernetes Service associated with the Pod. You can use the [`consul.hashicorp.com/connect-service` annotation](/consul/docs/k8s/annotations-and-labels#consul-hashicorp-com-connect-service) to specify a custom name for the service, but if ACLs are enabled then the name of the service registered in Consul must match the Pod's `ServiceAccount` name. ### Transparent proxy mode By default, the Consul service mesh runs in transparent proxy mode. This mode forces inbound and outbound traffic through the sidecar proxy even though the service binds to all interfaces. Transparent proxy infers the location of upstream services using Consul service intentions, and also allows you to use Kubernetes DNS as you normally would for your workloads. -When transparent proxy mode is enabled, all service-to-service traffic is required to use mTLS. While onboarding new services to service mesh, your network may have mixed mTLS and non-mTLS traffic, which can result in broken service-to-service communication. You can temporarily enable permissive mTLS mode during the onboarding process so that existing mesh services can accept traffic from services that are not yet fully onboarded. Permissive mTLS enables sidecar proxies to access both mTLS and non-mTLS traffic. Refer to [Onboard mesh services in transparent proxy mode](/consul/docs/k8s/connect/onboarding-tproxy-mode) for additional information. +When transparent proxy mode is enabled, all service-to-service traffic is required to use mTLS. When onboarding new services to service mesh, your network may have mixed mTLS and non-mTLS traffic, which can result in broken service-to-service communication. You can temporarily enable permissive mTLS mode during the onboarding process so that existing mesh services can accept traffic from services that are not yet fully onboarded. Permissive mTLS enables sidecar proxies to access both mTLS and non-mTLS traffic. Refer to [Onboard mesh services in transparent proxy mode](/consul/docs/k8s/connect/onboarding-tproxy-mode) for additional information. 
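+
+As a minimal sketch of that onboarding toggle — assuming the `Mesh` and `service-defaults` config entries expose the `AllowEnablingPermissiveMutualTLS` and `MutualTLSMode` fields described in that guide — the config entries might look like the following:
+
+```hcl
+# Sketch only: allow individual services to opt into permissive mTLS while onboarding.
+Kind = "mesh"
+AllowEnablingPermissiveMutualTLS = true
+```
+
+```hcl
+# Sketch only: let an existing mesh service ("static-server" is just an example name)
+# accept both mTLS and non-mTLS traffic until all of its callers are onboarded,
+# then return it to "strict".
+Kind          = "service-defaults"
+Name          = "static-server"
+MutualTLSMode = "permissive"
+```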
### Kubernetes service mesh workload scenarios --> **Note:** A Kubernetes Service is **required** to register services on the Consul Service Mesh as Consul monitors the lifecyle of a Kubernetes service and its service instances using the service object. In addition the Kubernetes service is used to register and de-register the service from the Catalog. +-> **Note:** A Kubernetes Service is required in order to register services on the Consul service mesh. Consul monitors the lifecyle of the Kubernetes Service and its service instances using the service object. In addition, the Kubernetes service is used to register and de-register the service from Consul's catalog. -Below are multiple scenarios for registering workloads on Kubernetes onto Consul Service Mesh. Each scenario provides an example Kubernetes manifest to help quickly understand how to use Consul Service Mesh on a specific Kubernetes workload type. +The following configurations are examples for registering workloads on Kubernetes into Consul's service mesh in different scenarios. Each scenario provides an example Kubernetes manifest to demonstrate how to use Consul's service mesh with a specific Kubernetes workload type. - [Kubernetes Pods running as a deployment](#kubernetes-pods-running-as-a-deployment) - [Connecting to mesh-enabled Services](#connecting-to-mesh-enabled-services) - [Kubernetes Jobs](#kubernetes-jobs) -- [Kubernetes Pods with Multiple ports](#kubernetes-pods-with-multiple-ports) +- [Kubernetes Pods with multiple ports](#kubernetes-pods-with-multiple-ports) #### Kubernetes Pods running as a deployment @@ -97,9 +97,9 @@ spec: -To establish a connection to the Pod using service mesh, a client must use another mesh proxy. The client mesh proxy will use Consul service discovery to find all available upstream proxies and their public ports. +To establish a connection to the upstream Pod using service mesh, a client must dial the upstream workload using a mesh proxy. The client mesh proxy will use Consul service discovery to find all available upstream proxies and their public ports. -#### Connecting to Mesh-Enabled Services +#### Connecting to mesh-enabled Services The example Deployment specification below configures a Deployment that is capable of establishing connections to our previous example "static-server" service. The @@ -190,7 +190,7 @@ command terminated with exit code 52 #### Kubernetes Jobs -Kubernetes Jobs run pods that successfully terminate and only make outbound requests to services on the mesh. In order to register a Kubernetes job on the mesh, you must provide an integer value for the `consul.hashicorp.com/sidecar-proxy-lifecycle-shutdown-grace-period-seconds` annotation, and issue a request the `http://127.0.0.1:20600/graceful_shutdown` API endpoint for `consul-dataplane` to gracefully shut down the `consul-dataplane` sidecar after the job is complete. , +Kubernetes Jobs run pods that only make outbound requests to services on the mesh and successfully terminate when they are complete. In order to register a Kubernetes Job with the mesh, you must provide an integer value for the `consul.hashicorp.com/sidecar-proxy-lifecycle-shutdown-grace-period-seconds` annotation. Then, issue a request to the `http://127.0.0.1:20600/graceful_shutdown` API endpoint so that Kubernetes gracefully shuts down the `consul-dataplane` sidecar after the job is complete. Below is an example Kubernetes manifest that deploys a job correctly. 
@@ -266,7 +266,7 @@ NAME COMPLETIONS DURATION AGE test-job 1/1 30s 4m31s ``` -In addition, based on the logs emitted by the pod you can verify that the proxy was indeed shut down prior to job completing. +In addition, based on the logs emitted by the pod you can verify that the proxy was shut down before the Job completed. ```shell-session $ kubectl logs test-job-49st7 -c test-job @@ -275,7 +275,7 @@ Killing proxy Ended test job ``` -#### Kubernetes Pods with Multiple ports +#### Kubernetes Pods with multiple ports To configure a pod with multiple ports to be a part of the service mesh and receive and send service mesh traffic, you will need to add configuration so that a Consul service can be registered per port. This is because services in Consul @@ -382,8 +382,7 @@ The service account on the pod spec for the deployment should be set to the firs serviceAccountName: web ``` -For reference, a full deployment example is provided below with the correct annotations provided. In addition, the previous yaml manifests can also be combined into -a single manifest for easier deployment. +The following deployment example demonstrates the required annotations for the manifest. In addition, the previous YAML manifests can also be combined into a single manifest for easier deployment. From f4b08040fd3d3ac73d7c73dd0f41b38eefb8c6f3 Mon Sep 17 00:00:00 2001 From: Fulvio Date: Mon, 10 Jul 2023 17:34:41 +0200 Subject: [PATCH 07/43] Add verify server hostname to tls default (#17155) --- .changelog/17155.txt | 3 + agent/config/builder.go | 11 +- agent/config/runtime_test.go | 111 +++++++++++++++++- .../docs/agent/config/config-files.mdx | 15 +-- website/content/docs/agent/index.mdx | 3 - .../docs/security/security-models/core.mdx | 6 - 6 files changed, 126 insertions(+), 23 deletions(-) create mode 100644 .changelog/17155.txt diff --git a/.changelog/17155.txt b/.changelog/17155.txt new file mode 100644 index 000000000000..03cec33e991a --- /dev/null +++ b/.changelog/17155.txt @@ -0,0 +1,3 @@ +```release-note:improvement +config: Add new `tls.defaults.verify_server_hostname` configuration option. This specifies the default value for any interfaces that support the `verify_server_hostname` option. +``` diff --git a/agent/config/builder.go b/agent/config/builder.go index 6acd1b0039ee..98bac1711cac 100644 --- a/agent/config/builder.go +++ b/agent/config/builder.go @@ -2653,10 +2653,10 @@ func (b *builder) buildTLSConfig(rt RuntimeConfig, t TLS) (tlsutil.Config, error return c, errors.New("verify_outgoing is not valid in the tls.grpc stanza") } - // Similarly, only the internal RPC configuration honors VerifyServerHostname + // Similarly, only the internal RPC and defaults configuration honor VerifyServerHostname // so we call it out here too. - if t.Defaults.VerifyServerHostname != nil || t.GRPC.VerifyServerHostname != nil || t.HTTPS.VerifyServerHostname != nil { - return c, errors.New("verify_server_hostname is only valid in the tls.internal_rpc stanza") + if t.GRPC.VerifyServerHostname != nil || t.HTTPS.VerifyServerHostname != nil { + return c, errors.New("verify_server_hostname is only valid in the tls.defaults and tls.internal_rpc stanzas") } // And UseAutoCert right now only applies to external gRPC interface. 
@@ -2706,8 +2706,11 @@ func (b *builder) buildTLSConfig(rt RuntimeConfig, t TLS) (tlsutil.Config, error } mapCommon("internal_rpc", t.InternalRPC, &c.InternalRPC) - c.InternalRPC.VerifyServerHostname = boolVal(t.InternalRPC.VerifyServerHostname) + c.InternalRPC.VerifyServerHostname = boolVal(t.Defaults.VerifyServerHostname) + if t.InternalRPC.VerifyServerHostname != nil { + c.InternalRPC.VerifyServerHostname = boolVal(t.InternalRPC.VerifyServerHostname) + } // Setting only verify_server_hostname is documented to imply verify_outgoing. // If it doesn't then we risk sending communication over plain TCP when we // documented it as forcing TLS for RPCs. Enforce this here rather than in diff --git a/agent/config/runtime_test.go b/agent/config/runtime_test.go index cc5451804dd7..b18a63162484 100644 --- a/agent/config/runtime_test.go +++ b/agent/config/runtime_test.go @@ -2736,7 +2736,44 @@ func TestLoad_IntegrationWithFlags(t *testing.T) { } } `}, - expectedErr: "verify_server_hostname is only valid in the tls.internal_rpc stanza", + expected: func(rt *RuntimeConfig) { + rt.DataDir = dataDir + rt.TLS.InternalRPC.VerifyServerHostname = true + rt.TLS.InternalRPC.VerifyOutgoing = true + }, + }) + run(t, testCase{ + desc: "verify_server_hostname in the defaults stanza and internal_rpc", + args: []string{ + `-data-dir=` + dataDir, + }, + hcl: []string{` + tls { + defaults { + verify_server_hostname = false + }, + internal_rpc { + verify_server_hostname = true + } + } + `}, + json: []string{` + { + "tls": { + "defaults": { + "verify_server_hostname": false + }, + "internal_rpc": { + "verify_server_hostname": true + } + } + } + `}, + expected: func(rt *RuntimeConfig) { + rt.DataDir = dataDir + rt.TLS.InternalRPC.VerifyServerHostname = true + rt.TLS.InternalRPC.VerifyOutgoing = true + }, }) run(t, testCase{ desc: "verify_server_hostname in the grpc stanza", @@ -2759,7 +2796,7 @@ func TestLoad_IntegrationWithFlags(t *testing.T) { } } `}, - expectedErr: "verify_server_hostname is only valid in the tls.internal_rpc stanza", + expectedErr: "verify_server_hostname is only valid in the tls.defaults and tls.internal_rpc stanza", }) run(t, testCase{ desc: "verify_server_hostname in the https stanza", @@ -2782,7 +2819,7 @@ func TestLoad_IntegrationWithFlags(t *testing.T) { } } `}, - expectedErr: "verify_server_hostname is only valid in the tls.internal_rpc stanza", + expectedErr: "verify_server_hostname is only valid in the tls.defaults and tls.internal_rpc stanza", }) run(t, testCase{ desc: "translated keys", @@ -5723,6 +5760,74 @@ func TestLoad_IntegrationWithFlags(t *testing.T) { rt.TLS.InternalRPC.VerifyOutgoing = true }, }) + run(t, testCase{ + desc: "tls.defaults.verify_server_hostname implies tls.internal_rpc.verify_outgoing", + args: []string{ + `-data-dir=` + dataDir, + }, + json: []string{` + { + "tls": { + "defaults": { + "verify_server_hostname": true + } + } + } + `}, + hcl: []string{` + tls { + defaults { + verify_server_hostname = true + } + } + `}, + expected: func(rt *RuntimeConfig) { + rt.DataDir = dataDir + + rt.TLS.Domain = "consul." 
+ rt.TLS.NodeName = "thehostname" + + rt.TLS.InternalRPC.VerifyServerHostname = true + rt.TLS.InternalRPC.VerifyOutgoing = true + }, + }) + run(t, testCase{ + desc: "tls.internal_rpc.verify_server_hostname overwrites tls.defaults.verify_server_hostname", + args: []string{ + `-data-dir=` + dataDir, + }, + json: []string{` + { + "tls": { + "defaults": { + "verify_server_hostname": false + }, + "internal_rpc": { + "verify_server_hostname": true + } + } + } + `}, + hcl: []string{` + tls { + defaults { + verify_server_hostname = false + }, + internal_rpc { + verify_server_hostname = true + } + } + `}, + expected: func(rt *RuntimeConfig) { + rt.DataDir = dataDir + + rt.TLS.Domain = "consul." + rt.TLS.NodeName = "thehostname" + + rt.TLS.InternalRPC.VerifyServerHostname = true + rt.TLS.InternalRPC.VerifyOutgoing = true + }, + }) run(t, testCase{ desc: "tls.grpc.use_auto_cert defaults to false", args: []string{ diff --git a/website/content/docs/agent/config/config-files.mdx b/website/content/docs/agent/config/config-files.mdx index 1b382341e467..8d46b63bd0ce 100644 --- a/website/content/docs/agent/config/config-files.mdx +++ b/website/content/docs/agent/config/config-files.mdx @@ -2094,6 +2094,12 @@ specially crafted certificate signed by the CA can be used to gain full access t * `TLSv1_2` (default) * `TLSv1_3` + - `verify_server_hostname` ((#tls_internal_rpc_verify_server_hostname)) When + set to true, Consul verifies the TLS certificate presented by the servers + match the hostname `server..`. By default this is false, + and Consul does not verify the hostname of the certificate, only that it + is signed by a trusted CA. + **WARNING: TLS 1.1 and lower are generally considered less secure and should not be used if possible.** @@ -2201,7 +2207,7 @@ specially crafted certificate signed by the CA can be used to gain full access t only way to enforce that no client can communicate with a server unencrypted is to also enable `verify_incoming` which requires client certificates too. - - `verify_server_hostname` ((#tls_internal_rpc_verify_server_hostname)) When + - `verify_server_hostname` Overrides [tls.defaults.verify_server_hostname](#tls_defaults_verify_server_hostname). When set to true, Consul verifies the TLS certificate presented by the servers match the hostname `server..`. 
By default this is false, and Consul does not verify the hostname of the certificate, only that it @@ -2285,9 +2291,6 @@ tls { ca_file = "/etc/pki/tls/certs/ca-bundle.crt" verify_incoming = true verify_outgoing = true - } - - internal_rpc { verify_server_hostname = true } } @@ -2316,9 +2319,7 @@ tls { "cert_file": "/etc/pki/tls/certs/my.crt", "ca_file": "/etc/pki/tls/certs/ca-bundle.crt", "verify_incoming": true, - "verify_outgoing": true - }, - "internal_rpc": { + "verify_outgoing": true, "verify_server_hostname": true } } diff --git a/website/content/docs/agent/index.mdx b/website/content/docs/agent/index.mdx index ec68e0a1ce1f..b5a06b39e640 100644 --- a/website/content/docs/agent/index.mdx +++ b/website/content/docs/agent/index.mdx @@ -276,9 +276,6 @@ tls { ca_file = "/consul/config/certs/consul-agent-ca.pem" cert_file = "/consul/config/certs/dc1-server-consul-0.pem" key_file = "/consul/config/certs/dc1-server-consul-0-key.pem" - } - - internal_rpc { verify_server_hostname = true } } diff --git a/website/content/docs/security/security-models/core.mdx b/website/content/docs/security/security-models/core.mdx index 92a5c1ac91c2..2b6bb0515d71 100644 --- a/website/content/docs/security/security-models/core.mdx +++ b/website/content/docs/security/security-models/core.mdx @@ -128,9 +128,6 @@ environment and adapt these configurations accordingly. ca_file = "consul-agent-ca.pem" cert_file = "dc1-server-consul-0.pem" key_file = "dc1-server-consul-0-key.pem" - } - - internal_rpc { verify_server_hostname = true } } @@ -148,9 +145,6 @@ environment and adapt these configurations accordingly. verify_incoming = false verify_outgoing = true ca_file = "consul-agent-ca.pem" - } - - internal_rpc { verify_server_hostname = true } } From 1b08626358a42815852ecc943d929714a346b75b Mon Sep 17 00:00:00 2001 From: Dan Stough Date: Mon, 10 Jul 2023 17:08:06 -0400 Subject: [PATCH 08/43] [OSS] Fix initial_fetch_timeout to wait for all xDS resources (#18024) * fix(connect): set initial_fetch_time to wait indefinitely * changelog * PR feedback 1 --- .changelog/18024.txt | 3 ++ agent/proxycfg/api_gateway.go | 20 +++++++++--- agent/proxycfg/ingress_gateway.go | 6 +++- agent/proxycfg/snapshot.go | 31 ++++++++++++++++--- agent/proxycfg/state.go | 2 ++ agent/proxycfg/testing_api_gateway.go | 11 ++++--- command/connect/envoy/bootstrap_tpl.go | 2 ++ ..._ADDR-with-https-scheme-enables-tls.golden | 2 ++ ...tps-scheme-does-not-affect-grpc-tls.golden | 2 ++ .../envoy/testdata/access-log-path.golden | 2 ++ .../access-logs-enabled-custom.golden | 2 ++ .../envoy/testdata/access-logs-enabled.golden | 2 ++ .../testdata/acl-enabled-and-token.golden | 2 ++ .../testdata/acl-enabled-but-no-token.golden | 2 ++ ...AIN-and-CONSUL_GRPC_ADDR-TLS-is-tls.golden | 2 ++ ...and-CONSUL_GRPC_ADDR-PLAIN-is-plain.golden | 2 ++ .../envoy/testdata/defaults-nodemeta.golden | 2 ++ .../connect/envoy/testdata/defaults.golden | 2 ++ .../deprecated-grpc-addr-config.golden | 2 ++ .../testdata/envoy-readiness-probe.golden | 2 ++ .../envoy/testdata/existing-ca-file.golden | 2 ++ .../envoy/testdata/existing-ca-path.golden | 2 ++ .../envoy/testdata/extra_-multiple.golden | 2 ++ .../envoy/testdata/extra_-single.golden | 2 ++ .../envoy/testdata/grpc-addr-env.golden | 2 ++ .../envoy/testdata/grpc-addr-flag.golden | 2 ++ .../testdata/grpc-addr-unix-with-tls.golden | 2 ++ .../envoy/testdata/grpc-addr-unix.golden | 2 ++ .../testdata/grpc-tls-addr-config.golden | 2 ++ .../ingress-gateway-address-specified.golden | 2 ++ .../ingress-gateway-no-auto-register.golden | 2 ++ 
.../testdata/ingress-gateway-nodemeta.golden | 2 ++ ...-register-with-service-and-proxy-id.golden | 2 ++ ...ister-with-service-without-proxy-id.golden | 2 ++ .../envoy/testdata/ingress-gateway.golden | 2 ++ .../prometheus-metrics-tls-ca-file.golden | 2 ++ .../prometheus-metrics-tls-ca-path.golden | 2 ++ .../envoy/testdata/prometheus-metrics.golden | 2 ++ .../testdata/stats-config-override.golden | 2 ++ .../envoy/testdata/telemetry-collector.golden | 2 ++ .../connect/envoy/testdata/token-arg.golden | 2 ++ .../connect/envoy/testdata/token-env.golden | 2 ++ .../envoy/testdata/token-file-arg.golden | 2 ++ .../envoy/testdata/token-file-env.golden | 2 ++ .../envoy/testdata/xds-addr-config.golden | 2 ++ .../testdata/zipkin-tracing-config.golden | 2 ++ .../test/gateways/ingress_gateway_test.go | 5 --- 47 files changed, 137 insertions(+), 21 deletions(-) create mode 100644 .changelog/18024.txt diff --git a/.changelog/18024.txt b/.changelog/18024.txt new file mode 100644 index 000000000000..a661e7304c62 --- /dev/null +++ b/.changelog/18024.txt @@ -0,0 +1,3 @@ +```release-note:bug +connect: fix a bug with Envoy potentially starting with incomplete configuration by not waiting enough for initial xDS configuration. +``` \ No newline at end of file diff --git a/agent/proxycfg/api_gateway.go b/agent/proxycfg/api_gateway.go index 7a4a48d0d251..41eb5921259e 100644 --- a/agent/proxycfg/api_gateway.go +++ b/agent/proxycfg/api_gateway.go @@ -6,6 +6,7 @@ package proxycfg import ( "context" "fmt" + "github.com/hashicorp/consul/acl" "github.com/hashicorp/consul/agent/leafcert" "github.com/hashicorp/consul/agent/proxycfg/internal/watch" @@ -48,13 +49,13 @@ func (h *handlerAPIGateway) initialize(ctx context.Context) (ConfigSnapshot, err } // Watch the api-gateway's config entry - err = h.subscribeToConfigEntry(ctx, structs.APIGateway, h.service, h.proxyID.EnterpriseMeta, gatewayConfigWatchID) + err = h.subscribeToConfigEntry(ctx, structs.APIGateway, h.service, h.proxyID.EnterpriseMeta, apiGatewayConfigWatchID) if err != nil { return snap, err } // Watch the bound-api-gateway's config entry - err = h.subscribeToConfigEntry(ctx, structs.BoundAPIGateway, h.service, h.proxyID.EnterpriseMeta, gatewayConfigWatchID) + err = h.subscribeToConfigEntry(ctx, structs.BoundAPIGateway, h.service, h.proxyID.EnterpriseMeta, boundGatewayConfigWatchID) if err != nil { return snap, err } @@ -108,9 +109,9 @@ func (h *handlerAPIGateway) handleUpdate(ctx context.Context, u UpdateEvent, sna if err := h.handleRootCAUpdate(u, snap); err != nil { return err } - case u.CorrelationID == gatewayConfigWatchID: + case u.CorrelationID == apiGatewayConfigWatchID || u.CorrelationID == boundGatewayConfigWatchID: // Handle change in the api-gateway or bound-api-gateway config entry - if err := h.handleGatewayConfigUpdate(ctx, u, snap); err != nil { + if err := h.handleGatewayConfigUpdate(ctx, u, snap, u.CorrelationID); err != nil { return err } case u.CorrelationID == inlineCertificateConfigWatchID: @@ -146,11 +147,20 @@ func (h *handlerAPIGateway) handleRootCAUpdate(u UpdateEvent, snap *ConfigSnapsh // In particular, we want to make sure that we're subscribing to any attached resources such // as routes and certificates. These additional subscriptions will enable us to update the // config snapshot appropriately for any route or certificate changes. 
-func (h *handlerAPIGateway) handleGatewayConfigUpdate(ctx context.Context, u UpdateEvent, snap *ConfigSnapshot) error { +func (h *handlerAPIGateway) handleGatewayConfigUpdate(ctx context.Context, u UpdateEvent, snap *ConfigSnapshot, correlationID string) error { resp, ok := u.Result.(*structs.ConfigEntryResponse) if !ok { return fmt.Errorf("invalid type for response: %T", u.Result) } else if resp.Entry == nil { + // A nil response indicates that we have the watch configured and that we are done with further changes + // until a new response comes in. By setting these earlier we allow a minimal xDS snapshot to configure the + // gateway. + if correlationID == apiGatewayConfigWatchID { + snap.APIGateway.GatewayConfigLoaded = true + } + if correlationID == boundGatewayConfigWatchID { + snap.APIGateway.BoundGatewayConfigLoaded = true + } return nil } diff --git a/agent/proxycfg/ingress_gateway.go b/agent/proxycfg/ingress_gateway.go index 22eb40505677..efb774c9b17c 100644 --- a/agent/proxycfg/ingress_gateway.go +++ b/agent/proxycfg/ingress_gateway.go @@ -95,6 +95,11 @@ func (s *handlerIngressGateway) handleUpdate(ctx context.Context, u UpdateEvent, if !ok { return fmt.Errorf("invalid type for response: %T", u.Result) } + + // We set this even if the response is empty so that we know the watch is set, + // but we don't block if the ingress config entry is unset for this gateway + snap.IngressGateway.GatewayConfigLoaded = true + if resp.Entry == nil { return nil } @@ -103,7 +108,6 @@ func (s *handlerIngressGateway) handleUpdate(ctx context.Context, u UpdateEvent, return fmt.Errorf("invalid type for config entry: %T", resp.Entry) } - snap.IngressGateway.GatewayConfigLoaded = true snap.IngressGateway.TLSConfig = gatewayConf.TLS if gatewayConf.Defaults != nil { snap.IngressGateway.Defaults = *gatewayConf.Defaults diff --git a/agent/proxycfg/snapshot.go b/agent/proxycfg/snapshot.go index e8f95d9651be..1d06e5fd8c9c 100644 --- a/agent/proxycfg/snapshot.go +++ b/agent/proxycfg/snapshot.go @@ -824,6 +824,18 @@ DOMAIN_LOOP: return services, upstreams, compiled, err } +// valid tests for two valid api gateway snapshot states: +// 1. waiting: the watch on api and bound gateway entries is set, but none were received +// 2. loaded: both the valid config entries AND the leaf certs are set +func (c *configSnapshotAPIGateway) valid() bool { + waiting := c.GatewayConfigLoaded && len(c.Upstreams) == 0 && c.BoundGatewayConfigLoaded && c.Leaf == nil + + // If we have a leaf, it implies we successfully watched parent resources + loaded := c.GatewayConfigLoaded && c.BoundGatewayConfigLoaded && c.Leaf != nil + + return waiting || loaded +} + type configSnapshotIngressGateway struct { ConfigSnapshotUpstreams @@ -872,6 +884,18 @@ func (c *configSnapshotIngressGateway) isEmpty() bool { !c.MeshConfigSet } +// valid tests for two valid ingress snapshot states: +// 1. waiting: the watch on ingress config entries is set, but none were received +// 2. 
loaded: both the ingress config entry AND the leaf cert are set +func (c *configSnapshotIngressGateway) valid() bool { + waiting := c.GatewayConfigLoaded && len(c.Upstreams) == 0 && c.Leaf == nil + + // If we have a leaf, it implies we successfully watched parent resources + loaded := c.GatewayConfigLoaded && c.Leaf != nil + + return waiting || loaded +} + type APIGatewayListenerKey = IngressListenerKey func APIGatewayListenerKeyFromListener(l structs.APIGatewayListener) APIGatewayListenerKey { @@ -965,17 +989,14 @@ func (s *ConfigSnapshot) Valid() bool { case structs.ServiceKindIngressGateway: return s.Roots != nil && - s.IngressGateway.Leaf != nil && - s.IngressGateway.GatewayConfigLoaded && + s.IngressGateway.valid() && s.IngressGateway.HostsSet && s.IngressGateway.MeshConfigSet case structs.ServiceKindAPIGateway: // TODO Is this the proper set of things to validate? return s.Roots != nil && - s.APIGateway.Leaf != nil && - s.APIGateway.GatewayConfigLoaded && - s.APIGateway.BoundGatewayConfigLoaded && + s.APIGateway.valid() && s.APIGateway.MeshConfigSet default: return false diff --git a/agent/proxycfg/state.go b/agent/proxycfg/state.go index 028a3fd59da8..7bbb7f7b87c5 100644 --- a/agent/proxycfg/state.go +++ b/agent/proxycfg/state.go @@ -36,6 +36,8 @@ const ( serviceResolversWatchID = "service-resolvers" gatewayServicesWatchID = "gateway-services" gatewayConfigWatchID = "gateway-config" + apiGatewayConfigWatchID = "api-gateway-config" + boundGatewayConfigWatchID = "bound-gateway-config" inlineCertificateConfigWatchID = "inline-certificate-config" routeConfigWatchID = "route-config" externalServiceIDPrefix = "external-service:" diff --git a/agent/proxycfg/testing_api_gateway.go b/agent/proxycfg/testing_api_gateway.go index 75229ccfdd3b..87ff58fbf053 100644 --- a/agent/proxycfg/testing_api_gateway.go +++ b/agent/proxycfg/testing_api_gateway.go @@ -6,9 +6,10 @@ package proxycfg import ( "fmt" + "github.com/mitchellh/go-testing-interface" + "github.com/hashicorp/consul/agent/connect" "github.com/hashicorp/consul/agent/consul/discoverychain" - "github.com/mitchellh/go-testing-interface" "github.com/hashicorp/consul/agent/configentry" "github.com/hashicorp/consul/agent/structs" @@ -49,13 +50,13 @@ func TestConfigSnapshotAPIGateway( Result: placeholderLeaf, }, { - CorrelationID: gatewayConfigWatchID, + CorrelationID: apiGatewayConfigWatchID, Result: &structs.ConfigEntryResponse{ Entry: entry, }, }, { - CorrelationID: gatewayConfigWatchID, + CorrelationID: boundGatewayConfigWatchID, Result: &structs.ConfigEntryResponse{ Entry: boundEntry, }, @@ -141,13 +142,13 @@ func TestConfigSnapshotAPIGateway_NilConfigEntry( Result: roots, }, { - CorrelationID: gatewayConfigWatchID, + CorrelationID: apiGatewayConfigWatchID, Result: &structs.ConfigEntryResponse{ Entry: nil, // The first watch on a config entry will return nil if the config entry doesn't exist. }, }, { - CorrelationID: gatewayConfigWatchID, + CorrelationID: boundGatewayConfigWatchID, Result: &structs.ConfigEntryResponse{ Entry: nil, // The first watch on a config entry will return nil if the config entry doesn't exist. 
}, diff --git a/command/connect/envoy/bootstrap_tpl.go b/command/connect/envoy/bootstrap_tpl.go index 78d88f9b1d11..26b8e2118b1e 100644 --- a/command/connect/envoy/bootstrap_tpl.go +++ b/command/connect/envoy/bootstrap_tpl.go @@ -281,10 +281,12 @@ const bootstrapTemplate = `{ "dynamic_resources": { "lds_config": { "ads": {}, + "initial_fetch_timeout": "0s", "resource_api_version": "V3" }, "cds_config": { "ads": {}, + "initial_fetch_timeout": "0s", "resource_api_version": "V3" }, "ads_config": { diff --git a/command/connect/envoy/testdata/CONSUL_GRPC_ADDR-with-https-scheme-enables-tls.golden b/command/connect/envoy/testdata/CONSUL_GRPC_ADDR-with-https-scheme-enables-tls.golden index a8ba4704f5e3..c8144ac72fa2 100644 --- a/command/connect/envoy/testdata/CONSUL_GRPC_ADDR-with-https-scheme-enables-tls.golden +++ b/command/connect/envoy/testdata/CONSUL_GRPC_ADDR-with-https-scheme-enables-tls.golden @@ -197,10 +197,12 @@ "dynamic_resources": { "lds_config": { "ads": {}, + "initial_fetch_timeout": "0s", "resource_api_version": "V3" }, "cds_config": { "ads": {}, + "initial_fetch_timeout": "0s", "resource_api_version": "V3" }, "ads_config": { diff --git a/command/connect/envoy/testdata/CONSUL_HTTP_ADDR-with-https-scheme-does-not-affect-grpc-tls.golden b/command/connect/envoy/testdata/CONSUL_HTTP_ADDR-with-https-scheme-does-not-affect-grpc-tls.golden index 5485256b4f99..a89575d2c167 100644 --- a/command/connect/envoy/testdata/CONSUL_HTTP_ADDR-with-https-scheme-does-not-affect-grpc-tls.golden +++ b/command/connect/envoy/testdata/CONSUL_HTTP_ADDR-with-https-scheme-does-not-affect-grpc-tls.golden @@ -184,10 +184,12 @@ "dynamic_resources": { "lds_config": { "ads": {}, + "initial_fetch_timeout": "0s", "resource_api_version": "V3" }, "cds_config": { "ads": {}, + "initial_fetch_timeout": "0s", "resource_api_version": "V3" }, "ads_config": { diff --git a/command/connect/envoy/testdata/access-log-path.golden b/command/connect/envoy/testdata/access-log-path.golden index 18a28a491e86..184a290b245f 100644 --- a/command/connect/envoy/testdata/access-log-path.golden +++ b/command/connect/envoy/testdata/access-log-path.golden @@ -184,10 +184,12 @@ "dynamic_resources": { "lds_config": { "ads": {}, + "initial_fetch_timeout": "0s", "resource_api_version": "V3" }, "cds_config": { "ads": {}, + "initial_fetch_timeout": "0s", "resource_api_version": "V3" }, "ads_config": { diff --git a/command/connect/envoy/testdata/access-logs-enabled-custom.golden b/command/connect/envoy/testdata/access-logs-enabled-custom.golden index 6af720bdaccb..50531f89c257 100644 --- a/command/connect/envoy/testdata/access-logs-enabled-custom.golden +++ b/command/connect/envoy/testdata/access-logs-enabled-custom.golden @@ -197,10 +197,12 @@ "dynamic_resources": { "lds_config": { "ads": {}, + "initial_fetch_timeout": "0s", "resource_api_version": "V3" }, "cds_config": { "ads": {}, + "initial_fetch_timeout": "0s", "resource_api_version": "V3" }, "ads_config": { diff --git a/command/connect/envoy/testdata/access-logs-enabled.golden b/command/connect/envoy/testdata/access-logs-enabled.golden index 892cf46324b4..ea2e15060718 100644 --- a/command/connect/envoy/testdata/access-logs-enabled.golden +++ b/command/connect/envoy/testdata/access-logs-enabled.golden @@ -219,10 +219,12 @@ "dynamic_resources": { "lds_config": { "ads": {}, + "initial_fetch_timeout": "0s", "resource_api_version": "V3" }, "cds_config": { "ads": {}, + "initial_fetch_timeout": "0s", "resource_api_version": "V3" }, "ads_config": { diff --git 
a/command/connect/envoy/testdata/acl-enabled-and-token.golden b/command/connect/envoy/testdata/acl-enabled-and-token.golden index 3342f0390471..a5427c7f2454 100644 --- a/command/connect/envoy/testdata/acl-enabled-and-token.golden +++ b/command/connect/envoy/testdata/acl-enabled-and-token.golden @@ -184,10 +184,12 @@ "dynamic_resources": { "lds_config": { "ads": {}, + "initial_fetch_timeout": "0s", "resource_api_version": "V3" }, "cds_config": { "ads": {}, + "initial_fetch_timeout": "0s", "resource_api_version": "V3" }, "ads_config": { diff --git a/command/connect/envoy/testdata/acl-enabled-but-no-token.golden b/command/connect/envoy/testdata/acl-enabled-but-no-token.golden index 5485256b4f99..a89575d2c167 100644 --- a/command/connect/envoy/testdata/acl-enabled-but-no-token.golden +++ b/command/connect/envoy/testdata/acl-enabled-but-no-token.golden @@ -184,10 +184,12 @@ "dynamic_resources": { "lds_config": { "ads": {}, + "initial_fetch_timeout": "0s", "resource_api_version": "V3" }, "cds_config": { "ads": {}, + "initial_fetch_timeout": "0s", "resource_api_version": "V3" }, "ads_config": { diff --git a/command/connect/envoy/testdata/both-CONSUL_HTTP_ADDR-PLAIN-and-CONSUL_GRPC_ADDR-TLS-is-tls.golden b/command/connect/envoy/testdata/both-CONSUL_HTTP_ADDR-PLAIN-and-CONSUL_GRPC_ADDR-TLS-is-tls.golden index a8ba4704f5e3..c8144ac72fa2 100644 --- a/command/connect/envoy/testdata/both-CONSUL_HTTP_ADDR-PLAIN-and-CONSUL_GRPC_ADDR-TLS-is-tls.golden +++ b/command/connect/envoy/testdata/both-CONSUL_HTTP_ADDR-PLAIN-and-CONSUL_GRPC_ADDR-TLS-is-tls.golden @@ -197,10 +197,12 @@ "dynamic_resources": { "lds_config": { "ads": {}, + "initial_fetch_timeout": "0s", "resource_api_version": "V3" }, "cds_config": { "ads": {}, + "initial_fetch_timeout": "0s", "resource_api_version": "V3" }, "ads_config": { diff --git a/command/connect/envoy/testdata/both-CONSUL_HTTP_ADDR-TLS-and-CONSUL_GRPC_ADDR-PLAIN-is-plain.golden b/command/connect/envoy/testdata/both-CONSUL_HTTP_ADDR-TLS-and-CONSUL_GRPC_ADDR-PLAIN-is-plain.golden index 5485256b4f99..a89575d2c167 100644 --- a/command/connect/envoy/testdata/both-CONSUL_HTTP_ADDR-TLS-and-CONSUL_GRPC_ADDR-PLAIN-is-plain.golden +++ b/command/connect/envoy/testdata/both-CONSUL_HTTP_ADDR-TLS-and-CONSUL_GRPC_ADDR-PLAIN-is-plain.golden @@ -184,10 +184,12 @@ "dynamic_resources": { "lds_config": { "ads": {}, + "initial_fetch_timeout": "0s", "resource_api_version": "V3" }, "cds_config": { "ads": {}, + "initial_fetch_timeout": "0s", "resource_api_version": "V3" }, "ads_config": { diff --git a/command/connect/envoy/testdata/defaults-nodemeta.golden b/command/connect/envoy/testdata/defaults-nodemeta.golden index 41788c3c42e4..ea65421109f3 100644 --- a/command/connect/envoy/testdata/defaults-nodemeta.golden +++ b/command/connect/envoy/testdata/defaults-nodemeta.golden @@ -185,10 +185,12 @@ "dynamic_resources": { "lds_config": { "ads": {}, + "initial_fetch_timeout": "0s", "resource_api_version": "V3" }, "cds_config": { "ads": {}, + "initial_fetch_timeout": "0s", "resource_api_version": "V3" }, "ads_config": { diff --git a/command/connect/envoy/testdata/defaults.golden b/command/connect/envoy/testdata/defaults.golden index 5485256b4f99..a89575d2c167 100644 --- a/command/connect/envoy/testdata/defaults.golden +++ b/command/connect/envoy/testdata/defaults.golden @@ -184,10 +184,12 @@ "dynamic_resources": { "lds_config": { "ads": {}, + "initial_fetch_timeout": "0s", "resource_api_version": "V3" }, "cds_config": { "ads": {}, + "initial_fetch_timeout": "0s", "resource_api_version": "V3" }, "ads_config": 
{ diff --git a/command/connect/envoy/testdata/deprecated-grpc-addr-config.golden b/command/connect/envoy/testdata/deprecated-grpc-addr-config.golden index ab87aa9b438f..e604c61d5063 100644 --- a/command/connect/envoy/testdata/deprecated-grpc-addr-config.golden +++ b/command/connect/envoy/testdata/deprecated-grpc-addr-config.golden @@ -184,10 +184,12 @@ "dynamic_resources": { "lds_config": { "ads": {}, + "initial_fetch_timeout": "0s", "resource_api_version": "V3" }, "cds_config": { "ads": {}, + "initial_fetch_timeout": "0s", "resource_api_version": "V3" }, "ads_config": { diff --git a/command/connect/envoy/testdata/envoy-readiness-probe.golden b/command/connect/envoy/testdata/envoy-readiness-probe.golden index 444528e8808e..18680a3206b3 100644 --- a/command/connect/envoy/testdata/envoy-readiness-probe.golden +++ b/command/connect/envoy/testdata/envoy-readiness-probe.golden @@ -273,10 +273,12 @@ "dynamic_resources": { "lds_config": { "ads": {}, + "initial_fetch_timeout": "0s", "resource_api_version": "V3" }, "cds_config": { "ads": {}, + "initial_fetch_timeout": "0s", "resource_api_version": "V3" }, "ads_config": { diff --git a/command/connect/envoy/testdata/existing-ca-file.golden b/command/connect/envoy/testdata/existing-ca-file.golden index a8ba4704f5e3..c8144ac72fa2 100644 --- a/command/connect/envoy/testdata/existing-ca-file.golden +++ b/command/connect/envoy/testdata/existing-ca-file.golden @@ -197,10 +197,12 @@ "dynamic_resources": { "lds_config": { "ads": {}, + "initial_fetch_timeout": "0s", "resource_api_version": "V3" }, "cds_config": { "ads": {}, + "initial_fetch_timeout": "0s", "resource_api_version": "V3" }, "ads_config": { diff --git a/command/connect/envoy/testdata/existing-ca-path.golden b/command/connect/envoy/testdata/existing-ca-path.golden index e246dbb324eb..c0566ab351d7 100644 --- a/command/connect/envoy/testdata/existing-ca-path.golden +++ b/command/connect/envoy/testdata/existing-ca-path.golden @@ -197,10 +197,12 @@ "dynamic_resources": { "lds_config": { "ads": {}, + "initial_fetch_timeout": "0s", "resource_api_version": "V3" }, "cds_config": { "ads": {}, + "initial_fetch_timeout": "0s", "resource_api_version": "V3" }, "ads_config": { diff --git a/command/connect/envoy/testdata/extra_-multiple.golden b/command/connect/envoy/testdata/extra_-multiple.golden index 038e65d8d9fb..819994f0c0ff 100644 --- a/command/connect/envoy/testdata/extra_-multiple.golden +++ b/command/connect/envoy/testdata/extra_-multiple.golden @@ -206,10 +206,12 @@ "dynamic_resources": { "lds_config": { "ads": {}, + "initial_fetch_timeout": "0s", "resource_api_version": "V3" }, "cds_config": { "ads": {}, + "initial_fetch_timeout": "0s", "resource_api_version": "V3" }, "ads_config": { diff --git a/command/connect/envoy/testdata/extra_-single.golden b/command/connect/envoy/testdata/extra_-single.golden index 14b8f0820500..b1fb71997b3f 100644 --- a/command/connect/envoy/testdata/extra_-single.golden +++ b/command/connect/envoy/testdata/extra_-single.golden @@ -197,10 +197,12 @@ "dynamic_resources": { "lds_config": { "ads": {}, + "initial_fetch_timeout": "0s", "resource_api_version": "V3" }, "cds_config": { "ads": {}, + "initial_fetch_timeout": "0s", "resource_api_version": "V3" }, "ads_config": { diff --git a/command/connect/envoy/testdata/grpc-addr-env.golden b/command/connect/envoy/testdata/grpc-addr-env.golden index ab87aa9b438f..e604c61d5063 100644 --- a/command/connect/envoy/testdata/grpc-addr-env.golden +++ b/command/connect/envoy/testdata/grpc-addr-env.golden @@ -184,10 +184,12 @@ 
"dynamic_resources": { "lds_config": { "ads": {}, + "initial_fetch_timeout": "0s", "resource_api_version": "V3" }, "cds_config": { "ads": {}, + "initial_fetch_timeout": "0s", "resource_api_version": "V3" }, "ads_config": { diff --git a/command/connect/envoy/testdata/grpc-addr-flag.golden b/command/connect/envoy/testdata/grpc-addr-flag.golden index ab87aa9b438f..e604c61d5063 100644 --- a/command/connect/envoy/testdata/grpc-addr-flag.golden +++ b/command/connect/envoy/testdata/grpc-addr-flag.golden @@ -184,10 +184,12 @@ "dynamic_resources": { "lds_config": { "ads": {}, + "initial_fetch_timeout": "0s", "resource_api_version": "V3" }, "cds_config": { "ads": {}, + "initial_fetch_timeout": "0s", "resource_api_version": "V3" }, "ads_config": { diff --git a/command/connect/envoy/testdata/grpc-addr-unix-with-tls.golden b/command/connect/envoy/testdata/grpc-addr-unix-with-tls.golden index 89764c310a47..2b902d48d476 100644 --- a/command/connect/envoy/testdata/grpc-addr-unix-with-tls.golden +++ b/command/connect/envoy/testdata/grpc-addr-unix-with-tls.golden @@ -196,10 +196,12 @@ "dynamic_resources": { "lds_config": { "ads": {}, + "initial_fetch_timeout": "0s", "resource_api_version": "V3" }, "cds_config": { "ads": {}, + "initial_fetch_timeout": "0s", "resource_api_version": "V3" }, "ads_config": { diff --git a/command/connect/envoy/testdata/grpc-addr-unix.golden b/command/connect/envoy/testdata/grpc-addr-unix.golden index eb8841ae993f..88e6e46bfb95 100644 --- a/command/connect/envoy/testdata/grpc-addr-unix.golden +++ b/command/connect/envoy/testdata/grpc-addr-unix.golden @@ -183,10 +183,12 @@ "dynamic_resources": { "lds_config": { "ads": {}, + "initial_fetch_timeout": "0s", "resource_api_version": "V3" }, "cds_config": { "ads": {}, + "initial_fetch_timeout": "0s", "resource_api_version": "V3" }, "ads_config": { diff --git a/command/connect/envoy/testdata/grpc-tls-addr-config.golden b/command/connect/envoy/testdata/grpc-tls-addr-config.golden index e79cf0455d2b..e65fc5f09cd6 100644 --- a/command/connect/envoy/testdata/grpc-tls-addr-config.golden +++ b/command/connect/envoy/testdata/grpc-tls-addr-config.golden @@ -197,10 +197,12 @@ "dynamic_resources": { "lds_config": { "ads": {}, + "initial_fetch_timeout": "0s", "resource_api_version": "V3" }, "cds_config": { "ads": {}, + "initial_fetch_timeout": "0s", "resource_api_version": "V3" }, "ads_config": { diff --git a/command/connect/envoy/testdata/ingress-gateway-address-specified.golden b/command/connect/envoy/testdata/ingress-gateway-address-specified.golden index cc2e504eaf95..038c8fcc5dd2 100644 --- a/command/connect/envoy/testdata/ingress-gateway-address-specified.golden +++ b/command/connect/envoy/testdata/ingress-gateway-address-specified.golden @@ -273,10 +273,12 @@ "dynamic_resources": { "lds_config": { "ads": {}, + "initial_fetch_timeout": "0s", "resource_api_version": "V3" }, "cds_config": { "ads": {}, + "initial_fetch_timeout": "0s", "resource_api_version": "V3" }, "ads_config": { diff --git a/command/connect/envoy/testdata/ingress-gateway-no-auto-register.golden b/command/connect/envoy/testdata/ingress-gateway-no-auto-register.golden index cddaa490dd2c..80f1f9a8b7c6 100644 --- a/command/connect/envoy/testdata/ingress-gateway-no-auto-register.golden +++ b/command/connect/envoy/testdata/ingress-gateway-no-auto-register.golden @@ -273,10 +273,12 @@ "dynamic_resources": { "lds_config": { "ads": {}, + "initial_fetch_timeout": "0s", "resource_api_version": "V3" }, "cds_config": { "ads": {}, + "initial_fetch_timeout": "0s", "resource_api_version": 
"V3" }, "ads_config": { diff --git a/command/connect/envoy/testdata/ingress-gateway-nodemeta.golden b/command/connect/envoy/testdata/ingress-gateway-nodemeta.golden index d79739ed7280..8a2d5f62768e 100644 --- a/command/connect/envoy/testdata/ingress-gateway-nodemeta.golden +++ b/command/connect/envoy/testdata/ingress-gateway-nodemeta.golden @@ -274,10 +274,12 @@ "dynamic_resources": { "lds_config": { "ads": {}, + "initial_fetch_timeout": "0s", "resource_api_version": "V3" }, "cds_config": { "ads": {}, + "initial_fetch_timeout": "0s", "resource_api_version": "V3" }, "ads_config": { diff --git a/command/connect/envoy/testdata/ingress-gateway-register-with-service-and-proxy-id.golden b/command/connect/envoy/testdata/ingress-gateway-register-with-service-and-proxy-id.golden index 3149fc4b33a5..6192f674ac17 100644 --- a/command/connect/envoy/testdata/ingress-gateway-register-with-service-and-proxy-id.golden +++ b/command/connect/envoy/testdata/ingress-gateway-register-with-service-and-proxy-id.golden @@ -273,10 +273,12 @@ "dynamic_resources": { "lds_config": { "ads": {}, + "initial_fetch_timeout": "0s", "resource_api_version": "V3" }, "cds_config": { "ads": {}, + "initial_fetch_timeout": "0s", "resource_api_version": "V3" }, "ads_config": { diff --git a/command/connect/envoy/testdata/ingress-gateway-register-with-service-without-proxy-id.golden b/command/connect/envoy/testdata/ingress-gateway-register-with-service-without-proxy-id.golden index f8eb44754117..92d80e033e9b 100644 --- a/command/connect/envoy/testdata/ingress-gateway-register-with-service-without-proxy-id.golden +++ b/command/connect/envoy/testdata/ingress-gateway-register-with-service-without-proxy-id.golden @@ -273,10 +273,12 @@ "dynamic_resources": { "lds_config": { "ads": {}, + "initial_fetch_timeout": "0s", "resource_api_version": "V3" }, "cds_config": { "ads": {}, + "initial_fetch_timeout": "0s", "resource_api_version": "V3" }, "ads_config": { diff --git a/command/connect/envoy/testdata/ingress-gateway.golden b/command/connect/envoy/testdata/ingress-gateway.golden index c4fba520ed99..40a5c74d1bc2 100644 --- a/command/connect/envoy/testdata/ingress-gateway.golden +++ b/command/connect/envoy/testdata/ingress-gateway.golden @@ -273,10 +273,12 @@ "dynamic_resources": { "lds_config": { "ads": {}, + "initial_fetch_timeout": "0s", "resource_api_version": "V3" }, "cds_config": { "ads": {}, + "initial_fetch_timeout": "0s", "resource_api_version": "V3" }, "ads_config": { diff --git a/command/connect/envoy/testdata/prometheus-metrics-tls-ca-file.golden b/command/connect/envoy/testdata/prometheus-metrics-tls-ca-file.golden index 71b82250e30d..c93286e64e81 100644 --- a/command/connect/envoy/testdata/prometheus-metrics-tls-ca-file.golden +++ b/command/connect/envoy/testdata/prometheus-metrics-tls-ca-file.golden @@ -310,10 +310,12 @@ "dynamic_resources": { "lds_config": { "ads": {}, + "initial_fetch_timeout": "0s", "resource_api_version": "V3" }, "cds_config": { "ads": {}, + "initial_fetch_timeout": "0s", "resource_api_version": "V3" }, "ads_config": { diff --git a/command/connect/envoy/testdata/prometheus-metrics-tls-ca-path.golden b/command/connect/envoy/testdata/prometheus-metrics-tls-ca-path.golden index 07e56ae878a7..272175f70733 100644 --- a/command/connect/envoy/testdata/prometheus-metrics-tls-ca-path.golden +++ b/command/connect/envoy/testdata/prometheus-metrics-tls-ca-path.golden @@ -310,10 +310,12 @@ "dynamic_resources": { "lds_config": { "ads": {}, + "initial_fetch_timeout": "0s", "resource_api_version": "V3" }, "cds_config": { 
"ads": {}, + "initial_fetch_timeout": "0s", "resource_api_version": "V3" }, "ads_config": { diff --git a/command/connect/envoy/testdata/prometheus-metrics.golden b/command/connect/envoy/testdata/prometheus-metrics.golden index 5adc8a9485ee..5806dd671f76 100644 --- a/command/connect/envoy/testdata/prometheus-metrics.golden +++ b/command/connect/envoy/testdata/prometheus-metrics.golden @@ -273,10 +273,12 @@ "dynamic_resources": { "lds_config": { "ads": {}, + "initial_fetch_timeout": "0s", "resource_api_version": "V3" }, "cds_config": { "ads": {}, + "initial_fetch_timeout": "0s", "resource_api_version": "V3" }, "ads_config": { diff --git a/command/connect/envoy/testdata/stats-config-override.golden b/command/connect/envoy/testdata/stats-config-override.golden index d2a2371f20ea..dd4ff53dd194 100644 --- a/command/connect/envoy/testdata/stats-config-override.golden +++ b/command/connect/envoy/testdata/stats-config-override.golden @@ -62,10 +62,12 @@ "dynamic_resources": { "lds_config": { "ads": {}, + "initial_fetch_timeout": "0s", "resource_api_version": "V3" }, "cds_config": { "ads": {}, + "initial_fetch_timeout": "0s", "resource_api_version": "V3" }, "ads_config": { diff --git a/command/connect/envoy/testdata/telemetry-collector.golden b/command/connect/envoy/testdata/telemetry-collector.golden index 3977ce65bba3..81ef48662298 100644 --- a/command/connect/envoy/testdata/telemetry-collector.golden +++ b/command/connect/envoy/testdata/telemetry-collector.golden @@ -222,10 +222,12 @@ "dynamic_resources": { "lds_config": { "ads": {}, + "initial_fetch_timeout": "0s", "resource_api_version": "V3" }, "cds_config": { "ads": {}, + "initial_fetch_timeout": "0s", "resource_api_version": "V3" }, "ads_config": { diff --git a/command/connect/envoy/testdata/token-arg.golden b/command/connect/envoy/testdata/token-arg.golden index ac057e9a60b1..816bf7487572 100644 --- a/command/connect/envoy/testdata/token-arg.golden +++ b/command/connect/envoy/testdata/token-arg.golden @@ -184,10 +184,12 @@ "dynamic_resources": { "lds_config": { "ads": {}, + "initial_fetch_timeout": "0s", "resource_api_version": "V3" }, "cds_config": { "ads": {}, + "initial_fetch_timeout": "0s", "resource_api_version": "V3" }, "ads_config": { diff --git a/command/connect/envoy/testdata/token-env.golden b/command/connect/envoy/testdata/token-env.golden index ac057e9a60b1..816bf7487572 100644 --- a/command/connect/envoy/testdata/token-env.golden +++ b/command/connect/envoy/testdata/token-env.golden @@ -184,10 +184,12 @@ "dynamic_resources": { "lds_config": { "ads": {}, + "initial_fetch_timeout": "0s", "resource_api_version": "V3" }, "cds_config": { "ads": {}, + "initial_fetch_timeout": "0s", "resource_api_version": "V3" }, "ads_config": { diff --git a/command/connect/envoy/testdata/token-file-arg.golden b/command/connect/envoy/testdata/token-file-arg.golden index ac057e9a60b1..816bf7487572 100644 --- a/command/connect/envoy/testdata/token-file-arg.golden +++ b/command/connect/envoy/testdata/token-file-arg.golden @@ -184,10 +184,12 @@ "dynamic_resources": { "lds_config": { "ads": {}, + "initial_fetch_timeout": "0s", "resource_api_version": "V3" }, "cds_config": { "ads": {}, + "initial_fetch_timeout": "0s", "resource_api_version": "V3" }, "ads_config": { diff --git a/command/connect/envoy/testdata/token-file-env.golden b/command/connect/envoy/testdata/token-file-env.golden index ac057e9a60b1..816bf7487572 100644 --- a/command/connect/envoy/testdata/token-file-env.golden +++ b/command/connect/envoy/testdata/token-file-env.golden @@ -184,10 
+184,12 @@ "dynamic_resources": { "lds_config": { "ads": {}, + "initial_fetch_timeout": "0s", "resource_api_version": "V3" }, "cds_config": { "ads": {}, + "initial_fetch_timeout": "0s", "resource_api_version": "V3" }, "ads_config": { diff --git a/command/connect/envoy/testdata/xds-addr-config.golden b/command/connect/envoy/testdata/xds-addr-config.golden index ab87aa9b438f..e604c61d5063 100644 --- a/command/connect/envoy/testdata/xds-addr-config.golden +++ b/command/connect/envoy/testdata/xds-addr-config.golden @@ -184,10 +184,12 @@ "dynamic_resources": { "lds_config": { "ads": {}, + "initial_fetch_timeout": "0s", "resource_api_version": "V3" }, "cds_config": { "ads": {}, + "initial_fetch_timeout": "0s", "resource_api_version": "V3" }, "ads_config": { diff --git a/command/connect/envoy/testdata/zipkin-tracing-config.golden b/command/connect/envoy/testdata/zipkin-tracing-config.golden index fd52f5ae3f57..dc4779a7753d 100644 --- a/command/connect/envoy/testdata/zipkin-tracing-config.golden +++ b/command/connect/envoy/testdata/zipkin-tracing-config.golden @@ -217,10 +217,12 @@ "dynamic_resources": { "lds_config": { "ads": {}, + "initial_fetch_timeout": "0s", "resource_api_version": "V3" }, "cds_config": { "ads": {}, + "initial_fetch_timeout": "0s", "resource_api_version": "V3" }, "ads_config": { diff --git a/test/integration/consul-container/test/gateways/ingress_gateway_test.go b/test/integration/consul-container/test/gateways/ingress_gateway_test.go index 1a8958741c8a..fcadd3a0fae6 100644 --- a/test/integration/consul-container/test/gateways/ingress_gateway_test.go +++ b/test/integration/consul-container/test/gateways/ingress_gateway_test.go @@ -62,11 +62,6 @@ func TestIngressGateway(t *testing.T) { ingressService, err := libservice.NewGatewayService(context.Background(), gwCfg, clientNode) require.NoError(t, err) - // this is deliberate - // internally, ingress gw have a 15s timeout before the /ready endpoint is available, - // then we need to wait for the health check to re-execute and propagate. - time.Sleep(45 * time.Second) - // We check this is healthy here because in the case of bringing up a new kube cluster, // it is not possible to create the config entry in advance. // The health checks must pass so the pod can start up. From 7decc305b952c9df32a84a7750c3a87d46c18e68 Mon Sep 17 00:00:00 2001 From: Krastin Krastev Date: Tue, 11 Jul 2023 15:09:32 +0200 Subject: [PATCH 09/43] ui: fix typos for peer service imports (#17999) --- ui/packages/consul-ui/translations/routes/en-us.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/ui/packages/consul-ui/translations/routes/en-us.yaml b/ui/packages/consul-ui/translations/routes/en-us.yaml index da76128e88ea..129686384002 100644 --- a/ui/packages/consul-ui/translations/routes/en-us.yaml +++ b/ui/packages/consul-ui/translations/routes/en-us.yaml @@ -152,7 +152,7 @@ dc:
{items, select, 0 {Services must be exported from one peer to another to enable service communication across two peers. There don't seem to be any services imported from {name} yet, or you may not have services:read permissions to access to this view.} - other {No services where found matching that search, or you may not have access to view the services you are searching for.} + other {No services were found matching that search, or you may not have access to view the services you are searching for.} }
exported: @@ -162,7 +162,7 @@ dc:
{items, select, 0 {Services must be exported from one peer to another to enable service communication across two peers. There don't seem to be any services exported to {name} yet, or you may not have services:read permissions to access to this view.} - other {No services where found matching that search, or you may not have access to view the services you are searching for.} + other {No services were found matching that search, or you may not have access to view the services you are searching for.} }
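For context on the large set of golden-file updates in the xDS patch (#18024) above: they all flow from the same two-line change in the bootstrap template, which sets `initial_fetch_timeout` to `"0s"` on the LDS and CDS config sources. Per the commit message and changelog, a zero timeout makes Envoy wait indefinitely for the initial xDS response rather than starting with an incomplete configuration. A minimal sketch of the resulting `dynamic_resources` stanza, as it appears in the golden files (surrounding bootstrap fields elided):

```json
{
  "dynamic_resources": {
    "lds_config": {
      "ads": {},
      "initial_fetch_timeout": "0s",
      "resource_api_version": "V3"
    },
    "cds_config": {
      "ads": {},
      "initial_fetch_timeout": "0s",
      "resource_api_version": "V3"
    }
  }
}
```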
From da79997f3dccade966455e2834e79bc9fd32d5e9 Mon Sep 17 00:00:00 2001 From: Dan Stough Date: Tue, 11 Jul 2023 11:28:27 -0400 Subject: [PATCH 10/43] test: fix FIPS inline cert test message (#18076) --- agent/structs/config_entry_inline_certificate_test.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/agent/structs/config_entry_inline_certificate_test.go b/agent/structs/config_entry_inline_certificate_test.go index 8c7754013106..b95f3b0e9694 100644 --- a/agent/structs/config_entry_inline_certificate_test.go +++ b/agent/structs/config_entry_inline_certificate_test.go @@ -162,7 +162,9 @@ func TestInlineCertificate(t *testing.T) { PrivateKey: tooShortPrivateKey, Certificate: "foo", }, - validateErr: "key length must be at least 2048 bits", + // non-FIPS: "key length must be at least 2048 bits" + // FIPS: "key length invalid: only RSA lengths of 2048, 3072, and 4096 are allowed in FIPS mode" + validateErr: "key length", }, "mismatched certificate": { entry: &InlineCertificateConfigEntry{ From a30ba335b6a6d4af2e6ccf3b47212a24655baffa Mon Sep 17 00:00:00 2001 From: Joshua Timmons Date: Tue, 11 Jul 2023 16:13:30 -0400 Subject: [PATCH 11/43] Fix a couple typos in Agent Telemetry Metrics docs (#18080) * Fix metrics docs * Add changelog Signed-off-by: josh --------- Signed-off-by: josh --- .changelog/18080.txt | 3 +++ website/content/docs/agent/telemetry.mdx | 10 +++++----- 2 files changed, 8 insertions(+), 5 deletions(-) create mode 100644 .changelog/18080.txt diff --git a/.changelog/18080.txt b/.changelog/18080.txt new file mode 100644 index 000000000000..9826b249eb31 --- /dev/null +++ b/.changelog/18080.txt @@ -0,0 +1,3 @@ +```release-note:improvement +Fix some typos in metrics docs +``` \ No newline at end of file diff --git a/website/content/docs/agent/telemetry.mdx b/website/content/docs/agent/telemetry.mdx index 326f5b42dbf5..eae1c1aa4239 100644 --- a/website/content/docs/agent/telemetry.mdx +++ b/website/content/docs/agent/telemetry.mdx @@ -487,8 +487,8 @@ These metrics are used to monitor the health of the Consul servers. | `consul.raft.leader.oldestLogAge` | The number of milliseconds since the _oldest_ log in the leader's log store was written. This can be important for replication health where write rate is high and the snapshot is large as followers may be unable to recover from a restart if restoring takes longer than the minimum value for the current leader. Compare this with `consul.raft.fsm.lastRestoreDuration` and `consul.raft.rpc.installSnapshot` to monitor. In normal usage this gauge value will grow linearly over time until a snapshot completes on the leader and the log is truncated. Note: this metric won't be emitted until the leader writes a snapshot. After an upgrade to Consul 1.10.0 it won't be emitted until the oldest log was written after the upgrade. | ms | gauge | | `consul.raft.replication.heartbeat` | Measures the time taken to invoke appendEntries on a peer, so that it doesn't timeout on a periodic basis. | ms | timer | | `consul.raft.replication.appendEntries` | Measures the time it takes to replicate log entries to followers. This is a general indicator of the load pressure on the Consul servers, as well as the performance of the communication between the servers. 
| ms | timer | -| `consul.raft.replication.appendEntries.rpc` | Measures the time taken by the append entries RFC, to replicate the log entries of a leader agent onto its follower agent(s) | ms | timer | -| `consul.raft.replication.appendEntries.logs` | Measures the number of logs replicated to an agent, to bring it up to speed with the leader's logs. | logs appended/ interval | counter | +| `consul.raft.replication.appendEntries.rpc` | Measures the time taken by the append entries RPC to replicate the log entries of a leader agent onto its follower agent(s). | ms | timer | +| `consul.raft.replication.appendEntries.logs` | Counts the number of logs replicated to an agent to bring it up to speed with the leader's logs. | logs appended/ interval | counter | | `consul.raft.restore` | Counts the number of times the restore operation has been performed by the agent. Here, restore refers to the action of raft consuming an external snapshot to restore its state. | operation invoked / interval | counter | | `consul.raft.restoreUserSnapshot` | Measures the time taken by the agent to restore the FSM state from a user's snapshot | ms | timer | | `consul.raft.rpc.appendEntries` | Measures the time taken to process an append entries RPC call from an agent. | ms | timer | @@ -560,12 +560,12 @@ These metrics are used to monitor the health of the Consul servers. | `consul.leader.replication.namespaces.status` | This will only be emitted by the leader in a secondary datacenter. The value will be a 1 if the last round of namespace replication was successful or 0 if there was an error. | healthy | gauge | | `consul.leader.replication.namespaces.index` | This will only be emitted by the leader in a secondary datacenter. Increments to the index of namespaces in the primary datacenter that have been successfully replicated. | index | gauge | | `consul.prepared-query.apply` | Measures the time it takes to apply a prepared query update. | ms | timer | -| `consul.prepared-query.explain` | Measures the time it takes to process a prepared query explain request. | ms | timer | -| `consul.prepared-query.execute` | Measures the time it takes to process a prepared query execute request. | ms | timer | | `consul.prepared-query.execute_remote` | Measures the time it takes to process a prepared query execute request that was forwarded to another datacenter. | ms | timer | +| `consul.prepared-query.execute` | Measures the time it takes to process a prepared query execute request. | ms | timer | +| `consul.prepared-query.explain` | Measures the time it takes to process a prepared query explain request. | ms | timer | | `consul.rpc.raft_handoff` | Increments when a server accepts a Raft-related RPC connection. | connections | counter | -| `consul.rpc.request_error` | Increments when a server returns an error from an RPC request. | errors | counter | | `consul.rpc.request` | Increments when a server receives a Consul-related RPC request. | requests | counter | +| `consul.rpc.request_error` | Increments when a server returns an error from an RPC request. | errors | counter | | `consul.rpc.query` | Increments when a server receives a read RPC request, indicating the rate of new read queries. See consul.rpc.queries_blocking for the current number of in-flight blocking RPC calls. This metric changed in 1.7.0 to only increment on the the start of a query. The rate of queries will appear lower, but is more accurate. 
| queries | counter | | `consul.rpc.queries_blocking` | The current number of in-flight blocking queries the server is handling. | queries | gauge | | `consul.rpc.cross-dc` | Increments when a server sends a (potentially blocking) cross datacenter RPC query. | queries | counter | From bfb921229d77239db513bf4c74dbc572eaacfc46 Mon Sep 17 00:00:00 2001 From: David Yu Date: Tue, 11 Jul 2023 15:37:53 -0700 Subject: [PATCH 12/43] docs updates - cluster peering and virtual services (#18069) * Update route-to-virtual-services.mdx * Update establish-peering.mdx --- .../docs/k8s/connect/cluster-peering/tech-specs.mdx | 4 ++-- .../cluster-peering/usage/establish-peering.mdx | 10 ++++++---- .../content/docs/k8s/l7-traffic/failover-tproxy.mdx | 2 +- .../docs/k8s/l7-traffic/route-to-virtual-services.mdx | 4 ++-- 4 files changed, 11 insertions(+), 9 deletions(-) diff --git a/website/content/docs/k8s/connect/cluster-peering/tech-specs.mdx b/website/content/docs/k8s/connect/cluster-peering/tech-specs.mdx index cfe4ba7aebc5..2d27a4f369fc 100644 --- a/website/content/docs/k8s/connect/cluster-peering/tech-specs.mdx +++ b/website/content/docs/k8s/connect/cluster-peering/tech-specs.mdx @@ -41,7 +41,7 @@ Refer to the following example Helm configuration: ```yaml global: name: consul - image: "hashicorp/consul:1.14.1" + image: "hashicorp/consul:1.16.0" peering: enabled: true tls: @@ -166,4 +166,4 @@ If ACLs are enabled, you must add tokens to grant the following permissions: - Grant `service:write` permissions to services that define mesh gateways in their server definition. - Grant `service:read` permissions for all services on the partition. -- Grant `mesh:write` permissions to the mesh gateways that participate in cluster peering connections. This permission allows a leaf certificate to be issued for mesh gateways to terminate TLS sessions for HTTP requests. \ No newline at end of file +- Grant `mesh:write` permissions to the mesh gateways that participate in cluster peering connections. This permission allows a leaf certificate to be issued for mesh gateways to terminate TLS sessions for HTTP requests. diff --git a/website/content/docs/k8s/connect/cluster-peering/usage/establish-peering.mdx b/website/content/docs/k8s/connect/cluster-peering/usage/establish-peering.mdx index 167d4fdceced..375886132e50 100644 --- a/website/content/docs/k8s/connect/cluster-peering/usage/establish-peering.mdx +++ b/website/content/docs/k8s/connect/cluster-peering/usage/establish-peering.mdx @@ -48,7 +48,7 @@ After you provision a Kubernetes cluster and set up your kubeconfig file to mana $ export CLUSTER2_CONTEXT= ``` -### Update the Helm chart +### Install Consul using Helm and configure peering over mesh gateways To use cluster peering with Consul on Kubernetes deployments, update the Helm chart with [the required values](/consul/docs/k8s/connect/cluster-peering/tech-specs#helm-requirements). After updating the Helm chart, you can use the `consul-k8s` CLI to apply `values.yaml` to each cluster. @@ -59,7 +59,7 @@ To use cluster peering with Consul on Kubernetes deployments, update the Helm ch ``` ```shell-session - $ helm install ${HELM_RELEASE_NAME1} hashicorp/consul --create-namespace --namespace consul --version "1.0.1" --values values.yaml --set global.datacenter=dc1 --kube-context $CLUSTER1_CONTEXT + $ helm install ${HELM_RELEASE_NAME1} hashicorp/consul --create-namespace --namespace consul --version "1.2.0" --values values.yaml --set global.datacenter=dc1 --kube-context $CLUSTER1_CONTEXT ``` 1. 
In `cluster-02`, run the following commands: @@ -69,9 +69,11 @@ To use cluster peering with Consul on Kubernetes deployments, update the Helm ch ``` ```shell-session - $ helm install ${HELM_RELEASE_NAME2} hashicorp/consul --create-namespace --namespace consul --version "1.0.1" --values values.yaml --set global.datacenter=dc2 --kube-context $CLUSTER2_CONTEXT + $ helm install ${HELM_RELEASE_NAME2} hashicorp/consul --create-namespace --namespace consul --version "1.2.0" --values values.yaml --set global.datacenter=dc2 --kube-context $CLUSTER2_CONTEXT ``` +1. For both clusters apply the `Mesh` configuration entry values provided in [Mesh Gateway Specifications](/consul/docs/k8s/connect/cluster-peering/tech-specs#mesh-gateway-specifications) to allow establishing peering connections over mesh gateways. + ### Configure the mesh gateway mode for traffic between services In Kubernetes deployments, you can configure mesh gateways to use `local` mode so that a service dialing a service in a remote peer dials the remote mesh gateway instead of the local mesh gateway. To configure the mesh gateway mode so that this traffic always leaves through the local mesh gateway, you can use the `ProxyDefaults` CRD. @@ -452,4 +454,4 @@ For Consul Enterprise, the permissions apply to all imported services in the ser Refer to [Reading servers](/consul/docs/connect/config-entries/exported-services#reading-services) in the `exported-services` configuration entry documentation for example rules. -For additional information about how to configure and use ACLs, refer to [ACLs system overview](/consul/docs/security/acl). \ No newline at end of file +For additional information about how to configure and use ACLs, refer to [ACLs system overview](/consul/docs/security/acl). diff --git a/website/content/docs/k8s/l7-traffic/failover-tproxy.mdx b/website/content/docs/k8s/l7-traffic/failover-tproxy.mdx index 81bdc5e8672d..c08d8c3ac5d8 100644 --- a/website/content/docs/k8s/l7-traffic/failover-tproxy.mdx +++ b/website/content/docs/k8s/l7-traffic/failover-tproxy.mdx @@ -18,7 +18,7 @@ Complete the following steps to configure failover service instances in Consul o ## Requirements -- `consul-k8s` v1.2.0-beta1 or newer. +- `consul-k8s` v1.2.0 or newer. - Consul service mesh must be enabled. Refer to [How does Consul Service Mesh Work on Kubernetes](/consul/docs/k8s/connect). - Proxies must be configured to run in transparent proxy mode. - To query virtual DNS names, you must use Consul DNS. diff --git a/website/content/docs/k8s/l7-traffic/route-to-virtual-services.mdx b/website/content/docs/k8s/l7-traffic/route-to-virtual-services.mdx index c8951d356cf6..0852f81196fe 100644 --- a/website/content/docs/k8s/l7-traffic/route-to-virtual-services.mdx +++ b/website/content/docs/k8s/l7-traffic/route-to-virtual-services.mdx @@ -20,7 +20,7 @@ Complete the following steps to configure failover service instances in Consul o ## Requirements -- `consul-k8s` v1.2.0-beta1 or newer. +- `consul-k8s` v1.2.0 or newer. - Consul service mesh must be enabled. Refer to [How does Consul service mesh work on Kubernetes](/consul/docs/k8s/connect). - Proxies must be configured to run in transparent proxy mode. - To query virtual DNS names, you must use Consul DNS. @@ -119,4 +119,4 @@ You can query the KubeDNS if the real and virtual services are in the same Kuber http://virtual-api..svc.cluster.local ``` -Note that you cannot use KubeDNS if a corresponding Kubernetes service and pod do not exist. 
\ No newline at end of file +Note that you cannot use KubeDNS if a corresponding Kubernetes service and pod do not exist. From 0e58c899780f4ebd2b40ecade7c50a981a84592b Mon Sep 17 00:00:00 2001 From: david3a <49253132+david3a@users.noreply.github.com> Date: Wed, 12 Jul 2023 00:05:13 +0100 Subject: [PATCH 13/43] Update service-mesh-compare.mdx (#17279) grammar change --- website/content/docs/consul-vs-other/service-mesh-compare.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/content/docs/consul-vs-other/service-mesh-compare.mdx b/website/content/docs/consul-vs-other/service-mesh-compare.mdx index b0848d2b90bc..419f5679bae1 100644 --- a/website/content/docs/consul-vs-other/service-mesh-compare.mdx +++ b/website/content/docs/consul-vs-other/service-mesh-compare.mdx @@ -14,5 +14,5 @@ Consul’s service mesh allows organizations to securely connect and manage thei Consul is platform agnostic — it supports any runtime (Kubernetes, EKS, AKS, GKE, VMs, ECS, Lambda, Nomad) and any cloud provider (AWS, Microsoft Azure, GCP, private clouds). This makes it one of the most flexible service discovery and service mesh platforms. While other service mesh software provides support for multiple runtimes for the data plane, they require you to run the control plane solely on Kubernetes. With Consul, you can run both the control plane and data plane in different runtimes. Consul also has several unique integrations with Vault, an industry standard for secrets management. Operators have the option to use Consul’s built-in certificate authority, or leverage Vault’s PKI engine to generate and store TLS certificates for both the data plane and control plane. In addition, Consul can automatically rotate the TLS certificates on both the data plane and control plane without requiring any type of restarts. This lets you rotate the certificates more frequently without incurring additional management burden on operators. -When deploying Consul on Kubernetes, you can store sensitive data including licenses, ACL tokens, and TLS certificates centrally Vault instead of Kubernetes secrets. Vault is much more secure than Kubernetes secrets because it automatically encrypts all data, provides advanced access controls to secrets, and provides centralized governance for all secrets. +When deploying Consul on Kubernetes, you can store sensitive data including licenses, ACL tokens, and TLS certificates centrally in Vault instead of Kubernetes secrets. Vault is much more secure than Kubernetes secrets because it automatically encrypts all data, provides advanced access controls to secrets, and provides centralized governance for all secrets. From bd5af7fe7da564502ba8c93b7ece551607186bb6 Mon Sep 17 00:00:00 2001 From: Curt Bushko Date: Tue, 11 Jul 2023 19:59:44 -0400 Subject: [PATCH 14/43] Update helm docs on main (#18085) --- website/content/docs/k8s/helm.mdx | 365 +++++++++++++++++++++++------- 1 file changed, 278 insertions(+), 87 deletions(-) diff --git a/website/content/docs/k8s/helm.mdx b/website/content/docs/k8s/helm.mdx index c4f639b2792b..06f77f32a98b 100644 --- a/website/content/docs/k8s/helm.mdx +++ b/website/content/docs/k8s/helm.mdx @@ -20,27 +20,22 @@ with Consul. Use these links to navigate to a particular top-level stanza. 
-- [Helm Chart Reference](#helm-chart-reference) - - [Top-Level Stanzas](#top-level-stanzas) - - [All Values](#all-values) - - [`global`](#h-global) - - [`server`](#h-server) - - [`externalServers`](#h-externalservers) - - [`client`](#h-client) - - [`dns`](#h-dns) - - [`ui`](#h-ui) - - [`syncCatalog`](#h-synccatalog) - - [`connectInject`](#h-connectinject) - - [`meshGateway`](#h-meshgateway) - - [`ingressGateways`](#h-ingressgateways) - - [`terminatingGateways`](#h-terminatinggateways) - - [`apiGateway`](#h-apigateway) - - [`webhookCertManager`](#h-webhookcertmanager) - - [`prometheus`](#h-prometheus) - - [`tests`](#h-tests) - - [`telemetryCollector`](#h-telemetrycollector) - - [Helm Chart Examples](#helm-chart-examples) - - [Customizing the Helm Chart](#customizing-the-helm-chart) +- [`global`](#h-global) +- [`server`](#h-server) +- [`externalServers`](#h-externalservers) +- [`client`](#h-client) +- [`dns`](#h-dns) +- [`ui`](#h-ui) +- [`syncCatalog`](#h-synccatalog) +- [`connectInject`](#h-connectinject) +- [`meshGateway`](#h-meshgateway) +- [`ingressGateways`](#h-ingressgateways) +- [`terminatingGateways`](#h-terminatinggateways) +- [`apiGateway`](#h-apigateway) +- [`webhookCertManager`](#h-webhookcertmanager) +- [`prometheus`](#h-prometheus) +- [`tests`](#h-tests) +- [`telemetryCollector`](#h-telemetrycollector) ## All Values @@ -64,7 +59,7 @@ Use these links to navigate to a particular top-level stanza. the prefix will be `-consul`. - `domain` ((#v-global-domain)) (`string: consul`) - The domain Consul will answer DNS queries for - (Refer to [`-domain`](https://developer.hashicorp.com/consul/docs/agent/config/cli-flags#_domain)) and the domain services synced from + (Refer to [`-domain`](/consul/docs/agent/config/cli-flags#_domain)) and the domain services synced from Consul into Kubernetes will have, e.g. `service-name.service.consul`. - `peering` ((#v-global-peering)) - Configures the Cluster Peering feature. Requires Consul v1.14+ and Consul-K8s v1.0.0+. @@ -125,7 +120,7 @@ Use these links to navigate to a particular top-level stanza. - `secretsBackend` ((#v-global-secretsbackend)) - secretsBackend is used to configure Vault as the secrets backend for the Consul on Kubernetes installation. The Vault cluster needs to have the Kubernetes Auth Method, KV2 and PKI secrets engines enabled and have necessary secrets, policies and roles created prior to installing Consul. - Refer to [Vault as the Secrets Backend](https://developer.hashicorp.com/consul/docs/k8s/deployment-configurations/vault) + Refer to [Vault as the Secrets Backend](/consul/docs/k8s/deployment-configurations/vault) documentation for full instructions. The Vault cluster _must_ not have the Consul cluster installed by this Helm chart as its storage backend @@ -212,11 +207,11 @@ Use these links to navigate to a particular top-level stanza. - `secretKey` ((#v-global-secretsbackend-vault-ca-secretkey)) (`string: ""`) - The key within the Kubernetes or Vault secret that holds the Vault CA certificate. - - `connectCA` ((#v-global-secretsbackend-vault-connectca)) - Configuration for the Vault service mesh CA provider. + - `connectCA` ((#v-global-secretsbackend-vault-connectca)) - Configuration for the Vault Connect CA provider. The provider will be configured to use the Vault Kubernetes auth method and therefore requires the role provided by `global.secretsBackend.vault.consulServerRole` to have permissions to the root and intermediate PKI paths. 
- Please refer to [Vault ACL policies](https://developer.hashicorp.com/consul/docs/connect/ca/vault#vault-acl-policies) + Please refer to [Vault ACL policies](/consul/docs/connect/ca/vault#vault-acl-policies) documentation for information on how to configure the Vault policies. - `address` ((#v-global-secretsbackend-vault-connectca-address)) (`string: ""`) - The address of the Vault server. @@ -224,13 +219,13 @@ Use these links to navigate to a particular top-level stanza. - `authMethodPath` ((#v-global-secretsbackend-vault-connectca-authmethodpath)) (`string: kubernetes`) - The mount path of the Kubernetes auth method in Vault. - `rootPKIPath` ((#v-global-secretsbackend-vault-connectca-rootpkipath)) (`string: ""`) - The path to a PKI secrets engine for the root certificate. - For more details, please refer to [Vault service mesh CA configuration](https://developer.hashicorp.com/consul/docs/connect/ca/vault#rootpkipath). + For more details, please refer to [Vault Connect CA configuration](/consul/docs/connect/ca/vault#rootpkipath). - `intermediatePKIPath` ((#v-global-secretsbackend-vault-connectca-intermediatepkipath)) (`string: ""`) - The path to a PKI secrets engine for the generated intermediate certificate. - For more details, please refer to [Vault service mesh CA configuration](https://developer.hashicorp.com/consul/docs/connect/ca/vault#intermediatepkipath). + For more details, please refer to [Vault Connect CA configuration](/consul/docs/connect/ca/vault#intermediatepkipath). - - `additionalConfig` ((#v-global-secretsbackend-vault-connectca-additionalconfig)) (`string: {}`) - Additional service mesh CA configuration in JSON format. - Please refer to [Vault service mesh CA configuration](https://developer.hashicorp.com/consul/docs/connect/ca/vault#configuration) + - `additionalConfig` ((#v-global-secretsbackend-vault-connectca-additionalconfig)) (`string: {}`) - Additional Connect CA configuration in JSON format. + Please refer to [Vault Connect CA configuration](/consul/docs/connect/ca/vault#configuration) for all configuration options available for that provider. Example: @@ -251,20 +246,20 @@ Use these links to navigate to a particular top-level stanza. - `caCert` ((#v-global-secretsbackend-vault-connectinject-cacert)) - Configuration to the Vault Secret that Kubernetes uses on Kubernetes pod creation, deletion, and update, to get CA certificates - used issued from vault to send webhooks to the connect inject. + used issued from vault to send webhooks to the ConnectInject. - `secretName` ((#v-global-secretsbackend-vault-connectinject-cacert-secretname)) (`string: null`) - The Vault secret path that contains the CA certificate for - connect inject webhooks. + Connect Inject webhooks. - `tlsCert` ((#v-global-secretsbackend-vault-connectinject-tlscert)) - Configuration to the Vault Secret that Kubernetes uses on Kubernetes pod creation, deletion, and update, to get TLS certificates - used issued from vault to send webhooks to the connect inject. + used issued from vault to send webhooks to the ConnectInject. - `secretName` ((#v-global-secretsbackend-vault-connectinject-tlscert-secretname)) (`string: null`) - The Vault secret path that issues TLS certificates for connect inject webhooks. - `gossipEncryption` ((#v-global-gossipencryption)) - Configures Consul's gossip encryption key. - (Refer to [`-encrypt`](https://developer.hashicorp.com/consul/docs/agent/config/cli-flags#_encrypt)). + (Refer to [`-encrypt`](/consul/docs/agent/config/cli-flags#_encrypt)). 
By default, gossip encryption is not enabled. The gossip encryption key may be set automatically or manually. The recommended method is to automatically generate the key. To automatically generate and set a gossip encryption key, set autoGenerate to true. @@ -295,17 +290,17 @@ Use these links to navigate to a particular top-level stanza. - `recursors` ((#v-global-recursors)) (`array: []`) - A list of addresses of upstream DNS servers that are used to recursively resolve DNS queries. These values are given as `-recursor` flags to Consul servers and clients. - Refer to [`-recursor`](https://developer.hashicorp.com/consul/docs/agent/config/cli-flags#_recursor) for more details. + Refer to [`-recursor`](/consul/docs/agent/config/cli-flags#_recursor) for more details. If this is an empty array (the default), then Consul DNS will only resolve queries for the Consul top level domain (by default `.consul`). - - `tls` ((#v-global-tls)) - Enables [TLS](https://developer.hashicorp.com/consul/tutorials/security/tls-encryption-secure) + - `tls` ((#v-global-tls)) - Enables [TLS](/consul/tutorials/security/tls-encryption-secure) across the cluster to verify authenticity of the Consul servers and clients. Requires Consul v1.4.1+. - `enabled` ((#v-global-tls-enabled)) (`boolean: false`) - If true, the Helm chart will enable TLS for Consul servers and clients and all consul-k8s-control-plane components, as well as generate certificate authority (optional) and server and client certificates. - This setting is required for [Cluster Peering](https://developer.hashicorp.com/consul/docs/connect/cluster-peering/k8s). + This setting is required for [Cluster Peering](/consul/docs/connect/cluster-peering/k8s). - `enableAutoEncrypt` ((#v-global-tls-enableautoencrypt)) (`boolean: false`) - If true, turns on the auto-encrypt feature on clients and servers. It also switches consul-k8s-control-plane components to retrieve the CA from the servers @@ -322,7 +317,7 @@ Use these links to navigate to a particular top-level stanza. - `verify` ((#v-global-tls-verify)) (`boolean: true`) - If true, `verify_outgoing`, `verify_server_hostname`, and `verify_incoming` for internal RPC communication will be set to `true` for Consul servers and clients. Set this to false to incrementally roll out TLS on an existing Consul cluster. - Please refer to [TLS on existing clusters](https://developer.hashicorp.com/consul/docs/k8s/operations/tls-on-existing-cluster) + Please refer to [TLS on existing clusters](/consul/docs/k8s/operations/tls-on-existing-cluster) for more details. - `httpsOnly` ((#v-global-tls-httpsonly)) (`boolean: true`) - If true, the Helm chart will configure Consul to disable the HTTP port on @@ -366,6 +361,15 @@ Use these links to navigate to a particular top-level stanza. - `secretKey` ((#v-global-tls-cakey-secretkey)) (`string: null`) - The key within the Kubernetes or Vault secret that holds the CA key. + - `annotations` ((#v-global-tls-annotations)) (`string: null`) - This value defines additional annotations for + tls init jobs. This should be formatted as a multi-line string. 
+ + ```yaml + annotations: | + "sample/annotation1": "foo" + "sample/annotation2": "bar" + ``` + - `enableConsulNamespaces` ((#v-global-enableconsulnamespaces)) (`boolean: false`) - `enableConsulNamespaces` indicates that you are running Consul Enterprise v1.7+ with a valid Consul Enterprise license and would like to make use of configuration beyond registering everything into @@ -410,6 +414,23 @@ Use these links to navigate to a particular top-level stanza. - `secretKey` ((#v-global-acls-replicationtoken-secretkey)) (`string: null`) - The key within the Kubernetes or Vault secret that holds the replication token. + - `resources` ((#v-global-acls-resources)) (`map`) - The resource requests (CPU, memory, etc.) for the server-acl-init and server-acl-init-cleanup pods. + This should be a YAML map corresponding to a Kubernetes + [`ResourceRequirements``](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.27/#resourcerequirements-v1-core) + object. + + Example: + + ```yaml + resources: + requests: + memory: '200Mi' + cpu: '100m' + limits: + memory: '200Mi' + cpu: '100m' + ``` + - `partitionToken` ((#v-global-acls-partitiontoken)) - partitionToken references a Vault secret containing the ACL token to be used in non-default partitions. This value should only be provided in the default partition and only when setting the `global.secretsBackend.vault.enabled` value to true. @@ -435,6 +456,15 @@ Use these links to navigate to a particular top-level stanza. beta.kubernetes.io/arch: amd64 ``` + - `annotations` ((#v-global-acls-annotations)) (`string: null`) - This value defines additional annotations for + acl init jobs. This should be formatted as a multi-line string. + + ```yaml + annotations: | + "sample/annotation1": "foo" + "sample/annotation2": "bar" + ``` + - `enterpriseLicense` ((#v-global-enterpriselicense)) - This value refers to a Kubernetes or Vault secret that you have created that contains your enterprise license. It is required if you are using an enterprise binary. Defining it here applies it to your cluster once a leader @@ -475,7 +505,7 @@ Use these links to navigate to a particular top-level stanza. This address must be reachable from the Consul servers in the primary datacenter. This auth method will be used to provision ACL tokens for Consul components and is different from the one used by the Consul Service Mesh. - Please refer to the [Kubernetes Auth Method documentation](https://developer.hashicorp.com/consul/docs/security/acl/auth-methods/kubernetes). + Please refer to the [Kubernetes Auth Method documentation](/consul/docs/security/acl/auth-methods/kubernetes). You can retrieve this value from your `kubeconfig` by running: @@ -602,7 +632,7 @@ Use these links to navigate to a particular top-level stanza. Consul server agents. - `replicas` ((#v-server-replicas)) (`integer: 1`) - The number of server agents to run. This determines the fault tolerance of - the cluster. Please refer to the [deployment table](https://developer.hashicorp.com/consul/docs/architecture/consensus#deployment-table) + the cluster. Please refer to the [deployment table](/consul/docs/architecture/consensus#deployment-table) for more information. - `bootstrapExpect` ((#v-server-bootstrapexpect)) (`int: null`) - The number of servers that are expected to be running. @@ -641,7 +671,7 @@ Use these links to navigate to a particular top-level stanza. 
Vault Secrets backend: If you are using Vault as a secrets backend, a Vault Policy must be created which allows `["create", "update"]` capabilities on the PKI issuing endpoint, which is usually of the form `pki/issue/consul-server`. - Complete [this tutorial](https://developer.hashicorp.com/consul/tutorials/vault-secure/vault-pki-consul-secure-tls) + Complete [this tutorial](/consul/tutorials/vault-secure/vault-pki-consul-secure-tls) to learn how to generate a compatible certificate. Note: when using TLS, both the `server.serverCert` and `global.tls.caCert` which points to the CA endpoint of this PKI engine must be provided. @@ -681,19 +711,19 @@ Use these links to navigate to a particular top-level stanza. storage classes, the PersistentVolumeClaims would need to be manually created. A `null` value will use the Kubernetes cluster's default StorageClass. If a default StorageClass does not exist, you will need to create one. - Refer to the [Read/Write Tuning](https://developer.hashicorp.com/consul/docs/install/performance#read-write-tuning) + Refer to the [Read/Write Tuning](/consul/docs/install/performance#read-write-tuning) section of the Server Performance Requirements documentation for considerations around choosing a performant storage class. - ~> **Note:** The [Reference Architecture](https://developer.hashicorp.com/consul/tutorials/production-deploy/reference-architecture#hardware-sizing-for-consul-servers) + ~> **Note:** The [Reference Architecture](/consul/tutorials/production-deploy/reference-architecture#hardware-sizing-for-consul-servers) contains best practices and recommendations for selecting suitable hardware sizes for your Consul servers. - - `connect` ((#v-server-connect)) (`boolean: true`) - This will enable/disable [service mesh](https://developer.hashicorp.com/consul/docs/connect). Setting this to true + - `connect` ((#v-server-connect)) (`boolean: true`) - This will enable/disable [service mesh](/consul/docs/connect). Setting this to true _will not_ automatically secure pod communication, this setting will only enable usage of the feature. Consul will automatically initialize a new CA and set of certificates. Additional service mesh settings can be configured - by setting the `server.extraConfig` value. + by setting the `server.extraConfig` value or by applying [configuration entries](/consul/docs/connect/config-entries). - `serviceAccount` ((#v-server-serviceaccount)) @@ -716,10 +746,10 @@ Use these links to navigate to a particular top-level stanza. ```yaml resources: requests: - memory: '100Mi' + memory: '200Mi' cpu: '100m' limits: - memory: '100Mi' + memory: '200Mi' cpu: '100m' ``` @@ -737,11 +767,15 @@ Use these links to navigate to a particular top-level stanza. - `server` ((#v-server-containersecuritycontext-server)) (`map`) - The consul server agent container + - `aclInit` ((#v-server-containersecuritycontext-aclinit)) (`map`) - The acl-init job + + - `tlsInit` ((#v-server-containersecuritycontext-tlsinit)) (`map`) - The tls-init job + - `updatePartition` ((#v-server-updatepartition)) (`integer: 0`) - This value is used to carefully control a rolling update of Consul server agents. This value specifies the [partition](https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#partitions) for performing a rolling update. 
Please read the linked Kubernetes - and [Upgrade Consul](https://developer.hashicorp.com/consul/docs/k8s/upgrade#upgrading-consul-servers) + and [Upgrade Consul](/consul/docs/k8s/upgrade#upgrading-consul-servers) documentation for more information. - `disruptionBudget` ((#v-server-disruptionbudget)) - This configures the [`PodDisruptionBudget`](https://kubernetes.io/docs/tasks/run-application/configure-pdb/) @@ -757,7 +791,7 @@ Use these links to navigate to a particular top-level stanza. --set 'server.disruptionBudget.maxUnavailable=0'` flag to the helm chart installation command because of a limitation in the Helm templating language. - - `extraConfig` ((#v-server-extraconfig)) (`string: {}`) - A raw string of extra [JSON configuration](https://developer.hashicorp.com/consul/docs/agent/config/config-files) for Consul + - `extraConfig` ((#v-server-extraconfig)) (`string: {}`) - A raw string of extra [JSON configuration](/consul/docs/agent/config/config-files) for Consul servers. This will be saved as-is into a ConfigMap that is read by the Consul server agents. This can be used to add additional configuration that isn't directly exposed by the chart. @@ -934,18 +968,18 @@ Use these links to navigate to a particular top-level stanza. it could be used to configure custom consul parameters. - `snapshotAgent` ((#v-server-snapshotagent)) - Values for setting up and running - [snapshot agents](https://developer.hashicorp.com/consul/commands/snapshot/agent) + [snapshot agents](/consul/commands/snapshot/agent) within the Consul clusters. They run as a sidecar with Consul servers. - `enabled` ((#v-server-snapshotagent-enabled)) (`boolean: false`) - If true, the chart will install resources necessary to run the snapshot agent. - `interval` ((#v-server-snapshotagent-interval)) (`string: 1h`) - Interval at which to perform snapshots. - Refer to [`interval`](https://developer.hashicorp.com/consul/commands/snapshot/agent#interval) + Refer to [`interval`](/consul/commands/snapshot/agent#interval) - `configSecret` ((#v-server-snapshotagent-configsecret)) - A Kubernetes or Vault secret that should be manually created to contain the entire config to be used on the snapshot agent. This is the preferred method of configuration since there are usually storage - credentials present. Please refer to the [Snapshot agent config](https://developer.hashicorp.com/consul/commands/snapshot/agent#config-file-options) + credentials present. Please refer to the [Snapshot agent config](/consul/commands/snapshot/agent#config-file-options) for details. - `secretName` ((#v-server-snapshotagent-configsecret-secretname)) (`string: null`) - The name of the Kubernetes secret or Vault secret path that holds the snapshot agent config. @@ -966,6 +1000,87 @@ Use these links to navigate to a particular top-level stanza. ... ``` + - `auditLogs` ((#v-server-auditlogs)) - Added in Consul 1.8, the audit object allow users to enable auditing + and configure a sink and filters for their audit logs. Please refer to + [audit logs](/consul/docs/enterprise/audit-logging) documentation + for further information. + + - `enabled` ((#v-server-auditlogs-enabled)) (`boolean: false`) - Controls whether Consul logs out each time a user performs an operation. + global.acls.manageSystemACLs must be enabled to use this feature. + + - `sinks` ((#v-server-auditlogs-sinks)) (`array`) - A single entry of the sink object provides configuration for the destination to which Consul + will log auditing events. 
+ + Example: + + ```yaml + sinks: + - name: My Sink + type: file + format: json + path: /tmp/audit.json + delivery_guarantee: best-effort + rotate_duration: 24h + rotate_max_files: 15 + rotate_bytes: 25165824 + + ``` + + The sink object supports the following keys: + + - `name` - Name of the sink. + + - `type` - Type specifies what kind of sink this is. Currently only file sinks are available + + - `format` - Format specifies what format the events will be emitted with. Currently only `json` + events are emitted. + + - `path` - The directory and filename to write audit events to. + + - `delivery_guarantee` - Specifies the rules governing how audit events are written. Consul + only supports `best-effort` event delivery. + + - `mode` - The permissions to set on the audit log files. + + - `rotate_duration` - Specifies the interval by which the system rotates to a new log file. + At least one of `rotate_duration` or `rotate_bytes` must be configured to enable audit logging. + + - `rotate_bytes` - Specifies how large an individual log file can grow before Consul rotates to a new file. + At least one of rotate_bytes or rotate_duration must be configured to enable audit logging. + + - `rotate_max_files` - Defines the limit that Consul should follow before it deletes old log files. + + - `limits` ((#v-server-limits)) - Settings for potentially limiting timeouts, rate limiting on clients as well + as servers, and other settings to limit exposure too many requests, requests + waiting for too long, and other runtime considerations. + + - `requestLimits` ((#v-server-limits-requestlimits)) - This object specifies configurations that limit the rate of RPC and gRPC + requests on the Consul server. Limiting the rate of gRPC and RPC requests + also limits HTTP requests to the Consul server. + /consul/docs/agent/config/config-files#request_limits + + - `mode` ((#v-server-limits-requestlimits-mode)) (`string: disabled`) - Setting for disabling or enabling rate limiting. If not disabled, it + enforces the action that will occur when RequestLimitsReadRate + or RequestLimitsWriteRate is exceeded. The default value of "disabled" will + prevent any rate limiting from occuring. A value of "enforce" will block + the request from processings by returning an error. A value of + "permissive" will not block the request and will allow the request to + continue processing. + + - `readRate` ((#v-server-limits-requestlimits-readrate)) (`integer: -1`) - Setting that controls how frequently RPC, gRPC, and HTTP + queries are allowed to happen. In any large enough time interval, rate + limiter limits the rate to RequestLimitsReadRate tokens per second. + + See https://en.wikipedia.org/wiki/Token_bucket for more about token + buckets. + + - `writeRate` ((#v-server-limits-requestlimits-writerate)) (`integer: -1`) - Setting that controls how frequently RPC, gRPC, and HTTP + writes are allowed to happen. In any large enough time interval, rate + limiter limits the rate to RequestLimitsWriteRate tokens per second. + + See https://en.wikipedia.org/wiki/Token_bucket for more about token + buckets. + ### externalServers ((#h-externalservers)) - `externalServers` ((#v-externalservers)) - Configuration for Consul servers when the servers are running outside of Kubernetes. @@ -1003,7 +1118,7 @@ Use these links to navigate to a particular top-level stanza. 
- `k8sAuthMethodHost` ((#v-externalservers-k8sauthmethodhost)) (`string: null`) - If you are setting `global.acls.manageSystemACLs` and `connectInject.enabled` to true, set `k8sAuthMethodHost` to the address of the Kubernetes API server. This address must be reachable from the Consul servers. - Please refer to the [Kubernetes Auth Method documentation](https://developer.hashicorp.com/consul/docs/security/acl/auth-methods/kubernetes). + Please refer to the [Kubernetes Auth Method documentation](/consul/docs/security/acl/auth-methods/kubernetes). You could retrieve this value from your `kubeconfig` by running: @@ -1026,7 +1141,7 @@ Use these links to navigate to a particular top-level stanza. - `image` ((#v-client-image)) (`string: null`) - The name of the Docker image (including any tag) for the containers running Consul client agents. - - `join` ((#v-client-join)) (`array: null`) - A list of valid [`-retry-join` values](https://developer.hashicorp.com/consul/docs/agent/config/cli-flags#_retry_join). + - `join` ((#v-client-join)) (`array: null`) - A list of valid [`-retry-join` values](/consul/docs/agent/config/cli-flags#_retry_join). If this is `null` (default), then the clients will attempt to automatically join the server cluster running within Kubernetes. This means that with `server.enabled` set to true, clients will automatically @@ -1044,10 +1159,10 @@ Use these links to navigate to a particular top-level stanza. - `grpc` ((#v-client-grpc)) (`boolean: true`) - If true, agents will enable their GRPC listener on port 8502 and expose it to the host. This will use slightly more resources, but is - required for service mesh. + required for Connect. - `nodeMeta` ((#v-client-nodemeta)) - nodeMeta specifies an arbitrary metadata key/value pair to associate with the node - (refer to [`-node-meta`](https://developer.hashicorp.com/consul/docs/agent/config/cli-flags#_node_meta)) + (refer to [`-node-meta`](/consul/docs/agent/config/cli-flags#_node_meta)) - `pod-name` ((#v-client-nodemeta-pod-name)) (`string: ${HOSTNAME}`) @@ -1091,7 +1206,7 @@ Use these links to navigate to a particular top-level stanza. - `tlsInit` ((#v-client-containersecuritycontext-tlsinit)) (`map`) - The tls-init initContainer - - `extraConfig` ((#v-client-extraconfig)) (`string: {}`) - A raw string of extra [JSON configuration](https://developer.hashicorp.com/consul/docs/agent/config/config-files) for Consul + - `extraConfig` ((#v-client-extraconfig)) (`string: {}`) - A raw string of extra [JSON configuration](/consul/docs/agent/config/config-files) for Consul clients. This will be saved as-is into a ConfigMap that is read by the Consul client agents. This can be used to add additional configuration that isn't directly exposed by the chart. @@ -1357,16 +1472,16 @@ Use these links to navigate to a particular top-level stanza. will inherit from `global.metrics.enabled` value. - `provider` ((#v-ui-metrics-provider)) (`string: prometheus`) - Provider for metrics. Refer to - [`metrics_provider`](https://developer.hashicorp.com/consul/docs/agent/config/config-files#ui_config_metrics_provider) + [`metrics_provider`](/consul/docs/agent/config/config-files#ui_config_metrics_provider) This value is only used if `ui.enabled` is set to true. - `baseURL` ((#v-ui-metrics-baseurl)) (`string: http://prometheus-server`) - baseURL is the URL of the prometheus server, usually the service URL. This value is only used if `ui.enabled` is set to true. 
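As a minimal sketch of how these UI metrics settings combine (assuming the default Prometheus provider and the default service URL shown above — substitute your own Prometheus address):

```yaml
ui:
  enabled: true
  metrics:
    enabled: true
    provider: 'prometheus'
    baseURL: http://prometheus-server
```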
- - `dashboardURLTemplates` ((#v-ui-dashboardurltemplates)) - Corresponds to [`dashboard_url_templates`](https://developer.hashicorp.com/consul/docs/agent/config/config-files#ui_config_dashboard_url_templates) + - `dashboardURLTemplates` ((#v-ui-dashboardurltemplates)) - Corresponds to [`dashboard_url_templates`](/consul/docs/agent/config/config-files#ui_config_dashboard_url_templates) configuration. - - `service` ((#v-ui-dashboardurltemplates-service)) (`string: ""`) - Sets [`dashboardURLTemplates.service`](https://developer.hashicorp.com/consul/docs/agent/config/config-files#ui_config_dashboard_url_templates_service). + - `service` ((#v-ui-dashboardurltemplates-service)) (`string: ""`) - Sets [`dashboardURLTemplates.service`](/consul/docs/agent/config/config-files#ui_config_dashboard_url_templates_service). ### syncCatalog ((#h-synccatalog)) @@ -1386,7 +1501,7 @@ Use these links to navigate to a particular top-level stanza. to run the sync program. - `default` ((#v-synccatalog-default)) (`boolean: true`) - If true, all valid services in K8S are - synced by default. If false, the service must be [annotated](https://developer.hashicorp.com/consul/docs/k8s/service-sync#enable-and-disable-sync) + synced by default. If false, the service must be [annotated](/consul/docs/k8s/service-sync#enable-and-disable-sync) properly to sync. In either case an annotation can override the default. @@ -1568,9 +1683,9 @@ Use these links to navigate to a particular top-level stanza. ### connectInject ((#h-connectinject)) -- `connectInject` ((#v-connectinject)) - Configures the automatic service mesh sidecar injector. +- `connectInject` ((#v-connectinject)) - Configures the automatic Connect sidecar injector. - - `enabled` ((#v-connectinject-enabled)) (`boolean: true`) - True if you want to enable service mesh sidecar injection. Set to "-" to inherit from + - `enabled` ((#v-connectinject-enabled)) (`boolean: true`) - True if you want to enable connect injection. Set to "-" to inherit from global.enabled. - `replicas` ((#v-connectinject-replicas)) (`integer: 1`) - The number of deployment replicas. @@ -1579,14 +1694,14 @@ Use these links to navigate to a particular top-level stanza. - `default` ((#v-connectinject-default)) (`boolean: false`) - If true, the injector will inject the Connect sidecar into all pods by default. Otherwise, pods must specify the - [injection annotation](https://developer.hashicorp.com/consul/docs/k8s/connect#consul-hashicorp-com-connect-inject) - to opt-in to service mesh sidecar injection. If this is true, pods can use the same annotation + [injection annotation](/consul/docs/k8s/connect#consul-hashicorp-com-connect-inject) + to opt-in to Connect injection. If this is true, pods can use the same annotation to explicitly opt-out of injection. - `transparentProxy` ((#v-connectinject-transparentproxy)) - Configures Transparent Proxy for Consul Service mesh services. Using this feature requires Consul 1.10.0-beta1+. - - `defaultEnabled` ((#v-connectinject-transparentproxy-defaultenabled)) (`boolean: true`) - If true, then all Consul service mesh will run with transparent proxy enabled by default, + - `defaultEnabled` ((#v-connectinject-transparentproxy-defaultenabled)) (`boolean: true`) - If true, then all Consul Service mesh will run with transparent proxy enabled by default, i.e. we enforce that all traffic within the pod will go through the proxy. This value is overridable via the "consul.hashicorp.com/transparent-proxy" pod annotation. 
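As a hedged illustration of how this default interacts with the per-pod override: the chart-level value can disable transparent proxy mesh-wide while a single workload opts back in through the annotation named above (the split into a values file and a pod template is only a sketch):

```yaml
# values.yaml — sketch: disable the mesh-wide default
connectInject:
  transparentProxy:
    defaultEnabled: false
---
# Pod template metadata — sketch: opt one workload back in via the annotation
metadata:
  annotations:
    'consul.hashicorp.com/transparent-proxy': 'true'
```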
@@ -1613,6 +1728,64 @@ Use these links to navigate to a particular top-level stanza. - `minAvailable` ((#v-connectinject-disruptionbudget-minavailable)) (`integer: null`) - The minimum number of available pods. Takes precedence over maxUnavailable if set. + - `apiGateway` ((#v-connectinject-apigateway)) - Configuration settings for the Consul API Gateway integration. + + - `manageExternalCRDs` ((#v-connectinject-apigateway-manageexternalcrds)) (`boolean: true`) - Enables Consul on Kubernetes to manage the CRDs used for Gateway API. + Setting this to true will install the CRDs used for the Gateway API when Consul on Kubernetes is installed. + These CRDs can clash with existing Gateway API CRDs if they are already installed in your cluster. + If this setting is false, you will need to install the Gateway API CRDs manually. + + - `managedGatewayClass` ((#v-connectinject-apigateway-managedgatewayclass)) - Configuration settings for the GatewayClass installed by Consul on Kubernetes. + + - `nodeSelector` ((#v-connectinject-apigateway-managedgatewayclass-nodeselector)) (`string: null`) - This value defines [`nodeSelector`](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector) + labels for gateway pod assignment, formatted as a multi-line string. + + Example: + + ```yaml + nodeSelector: | + beta.kubernetes.io/arch: amd64 + ``` + + - `tolerations` ((#v-connectinject-apigateway-managedgatewayclass-tolerations)) (`string: null`) - Toleration settings for gateway pods created with the managed gateway class. + This should be a multi-line string matching the + [Tolerations](https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/) array in a Pod spec. + + - `serviceType` ((#v-connectinject-apigateway-managedgatewayclass-servicetype)) (`string: LoadBalancer`) - This value defines the type of Service created for gateways (e.g. LoadBalancer, ClusterIP) + + - `copyAnnotations` ((#v-connectinject-apigateway-managedgatewayclass-copyannotations)) - Configuration settings for annotations to be copied from the Gateway to other child resources. + + - `service` ((#v-connectinject-apigateway-managedgatewayclass-copyannotations-service)) (`string: null`) - This value defines a list of annotations to be copied from the Gateway to the Service created, formatted as a multi-line string. + + Example: + + ```yaml + service: + annotations: | + - external-dns.alpha.kubernetes.io/hostname + ``` + + - `deployment` ((#v-connectinject-apigateway-managedgatewayclass-deployment)) - This value defines the number of pods to deploy for each Gateway as well as a min and max number of pods for all Gateways + + - `defaultInstances` ((#v-connectinject-apigateway-managedgatewayclass-deployment-defaultinstances)) (`integer: 1`) + + - `maxInstances` ((#v-connectinject-apigateway-managedgatewayclass-deployment-maxinstances)) (`integer: 1`) + + - `minInstances` ((#v-connectinject-apigateway-managedgatewayclass-deployment-mininstances)) (`integer: 1`) + + - `serviceAccount` ((#v-connectinject-apigateway-serviceaccount)) - Configuration for the ServiceAccount created for the api-gateway component + + - `annotations` ((#v-connectinject-apigateway-serviceaccount-annotations)) (`string: null`) - This value defines additional annotations for the client service account. This should be formatted as a multi-line + string. 
+ + ```yaml + annotations: | + "sample/annotation1": "foo" + "sample/annotation2": "bar" + ``` + + - `resources` ((#v-connectinject-apigateway-resources)) (`map`) - The resource settings for Pods handling traffic for Gateway API. + - `cni` ((#v-connectinject-cni)) - Configures consul-cni plugin for Consul Service mesh services - `enabled` ((#v-connectinject-cni-enabled)) (`boolean: false`) - If true, then all traffic redirection setup uses the consul-cni plugin. @@ -1681,7 +1854,7 @@ Use these links to navigate to a particular top-level stanza. persistent: true ``` - - `metrics` ((#v-connectinject-metrics)) - Configures metrics for services in the Consul service mesh. All values are overridable + - `metrics` ((#v-connectinject-metrics)) - Configures metrics for Consul service mesh services. All values are overridable via annotations on a per-pod basis. - `defaultEnabled` ((#v-connectinject-metrics-defaultenabled)) (`string: -`) - If true, the connect-injector will automatically @@ -1690,14 +1863,14 @@ Use these links to navigate to a particular top-level stanza. metrics will depend on whether metrics merging is enabled: - If metrics merging is enabled: the consul-dataplane will run a merged metrics server - combining Envoy sidecar and mesh service metrics, + combining Envoy sidecar and Connect service metrics, i.e. if your service exposes its own Prometheus metrics. - If metrics merging is disabled: the listener will just expose Envoy sidecar metrics. This will inherit from `global.metrics.enabled`. - `defaultEnableMerging` ((#v-connectinject-metrics-defaultenablemerging)) (`boolean: false`) - Configures the consul-dataplane to run a merged metrics server - to combine and serve both Envoy and mesh service metrics. + to combine and serve both Envoy and Connect service metrics. This feature is available only in Consul v1.10.0 or greater. - `defaultMergedMetricsPort` ((#v-connectinject-metrics-defaultmergedmetricsport)) (`integer: 20100`) - Configures the port at which the consul-dataplane will listen on to return @@ -1763,13 +1936,13 @@ Use these links to navigate to a particular top-level stanza. - `requests` ((#v-connectinject-resources-requests)) - - `memory` ((#v-connectinject-resources-requests-memory)) (`string: 50Mi`) - Recommended production default: 500Mi + - `memory` ((#v-connectinject-resources-requests-memory)) (`string: 200Mi`) - Recommended production default: 500Mi - `cpu` ((#v-connectinject-resources-requests-cpu)) (`string: 50m`) - Recommended production default: 250m - `limits` ((#v-connectinject-resources-limits)) - - `memory` ((#v-connectinject-resources-limits-memory)) (`string: 50Mi`) - Recommended production default: 500Mi + - `memory` ((#v-connectinject-resources-limits-memory)) (`string: 200Mi`) - Recommended production default: 500Mi - `cpu` ((#v-connectinject-resources-limits-cpu)) (`string: 50m`) - Recommended production default: 250m @@ -1798,13 +1971,13 @@ Use these links to navigate to a particular top-level stanza. namespace-label: label-value ``` - - `k8sAllowNamespaces` ((#v-connectinject-k8sallownamespaces)) (`array: ["*"]`) - List of k8s namespaces to allow service mesh sidecar + - `k8sAllowNamespaces` ((#v-connectinject-k8sallownamespaces)) (`array: ["*"]`) - List of k8s namespaces to allow Connect sidecar injection in. If a k8s namespace is not included or is listed in `k8sDenyNamespaces`, pods in that k8s namespace will not be injected even if they are explicitly annotated. Use `["*"]` to automatically allow all k8s namespaces. 
For example, `["namespace1", "namespace2"]` will only allow pods in the k8s - namespaces `namespace1` and `namespace2` to have service mesh sidecars injected + namespaces `namespace1` and `namespace2` to have Consul service mesh sidecars injected and registered with Consul. All other k8s namespaces will be ignored. To deny all namespaces, set this to `[]`. @@ -1813,7 +1986,7 @@ Use these links to navigate to a particular top-level stanza. `namespaceSelector` takes precedence over both since it is applied first. `kube-system` and `kube-public` are never injected, even if included here. - - `k8sDenyNamespaces` ((#v-connectinject-k8sdenynamespaces)) (`array: []`) - List of k8s namespaces that should not allow service mesh + - `k8sDenyNamespaces` ((#v-connectinject-k8sdenynamespaces)) (`array: []`) - List of k8s namespaces that should not allow Connect sidecar injection. This list takes precedence over `k8sAllowNamespaces`. `*` is not supported because then nothing would be allowed to be injected. @@ -1869,8 +2042,8 @@ Use these links to navigate to a particular top-level stanza. If set to an empty string all service accounts can log in. This only has effect if ACLs are enabled. - Refer to Auth methods [Binding rules](https://developer.hashicorp.com/consul/docs/security/acl/auth-methods#binding-rules) - and [Trusted identiy attributes](https://developer.hashicorp.com/consul/docs/security/acl/auth-methods/kubernetes#trusted-identity-attributes) + Refer to Auth methods [Binding rules](/consul/docs/security/acl/auth-methods#binding-rules) + and [Trusted identiy attributes](/consul/docs/security/acl/auth-methods/kubernetes#trusted-identity-attributes) for more details. Requires Consul >= v1.5. @@ -1878,7 +2051,7 @@ Use these links to navigate to a particular top-level stanza. auth method for Connect inject, set this to the name of your auth method. - `aclInjectToken` ((#v-connectinject-aclinjecttoken)) - Refers to a Kubernetes secret that you have created that contains - an ACL token for your Consul cluster which allows the connect injector the correct + an ACL token for your Consul cluster which allows the Connect injector the correct permissions. This is only needed if Consul namespaces and ACLs are enabled on the Consul cluster and you are not setting `global.acls.manageSystemACLs` to `true`. @@ -1922,7 +2095,26 @@ Use these links to navigate to a particular top-level stanza. - `cpu` ((#v-connectinject-sidecarproxy-resources-limits-cpu)) (`string: null`) - Recommended production default: 100m - - `initContainer` ((#v-connectinject-initcontainer)) (`map`) - The resource settings for the connect injected init container. If null, the resources + - `lifecycle` ((#v-connectinject-sidecarproxy-lifecycle)) (`map`) - Set default lifecycle management configuration for sidecar proxy. 
+ These settings can be overridden on a per-pod basis via these annotations: + + - `consul.hashicorp.com/enable-sidecar-proxy-lifecycle` + - `consul.hashicorp.com/enable-sidecar-proxy-shutdown-drain-listeners` + - `consul.hashicorp.com/sidecar-proxy-lifecycle-shutdown-grace-period-seconds` + - `consul.hashicorp.com/sidecar-proxy-lifecycle-graceful-port` + - `consul.hashicorp.com/sidecar-proxy-lifecycle-graceful-shutdown-path` + + - `defaultEnabled` ((#v-connectinject-sidecarproxy-lifecycle-defaultenabled)) (`boolean: true`) + + - `defaultEnableShutdownDrainListeners` ((#v-connectinject-sidecarproxy-lifecycle-defaultenableshutdowndrainlisteners)) (`boolean: true`) + + - `defaultShutdownGracePeriodSeconds` ((#v-connectinject-sidecarproxy-lifecycle-defaultshutdowngraceperiodseconds)) (`integer: 30`) + + - `defaultGracefulPort` ((#v-connectinject-sidecarproxy-lifecycle-defaultgracefulport)) (`integer: 20600`) + + - `defaultGracefulShutdownPath` ((#v-connectinject-sidecarproxy-lifecycle-defaultgracefulshutdownpath)) (`string: /graceful_shutdown`) + + - `initContainer` ((#v-connectinject-initcontainer)) (`map`) - The resource settings for the Connect injected init container. If null, the resources won't be set for the initContainer. The defaults are optimized for developer instances of Kubernetes, however they should be tweaked with the recommended defaults as shown below to speed up service registration times. @@ -1942,11 +2134,11 @@ Use these links to navigate to a particular top-level stanza. ### meshGateway ((#h-meshgateway)) -- `meshGateway` ((#v-meshgateway)) - [Mesh Gateways](https://developer.hashicorp.com/consul/docs/connect/gateways/mesh-gateway) enable Consul service mesh to work across Consul datacenters. +- `meshGateway` ((#v-meshgateway)) - [Mesh Gateways](/consul/docs/connect/gateways/mesh-gateway) enable Consul Connect to work across Consul datacenters. - - `enabled` ((#v-meshgateway-enabled)) (`boolean: false`) - If [mesh gateways](https://developer.hashicorp.com/consul/docs/connect/gateways/mesh-gateway) are enabled, a Deployment will be created that runs + - `enabled` ((#v-meshgateway-enabled)) (`boolean: false`) - If [mesh gateways](/consul/docs/connect/gateways/mesh-gateway) are enabled, a Deployment will be created that runs gateways and Consul service mesh will be configured to use gateways. - This setting is required for [cluster peering](https://developer.hashicorp.com/consul/docs/connect/cluster-peering/k8s). + This setting is required for [Cluster Peering](/consul/docs/connect/cluster-peering/k8s). Requirements: consul 1.6.0+ if using `global.acls.manageSystemACLs``. - `replicas` ((#v-meshgateway-replicas)) (`integer: 1`) - Number of replicas for the Deployment. @@ -2110,8 +2302,7 @@ Use these links to navigate to a particular top-level stanza. for a specific gateway. Requirements: consul >= 1.8.0 - - `enabled` ((#v-ingressgateways-enabled)) (`boolean: false`) - Enable ingress gateway deployment. Requires `connectInject.enabled=true` - and `client.enabled=true`. + - `enabled` ((#v-ingressgateways-enabled)) (`boolean: false`) - Enable ingress gateway deployment. Requires `connectInject.enabled=true`. - `defaults` ((#v-ingressgateways-defaults)) - Defaults sets default values for all gateway fields. With the exception of annotations, defining any of these values in the `gateways` list @@ -2228,7 +2419,7 @@ Use these links to navigate to a particular top-level stanza. `defaults`. 
Values defined here override the defaults except in the case of annotations where both will be applied. - - `name` ((#v-ingressgateways-gateways-name)) (`string: ingress-gateway`) + - `name` ((#v-ingressgateways-gateways-name)) (`string: ingress-gateway`) ### terminatingGateways ((#h-terminatinggateways)) @@ -2240,8 +2431,7 @@ Use these links to navigate to a particular top-level stanza. for a specific gateway. Requirements: consul >= 1.8.0 - - `enabled` ((#v-terminatinggateways-enabled)) (`boolean: false`) - Enable terminating gateway deployment. Requires `connectInject.enabled=true` - and `client.enabled=true`. + - `enabled` ((#v-terminatinggateways-enabled)) (`boolean: false`) - Enable terminating gateway deployment. Requires `connectInject.enabled=true`. - `defaults` ((#v-terminatinggateways-defaults)) - Defaults sets default values for all gateway fields. With the exception of annotations, defining any of these values in the `gateways` list @@ -2344,11 +2534,12 @@ Use these links to navigate to a particular top-level stanza. `defaults`. Values defined here override the defaults except in the case of annotations where both will be applied. - - `name` ((#v-terminatinggateways-gateways-name)) (`string: terminating-gateway`) + - `name` ((#v-terminatinggateways-gateways-name)) (`string: terminating-gateway`) ### apiGateway ((#h-apigateway)) -- `apiGateway` ((#v-apigateway)) - Configuration settings for the Consul API Gateway integration +- `apiGateway` ((#v-apigateway)) - [DEPRECATED] Use connectInject.apiGateway instead. This stanza will be removed with the release of Consul 1.17 + Configuration settings for the Consul API Gateway integration - `enabled` ((#v-apigateway-enabled)) (`boolean: false`) - When true the helm chart will install the Consul API Gateway controller From 3dc6f8fc0642b779a207ce308b1bf91a5750ea11 Mon Sep 17 00:00:00 2001 From: Nick Irvine <115657443+nfi-hashicorp@users.noreply.github.com> Date: Tue, 11 Jul 2023 17:13:54 -0700 Subject: [PATCH 15/43] ci: use gotestsum v1.10.1 [NET-4042] (#18088) --- .github/workflows/reusable-unit-split.yml | 2 +- .github/workflows/reusable-unit.yml | 2 +- .github/workflows/test-integrations.yml | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/reusable-unit-split.yml b/.github/workflows/reusable-unit-split.yml index e2da1920967e..7750627f4253 100644 --- a/.github/workflows/reusable-unit-split.yml +++ b/.github/workflows/reusable-unit-split.yml @@ -46,7 +46,7 @@ on: required: true env: TEST_RESULTS: /tmp/test-results - GOTESTSUM_VERSION: 1.8.2 + GOTESTSUM_VERSION: "1.10.1" GOARCH: ${{inputs.go-arch}} TOTAL_RUNNERS: ${{inputs.runner-count}} CONSUL_LICENSE: ${{secrets.consul-license}} diff --git a/.github/workflows/reusable-unit.yml b/.github/workflows/reusable-unit.yml index 3f7ffa277412..c066cad3f48d 100644 --- a/.github/workflows/reusable-unit.yml +++ b/.github/workflows/reusable-unit.yml @@ -42,7 +42,7 @@ on: required: true env: TEST_RESULTS: /tmp/test-results - GOTESTSUM_VERSION: 1.8.2 + GOTESTSUM_VERSION: "1.10.1" GOARCH: ${{inputs.go-arch}} CONSUL_LICENSE: ${{secrets.consul-license}} GOTAGS: ${{ inputs.go-tags}} diff --git a/.github/workflows/test-integrations.yml b/.github/workflows/test-integrations.yml index 641533012db4..263a2e41e4ae 100644 --- a/.github/workflows/test-integrations.yml +++ b/.github/workflows/test-integrations.yml @@ -19,7 +19,7 @@ env: TEST_RESULTS_ARTIFACT_NAME: test-results CONSUL_LICENSE: ${{ secrets.CONSUL_LICENSE }} GOTAGS: ${{ endsWith(github.repository, '-enterprise') 
&& 'consulent' || '' }} - GOTESTSUM_VERSION: "1.9.0" + GOTESTSUM_VERSION: "1.10.1" CONSUL_BINARY_UPLOAD_NAME: consul-bin # strip the hashicorp/ off the front of github.repository for consul CONSUL_LATEST_IMAGE_NAME: ${{ endsWith(github.repository, '-enterprise') && github.repository || 'consul' }} From 51d8eb8e07f3ecf251380c90079ad7f984c734e3 Mon Sep 17 00:00:00 2001 From: Curt Bushko Date: Tue, 11 Jul 2023 23:11:38 -0400 Subject: [PATCH 16/43] Docs: Update proxy lifecycle annotations and consul-dataplane flags (#18075) * Update proxy lifecycle annotations and consul-dataplane flags --- .../connect/dataplane/consul-dataplane.mdx | 4 ++ .../docs/k8s/annotations-and-labels.mdx | 42 +++++++++++-------- 2 files changed, 28 insertions(+), 18 deletions(-) diff --git a/website/content/docs/connect/dataplane/consul-dataplane.mdx b/website/content/docs/connect/dataplane/consul-dataplane.mdx index ab59a5ba60cc..cf0ae4332120 100644 --- a/website/content/docs/connect/dataplane/consul-dataplane.mdx +++ b/website/content/docs/connect/dataplane/consul-dataplane.mdx @@ -54,6 +54,8 @@ The following options are required when starting `consul-dataplane` with the CLI - `-envoy-concurrency` - The number of worker threads that Envoy uses. Default is `2`. Accepted environment variable is `DP_ENVOY_CONCURRENCY`. - `-envoy-ready-bind-address` - The address Envoy's readiness probe is available on. Accepted environment variable is `DP_ENVOY_READY_BIND_ADDRESS`. - `-envoy-ready-bind-port` - The port Envoy's readiness probe is available on. Accepted environment variable is `DP_ENVOY_READY_BIND_PORT`. +- `-graceful-port` - The port to serve HTTP endpoints for graceful operations. Accepted environment variable is `DP_GRACEFUL_PORT`. +- `-graceful-shutdown-path` - The HTTP path to serve the graceful shutdown endpoint. Accepted environment variable is `DP_GRACEFUL_SHUTDOWN_PATH`. - `-grpc-port` - The Consul server gRPC port to which `consul-dataplane` connects. Default is `8502`. Accepted environment variable is `DP_CONSUL_GRPC_PORT`. - `-log-json` - Enables log messages in JSON format. Default is `false`. Accepted environment variable is `DP_LOG_JSON`. - `-log-level` - Log level of the messages to print. Available log levels are `"trace"`, `"debug"`, `"info"`, `"warn"`, and `"error"`. Default is `"info"`. Accepted environment variable is `DP_LOG_LEVEL`. @@ -71,6 +73,8 @@ The following options are required when starting `consul-dataplane` with the CLI - `-service-node-id` - The ID of the Consul node to which the proxy service instance is registered. Accepted environment variable is `DP_SERVICE_NODE_ID`. - `-service-node-name` - The name of the Consul node to which the proxy service instance is registered. Accepted environment variable is `DP_SERVICE_NODE_NAME`. - `-service-partition` - The Consul Enterprise partition in which the proxy service instance is registered. Accepted environment variable is `DP_SERVICE_PARTITION`. +- `-shutdown-drain-listeners` - Wait for proxy listeners to drain before terminating the proxy container. Accepted environment variable is `DP_SHUTDOWN_DRAIN_LISTENERS`. +- `-shutdown-grace-period-seconds` - Amount of time to wait after receiving a SIGTERM signal before terminating the proxy. Accepted environment variable is `DP_SHUTDOWN_GRACE_PERIOD_SECONDS`. - `-static-token` - The ACL token used to authenticate requests to Consul servers when `-credential-type` is set to `"static"`. Accepted environment variable is `DP_CREDENTIAL_STATIC_TOKEN`. 
- `-telemetry-prom-ca-certs-path` - The path to a file or directory containing CA certificates used to verify the Prometheus server's certificate. Accepted environment variable is `DP_TELEMETRY_PROM_CA_CERTS_PATH`. - `-telemetry-prom-cert-file` - The path to the client certificate used to serve Prometheus metrics. Accepted environment variable is `DP_TELEMETRY_PROM_CERT_FILE`. diff --git a/website/content/docs/k8s/annotations-and-labels.mdx b/website/content/docs/k8s/annotations-and-labels.mdx index 56d0aa6006f1..0735ede6cce1 100644 --- a/website/content/docs/k8s/annotations-and-labels.mdx +++ b/website/content/docs/k8s/annotations-and-labels.mdx @@ -91,38 +91,38 @@ The following Kubernetes resource annotations could be used on a pod to control annotations: "consul.hashicorp.com/connect-service-upstreams":"[service-name].svc:[port]" ``` - + - Peer or datacenter: Place the peer or datacenter after `svc.` followed by either `peer` or `dc` and the port number. - + ```yaml annotations: "consul.hashicorp.com/connect-service-upstreams":"[service-name].svc.[service-peer].peer:[port]" ``` - + ```yaml annotations: "consul.hashicorp.com/connect-service-upstreams":"[service-name].svc.[service-dc].dc:[port]" ``` - + - Namespace (requires Consul Enterprise): Place the namespace after `svc.` followed by `ns` and the port number. - + ```yaml annotations: "consul.hashicorp.com/connect-service-upstreams":"[service-name].svc.[service-namespace].ns:[port]" ``` - + When namespaces are enabled, you must include the namespace in the annotation before specifying a cluster peer, WAN-federated datacenter, or admin partition in the same datacenter. - + ```yaml annotations: "consul.hashicorp.com/connect-service-upstreams":"[service-name].svc.[service-namespace].ns.[service-peer].peer:[port]" ``` - + ```yaml annotations: "consul.hashicorp.com/connect-service-upstreams":"[service-name].svc.[service-namespace].ns.[service-partition].ap:[port]" ``` - + ```yaml annotations: "consul.hashicorp.com/connect-service-upstreams":"[service-name].svc.[service-namespace].ns.[service-dc].dc:[port]" @@ -132,7 +132,7 @@ The following Kubernetes resource annotations could be used on a pod to control The unlabeled annotation format allows you to reference any service not in a cluster peer as an upstream. You can specify a Consul Enterprise namespace. You can also specify an admin partition in the same datacenter or a WAN-federated datacenter. Unlike the labeled annotation, you can also reference a prepared query as an upstream. - Service name: Place the service name at the beginning of the annotation to specify the upstream service. You also have the option to append the WAN federated datacenter where the service is deployed. - + ```yaml annotations: "consul.hashicorp.com/connect-service-upstreams":"[service-name]:[port]:[optional datacenter]" @@ -140,7 +140,7 @@ The following Kubernetes resource annotations could be used on a pod to control - Namespace: Upstream services may be running in a different namespace. Place the upstream namespace after the service name. For additional details about configuring the injector, refer to [Consul Enterprise namespaces](#consul-enterprise-namespaces) . 
- + ```yaml annotations: "consul.hashicorp.com/connect-service-upstreams":"[service-name].[service-namespace]:[port]:[optional datacenter]" @@ -158,7 +158,7 @@ The following Kubernetes resource annotations could be used on a pod to control annotations: "consul.hashicorp.com/connect-service-upstreams":"[service-name].[service-namespace].[service-partition]:[port]:[optional datacenter]" ``` - + - Prepared queries: To reference a [prepared query](/consul/api-docs/query) in an upstream annotation, prepend the annotation with `prepared_query` and then invoke the name of the query. @@ -166,7 +166,7 @@ The following Kubernetes resource annotations could be used on a pod to control annotations: 'consul.hashicorp.com/connect-service-upstreams': 'prepared_query:[query name]:[port]' ``` - + - **Multiple upstreams**: Delimit multiple services or upstreams with commas. You can specify any of the unlabeled, labeled, or prepared query formats when using the supported versions for the formats. ```yaml @@ -239,6 +239,12 @@ The following Kubernetes resource annotations could be used on a pod to control - `consul.hashicorp.com/consul-sidecar-memory-limit` - Override the default memory limit. - `consul.hashicorp.com/consul-sidecar-memory-request` - Override the default memory request. +- `consul.hashicorp.com/enable-sidecar-proxy-lifecycle` - Override the default Helm value [`connectInject.sidecarProxy.lifecycle.defaultEnabled`](/consul/docs/k8s/helm#v-connectinject-sidecarproxy-lifecycle-defaultenabled) +- `consul.hashicorp.com/enable-sidecar-proxy-shutdown-drain-listeners` - Override the default Helm value [`connectInject.sidecarProxy.lifecycle.defaultEnableShutdownDrainListeners`](/consul/docs/k8s/helm#v-connectinject-sidecarproxy-lifecycle-defaultenableshutdowndrainlisteners) +- `consul.hashicorp.com/sidecar-proxy-lifecycle-shutdown-grace-period-seconds` - Override the default Helm value [`connectInject.sidecarProxy.lifecycle.defaultShutdownGracePeriodSeconds`](/consul/docs/k8s/helm#v-connectinject-sidecarproxy-lifecycle-defaultshutdowngraceperiodseconds) +- `consul.hashicorp.com/sidecar-proxy-lifecycle-graceful-port` - Override the default Helm value [`connectInject.sidecarProxy.lifecycle.defaultGracefulPort`](/consul/docs/k8s/helm#v-connectinject-sidecarproxy-lifecycle-defaultgracefulport) +- `consul.hashicorp.com/sidecar-proxy-lifecycle-graceful-shutdown-path` - Override the default Helm value [`connectInject.sidecarProxy.lifecycle.defaultGracefulShutdownPath`](/consul/docs/k8s/helm#v-connectinject-sidecarproxy-lifecycle-defaultgracefulshutdownpath) + - `consul.hashicorp.com/enable-metrics` - Override the default Helm value [`connectInject.metrics.defaultEnabled`](/consul/docs/k8s/helm#v-connectinject-metrics-defaultenabled). - `consul.hashicorp.com/enable-metrics-merging` - Override the default Helm value [`connectInject.metrics.defaultEnableMerging`](/consul/docs/k8s/helm#v-connectinject-metrics-defaultenablemerging). - `consul.hashicorp.com/merged-metrics-port` - Override the default Helm value [`connectInject.metrics.defaultMergedMetricsPort`](/consul/docs/k8s/helm#v-connectinject-metrics-defaultmergedmetricsport). @@ -281,21 +287,21 @@ Resource labels could be used on a Kubernetes service to control connect-inject registration to ignore all services except for the one which should be used for routing requests using Consul. 
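To make the proxy lifecycle annotations above concrete, the following pod-template sketch is illustrative only; the annotation names come from this page, while the specific values (a 60-second grace period, the default graceful port and shutdown path) are assumptions chosen for the example:

```yaml
metadata:
  annotations:
    'consul.hashicorp.com/enable-sidecar-proxy-lifecycle': 'true'
    'consul.hashicorp.com/enable-sidecar-proxy-shutdown-drain-listeners': 'true'
    'consul.hashicorp.com/sidecar-proxy-lifecycle-shutdown-grace-period-seconds': '60'
    'consul.hashicorp.com/sidecar-proxy-lifecycle-graceful-port': '20600'
    'consul.hashicorp.com/sidecar-proxy-lifecycle-graceful-shutdown-path': '/graceful_shutdown'
```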
-## Service Sync +## Service Sync ### Annotations The following Kubernetes resource annotations could be used on a pod to [Service Sync](https://developer.hashicorp.com/consul/docs/k8s/service-sync) behavior: -- `consul.hashicorp.com/service-sync`: If this is set to `true`, then the Kubernetes service is explicitly configured to be synced to Consul. +- `consul.hashicorp.com/service-sync`: If this is set to `true`, then the Kubernetes service is explicitly configured to be synced to Consul. ```yaml annotations: 'consul.hashicorp.com/service-sync': 'true' ``` -- `consul.hashicorp.com/service-port`: Configures the port to register to the Consul Catalog for the Kubernetes service. The annotation value may be a name of a port (recommended) or an exact port value. Refer to [service ports](https://developer.hashicorp.com/consul/docs/k8s/service-sync#service-ports) for more information. - +- `consul.hashicorp.com/service-port`: Configures the port to register to the Consul Catalog for the Kubernetes service. The annotation value may be a name of a port (recommended) or an exact port value. Refer to [service ports](https://developer.hashicorp.com/consul/docs/k8s/service-sync#service-ports) for more information. + ```yaml annotations: 'consul.hashicorp.com/service-port': 'http' @@ -315,7 +321,7 @@ The following Kubernetes resource annotations could be used on a pod to [Service 'consul.hashicorp.com/service-meta-KEY': 'value' ``` -- `consul.hashicorp.com/service-weight:` - Configures ability to support weighted loadbalancing by service annotation for Catalog Sync. The integer provided will be applied as a weight for the `passing` state for the health of the service. Refer to [weights](/consul/docs/services/configuration/services-configuration-reference#weights) in service configuration for more information on how this is leveraged for services in the Consul catalog. +- `consul.hashicorp.com/service-weight:` - Configures ability to support weighted loadbalancing by service annotation for Catalog Sync. The integer provided will be applied as a weight for the `passing` state for the health of the service. Refer to [weights](/consul/docs/services/configuration/services-configuration-reference#weights) in service configuration for more information on how this is leveraged for services in the Consul catalog. 
```yaml annotations: From f472164f053aac8990edf1939adaad489539697f Mon Sep 17 00:00:00 2001 From: Tom Davies Date: Wed, 12 Jul 2023 16:24:12 +0100 Subject: [PATCH 17/43] Pass configured role name to Vault for AWS auth in Connect CA (#17885) --- .changelog/17885.txt | 2 ++ agent/connect/ca/provider_vault_auth_aws.go | 7 +++++++ agent/connect/ca/provider_vault_auth_test.go | 17 ++++++++++++++--- 3 files changed, 23 insertions(+), 3 deletions(-) create mode 100644 .changelog/17885.txt diff --git a/.changelog/17885.txt b/.changelog/17885.txt new file mode 100644 index 000000000000..2cd690488d92 --- /dev/null +++ b/.changelog/17885.txt @@ -0,0 +1,2 @@ +```release-note:bug +ca: Fixed a bug where the Vault provider was not passing the configured role param for AWS auth diff --git a/agent/connect/ca/provider_vault_auth_aws.go b/agent/connect/ca/provider_vault_auth_aws.go index 59f1c98035af..02abf39824cb 100644 --- a/agent/connect/ca/provider_vault_auth_aws.go +++ b/agent/connect/ca/provider_vault_auth_aws.go @@ -72,6 +72,13 @@ func (g *AWSLoginDataGenerator) GenerateLoginData(authMethod *structs.VaultAuthM if err != nil { return nil, fmt.Errorf("aws auth failed to generate login data: %w", err) } + + // If a Vault role name is specified, we need to manually add this + role, ok := authMethod.Params["role"] + if ok { + loginData["role"] = role + } + return loginData, nil } diff --git a/agent/connect/ca/provider_vault_auth_test.go b/agent/connect/ca/provider_vault_auth_test.go index c6979dbbe50a..74507acb39e7 100644 --- a/agent/connect/ca/provider_vault_auth_test.go +++ b/agent/connect/ca/provider_vault_auth_test.go @@ -278,15 +278,22 @@ func TestVaultCAProvider_AWSCredentialsConfig(t *testing.T) { func TestVaultCAProvider_AWSLoginDataGenerator(t *testing.T) { cases := map[string]struct { - expErr error + expErr error + authMethod structs.VaultAuthMethod }{ - "valid login data": {}, + "valid login data": { + authMethod: structs.VaultAuthMethod{}, + }, + "with role": { + expErr: nil, + authMethod: structs.VaultAuthMethod{Type: "aws", MountPath: "", Params: map[string]interface{}{"role": "test-role"}}, + }, } for name, c := range cases { t.Run(name, func(t *testing.T) { ldg := &AWSLoginDataGenerator{credentials: credentials.AnonymousCredentials} - loginData, err := ldg.GenerateLoginData(&structs.VaultAuthMethod{}) + loginData, err := ldg.GenerateLoginData(&c.authMethod) if c.expErr != nil { require.Error(t, err) require.Contains(t, err.Error(), c.expErr.Error()) @@ -307,6 +314,10 @@ func TestVaultCAProvider_AWSLoginDataGenerator(t *testing.T) { require.True(t, exists, "missing expected key: %s", key) require.NotEmpty(t, val, "expected non-empty value for key: %s", key) } + + if c.authMethod.Params["role"] != nil { + require.Equal(t, c.authMethod.Params["role"], loginData["role"]) + } }) } } From ebfed566b28bd02aa461b360b90c37ef512d847c Mon Sep 17 00:00:00 2001 From: Luke Kysow <1034429+lkysow@users.noreply.github.com> Date: Wed, 12 Jul 2023 09:54:35 -0700 Subject: [PATCH 18/43] Docs for dataplane upgrade on k8s (#18051) * Docs for dataplane upgrade on k8s --------- Co-authored-by: David Yu Co-authored-by: trujillo-adam <47586768+trujillo-adam@users.noreply.github.com> --- website/content/docs/k8s/upgrade/index.mdx | 35 ++++++++++++---------- 1 file changed, 20 insertions(+), 15 deletions(-) diff --git a/website/content/docs/k8s/upgrade/index.mdx b/website/content/docs/k8s/upgrade/index.mdx index e41141c142a6..695e615b7bf9 100644 --- a/website/content/docs/k8s/upgrade/index.mdx +++ 
b/website/content/docs/k8s/upgrade/index.mdx @@ -219,25 +219,23 @@ In earlier versions, Consul on Kubernetes used client agents in its deployments. If you upgrade Consul from a version that uses client agents to a version the uses dataplanes, complete the following steps to upgrade your deployment safely and without downtime. -1. Before you upgrade, edit your Helm chart configuration to enable Consul client agents by setting `client.enabled` and `client.updateStrategy`: +1. If ACLs are enabled, you must first upgrade to consul-k8s 0.49.8 or above. These versions expose the setting `connectInject.prepareDataplanesUpgrade` + which is required for no-downtime upgrades when ACLs are enabled. + + Set `connectInject.prepareDataplanesUpgrade` to `true` and then perform the upgrade to 0.49.8 or above (whichever is the latest in the 0.49.x series) ```yaml filename="values.yaml" - client: - enabled: true - updateStrategy: | - type: OnDelete + connectInject: + prepareDataplanesUpgrade: true ``` -1. Update the `connect-injector` to not log out on restart -to make sure that the ACL tokens used by existing services are still valid during the migration to `consul-dataplane`. -Note that you must remove the token manually after completing the migration. +1. Consul dataplanes disables Consul clients by default, but during an upgrade you need to ensure Consul clients continue to run. Edit your Helm chart configuration and set the [`client.enabled`](/consul/docs/k8s/helm#v-client-enabled) field to `true` and specify an action for Consul to take during the upgrade process in the [`client.updateStrategy`](/consul/docs/k8s/helm#v-client-updatestrategy) field: - The following command triggers the deployment rollout. Wait for the rollout to complete before proceeding to next step. - - ```bash - kubectl config set-context --current --namespace= - INJECTOR_DEPLOYMENT=$(kubectl get deploy -l "component=connect-injector" -o=jsonpath='{.items[0].metadata.name}') - kubectl patch deploy $INJECTOR_DEPLOYMENT --type='json' -p='[{"op": "remove", "path": "/spec/template/spec/containers/0/lifecycle"}]' + ```yaml filename="values.yaml" + client: + enabled: true + updateStrategy: | + type: OnDelete ``` 1. Follow our [recommended procedures to upgrade servers](#upgrade-consul-servers) on Kubernetes deployments to upgrade Helm values for the new version of Consul. The latest version of consul-k8s components may be in a CrashLoopBackoff state during the performance of the server upgrade from versions <1.14.x until all Consul servers are on versions >=1.14.x. Components in CrashLoopBackoff will not negatively affect the cluster because older versioned components will still be operating. Once all servers have been fully upgraded, the latest consul-k8s components will automatically restore from CrashLoopBackoff and older component versions will be spun down. @@ -246,7 +244,14 @@ Note that you must remove the token manually after completing the migration. 1. Restart all gateways in your service mesh. -1. Disable client agents in your Helm chart by deleting the `client` stanza or setting `client.enabled` to `false` and running a `consul-k8s` or Helm upgrade. +1. Now that all services and gateways are using Consul dataplanes, disable client agents in your Helm chart by deleting the `client` stanza or setting `client.enabled` to `false` and running a `consul-k8s` or Helm upgrade. + +1. If ACLs are enabled, outdated ACL tokens will persist a result of the upgrade. You can manually delete the tokens to declutter your Consul environment. 
+ + Outdated connect-injector tokens have the following description: `token created via login: {"component":"connect-injector"}`. Do not delete + the tokens that have a description where `pod` is a key, for example `token created via login: {"component":"connect-injector","pod":"default/consul-connect-injector-576b65747c-9547x"}`). The dataplane-enabled connect inject pods use these tokens. + + You can also review the creation date for the tokens and only delete the injector tokens created before your upgrade, but do not delete all old tokens without considering if they are still in use. Some tokens, such as the server tokens, are still necessary. ## Configuring TLS on an existing cluster From f51a9d29aea20bd52d8d0bdba38cba9ea476e653 Mon Sep 17 00:00:00 2001 From: John Murret Date: Wed, 12 Jul 2023 10:56:38 -0600 Subject: [PATCH 19/43] docs - update upgrade index page to not recommend consul leave. (#18100) --- website/content/docs/upgrading/index.mdx | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/website/content/docs/upgrading/index.mdx b/website/content/docs/upgrading/index.mdx index 27500a4cb4ef..4b3ecad8d848 100644 --- a/website/content/docs/upgrading/index.mdx +++ b/website/content/docs/upgrading/index.mdx @@ -36,7 +36,8 @@ Consul is A, and version B is released. 2. On each Consul server agent, install version B of Consul. -3. One Consul server agent at a time, shut down version A via `consul leave` and restart with version B. Wait until +3. One Consul server agent at a time, use a service management system + (e.g., systemd, upstart, etc.) to restart the Consul service with version B. Wait until the server agent is healthy and has rejoined the cluster before moving on to the next server agent. From 2f20c77e4df9d663ee5f06d811c23dd9ad14db88 Mon Sep 17 00:00:00 2001 From: Vijay Date: Thu, 13 Jul 2023 01:04:39 +0530 Subject: [PATCH 20/43] Displays Consul version of each nodes in UI nodes section (#17754) * update UINodes and UINodeInfo response with consul-version info added as NodeMeta, fetched from serf members * update test cases TestUINodes, TestUINodeInfo * added nil check for map * add consul-version in local agent node metadata * get consul version from serf member and add this as node meta in catalog register request * updated ui mock response to include consul versions as node meta * updated ui trans and added version as query param to node list route * updates in ui templates to display consul version with filter and sorts * updates in ui - model class, serializers,comparators,predicates for consul version feature * added change log for Consul Version Feature * updated to get version from consul service, if for some reason not available from serf * updated changelog text * updated dependent testcases * multiselection version filter * Update agent/consul/state/catalog.go comments updated Co-authored-by: Jared Kirschner <85913323+jkirschner-hashicorp@users.noreply.github.com> --------- Co-authored-by: Jared Kirschner <85913323+jkirschner-hashicorp@users.noreply.github.com> --- .changelog/17754.txt | 3 + agent/agent.go | 1 + agent/agent_endpoint_test.go | 3 +- agent/consul/leader.go | 10 ++ agent/consul/state/catalog.go | 7 ++ agent/consul/state/catalog_test.go | 25 ++++- agent/consul/state/state_store_test.go | 31 ++++++ agent/local/state_test.go | 12 ++- agent/structs/structs.go | 3 + agent/ui_endpoint.go | 96 +++++++++++++++++++ agent/ui_endpoint_test.go | 6 ++ api/catalog_test.go | 1 + api/txn_test.go | 5 +- sdk/testutil/server.go | 2 + 
.../app/components/consul/node/list/index.hbs | 12 +++ .../consul/node/search-bar/index.hbs | 41 ++++++-- .../consul-ui/app/filter/predicates/node.js | 8 ++ ui/packages/consul-ui/app/models/node.js | 5 + .../consul-ui/app/serializers/application.js | 45 +++++++++ .../consul-ui/app/sort/comparators/node.js | 37 +++++++ .../app/templates/dc/nodes/index.hbs | 8 +- .../consul-ui/mock-api/v1/internal/ui/node/_ | 1 + .../consul-ui/mock-api/v1/internal/ui/nodes | 1 + .../tests/unit/sort/comparators/node-test.js | 45 +++++++++ .../consul-ui/translations/common/en-us.yaml | 4 + .../consul-ui/vendor/consul-ui/routes.js | 1 + 26 files changed, 397 insertions(+), 16 deletions(-) create mode 100644 .changelog/17754.txt create mode 100644 ui/packages/consul-ui/tests/unit/sort/comparators/node-test.js diff --git a/.changelog/17754.txt b/.changelog/17754.txt new file mode 100644 index 000000000000..32272ec1ae91 --- /dev/null +++ b/.changelog/17754.txt @@ -0,0 +1,3 @@ +```release-note:feature +ui: Display the Consul agent version in the nodes list, and allow filtering and sorting of nodes based on versions. +``` \ No newline at end of file diff --git a/agent/agent.go b/agent/agent.go index fa75a1cd1cf4..881b94209d84 100644 --- a/agent/agent.go +++ b/agent/agent.go @@ -3999,6 +3999,7 @@ func (a *Agent) loadMetadata(conf *config.RuntimeConfig) error { meta[k] = v } meta[structs.MetaSegmentKey] = conf.SegmentName + meta[structs.MetaConsulVersion] = conf.Version return a.State.LoadMetadata(meta) } diff --git a/agent/agent_endpoint_test.go b/agent/agent_endpoint_test.go index c465b687a880..1a275f61afb3 100644 --- a/agent/agent_endpoint_test.go +++ b/agent/agent_endpoint_test.go @@ -1506,7 +1506,8 @@ func TestAgent_Self(t *testing.T) { require.NoError(t, err) require.Equal(t, cs[a.config.SegmentName], val.Coord) - delete(val.Meta, structs.MetaSegmentKey) // Added later, not in config. + delete(val.Meta, structs.MetaSegmentKey) // Added later, not in config. + delete(val.Meta, structs.MetaConsulVersion) // Added later, not in config. require.Equal(t, a.config.NodeMeta, val.Meta) if tc.expectXDS { diff --git a/agent/consul/leader.go b/agent/consul/leader.go index c91655c5c863..4bc1908d5fe8 100644 --- a/agent/consul/leader.go +++ b/agent/consul/leader.go @@ -1087,6 +1087,13 @@ AFTER_CHECK: "partition", getSerfMemberEnterpriseMeta(member).PartitionOrDefault(), ) + // Get consul version from serf member + // add this as node meta in catalog register request + buildVersion, err := metadata.Build(&member) + if err != nil { + return err + } + // Register with the catalog. 
req := structs.RegisterRequest{ Datacenter: s.config.Datacenter, @@ -1102,6 +1109,9 @@ AFTER_CHECK: Output: structs.SerfCheckAliveOutput, }, EnterpriseMeta: *nodeEntMeta, + NodeMeta: map[string]string{ + structs.MetaConsulVersion: buildVersion.String(), + }, } if node != nil { req.TaggedAddresses = node.TaggedAddresses diff --git a/agent/consul/state/catalog.go b/agent/consul/state/catalog.go index 74efc3229556..4e9fcf716c47 100644 --- a/agent/consul/state/catalog.go +++ b/agent/consul/state/catalog.go @@ -3450,6 +3450,13 @@ func parseNodes(tx ReadTxn, ws memdb.WatchSet, idx uint64, ws.AddWithLimit(watchLimit, services.WatchCh(), allServicesCh) for service := services.Next(); service != nil; service = services.Next() { ns := service.(*structs.ServiceNode).ToNodeService() + // If version isn't defined in node meta, set it from the Consul service meta + if _, ok := dump.Meta[structs.MetaConsulVersion]; !ok && ns.ID == "consul" && ns.Meta["version"] != "" { + if dump.Meta == nil { + dump.Meta = make(map[string]string) + } + dump.Meta[structs.MetaConsulVersion] = ns.Meta["version"] + } dump.Services = append(dump.Services, ns) } diff --git a/agent/consul/state/catalog_test.go b/agent/consul/state/catalog_test.go index 0de535c3b4b3..e6b279580b03 100644 --- a/agent/consul/state/catalog_test.go +++ b/agent/consul/state/catalog_test.go @@ -4837,6 +4837,9 @@ func TestStateStore_NodeInfo_NodeDump(t *testing.T) { } // Register some nodes + // node1 is registered withOut any nodemeta, and a consul service with id + // 'consul' is added later with meta 'version'. The expected node must have + // meta 'consul-version' with same value testRegisterNode(t, s, 0, "node1") testRegisterNode(t, s, 1, "node2") @@ -4845,6 +4848,8 @@ func TestStateStore_NodeInfo_NodeDump(t *testing.T) { testRegisterService(t, s, 3, "node1", "service2") testRegisterService(t, s, 4, "node2", "service1") testRegisterService(t, s, 5, "node2", "service2") + // Register consul service with meta 'version' for node1 + testRegisterServiceWithMeta(t, s, 10, "node1", "consul", map[string]string{"version": "1.17.0"}) // Register service-level checks testRegisterCheck(t, s, 6, "node1", "service1", "check1", api.HealthPassing) @@ -4894,6 +4899,19 @@ func TestStateStore_NodeInfo_NodeDump(t *testing.T) { }, }, Services: []*structs.NodeService{ + { + ID: "consul", + Service: "consul", + Address: "1.1.1.1", + Meta: map[string]string{"version": "1.17.0"}, + Port: 1111, + Weights: &structs.Weights{Passing: 1, Warning: 1}, + RaftIndex: structs.RaftIndex{ + CreateIndex: 10, + ModifyIndex: 10, + }, + EnterpriseMeta: *structs.DefaultEnterpriseMetaInDefaultPartition(), + }, { ID: "service1", Service: "service1", @@ -4921,6 +4939,7 @@ func TestStateStore_NodeInfo_NodeDump(t *testing.T) { EnterpriseMeta: *structs.DefaultEnterpriseMetaInDefaultPartition(), }, }, + Meta: map[string]string{"consul-version": "1.17.0"}, }, &structs.NodeInfo{ Node: "node2", @@ -4988,7 +5007,7 @@ func TestStateStore_NodeInfo_NodeDump(t *testing.T) { if err != nil { t.Fatalf("err: %s", err) } - if idx != 9 { + if idx != 10 { t.Fatalf("bad index: %d", idx) } require.Len(t, dump, 1) @@ -4999,8 +5018,8 @@ func TestStateStore_NodeInfo_NodeDump(t *testing.T) { if err != nil { t.Fatalf("err: %s", err) } - if idx != 9 { - t.Fatalf("bad index: %d", 9) + if idx != 10 { + t.Fatalf("bad index: %d", idx) } if !reflect.DeepEqual(dump, expect) { t.Fatalf("bad: %#v", dump[0].Services[0]) diff --git a/agent/consul/state/state_store_test.go b/agent/consul/state/state_store_test.go index 
fef750253272..587f15c03d94 100644 --- a/agent/consul/state/state_store_test.go +++ b/agent/consul/state/state_store_test.go @@ -189,6 +189,37 @@ func testRegisterServiceWithChangeOpts(t *testing.T, s *Store, idx uint64, nodeI return svc } +// testRegisterServiceWithMeta registers service with Meta passed as arg. +func testRegisterServiceWithMeta(t *testing.T, s *Store, idx uint64, nodeID, serviceID string, meta map[string]string, opts ...func(service *structs.NodeService)) *structs.NodeService { + svc := &structs.NodeService{ + ID: serviceID, + Service: serviceID, + Address: "1.1.1.1", + Port: 1111, + Meta: meta, + } + for _, o := range opts { + o(svc) + } + + if err := s.EnsureService(idx, nodeID, svc); err != nil { + t.Fatalf("err: %s", err) + } + + tx := s.db.Txn(false) + defer tx.Abort() + service, err := tx.First(tableServices, indexID, NodeServiceQuery{Node: nodeID, Service: serviceID, PeerName: svc.PeerName}) + if err != nil { + t.Fatalf("err: %s", err) + } + if result, ok := service.(*structs.ServiceNode); !ok || + result.Node != nodeID || + result.ServiceID != serviceID { + t.Fatalf("bad service: %#v", result) + } + return svc +} + // testRegisterService register a service with given transaction idx // If the service already exists, transaction number might not be increased // Use `testRegisterServiceWithChange()` if you want perform a registration that diff --git a/agent/local/state_test.go b/agent/local/state_test.go index 0a78f321f7a5..4751352ec1c8 100644 --- a/agent/local/state_test.go +++ b/agent/local/state_test.go @@ -189,7 +189,8 @@ func TestAgentAntiEntropy_Services(t *testing.T) { id := services.NodeServices.Node.ID addrs := services.NodeServices.Node.TaggedAddresses meta := services.NodeServices.Node.Meta - delete(meta, structs.MetaSegmentKey) // Added later, not in config. + delete(meta, structs.MetaSegmentKey) // Added later, not in config. + delete(meta, structs.MetaConsulVersion) // Added later, not in config. assert.Equal(t, a.Config.NodeID, id) assert.Equal(t, a.Config.TaggedAddresses, addrs) assert.Equal(t, unNilMap(a.Config.NodeMeta), meta) @@ -1355,7 +1356,8 @@ func TestAgentAntiEntropy_Checks(t *testing.T) { id := services.NodeServices.Node.ID addrs := services.NodeServices.Node.TaggedAddresses meta := services.NodeServices.Node.Meta - delete(meta, structs.MetaSegmentKey) // Added later, not in config. + delete(meta, structs.MetaSegmentKey) // Added later, not in config. + delete(meta, structs.MetaConsulVersion) // Added later, not in config. assert.Equal(r, a.Config.NodeID, id) assert.Equal(r, a.Config.TaggedAddresses, addrs) assert.Equal(r, unNilMap(a.Config.NodeMeta), meta) @@ -2016,7 +2018,8 @@ func TestAgentAntiEntropy_NodeInfo(t *testing.T) { addrs := services.NodeServices.Node.TaggedAddresses meta := services.NodeServices.Node.Meta nodeLocality := services.NodeServices.Node.Locality - delete(meta, structs.MetaSegmentKey) // Added later, not in config. + delete(meta, structs.MetaSegmentKey) // Added later, not in config. + delete(meta, structs.MetaConsulVersion) // Added later, not in config. require.Equal(t, a.Config.NodeID, id) require.Equal(t, a.Config.TaggedAddresses, addrs) require.Equal(t, a.Config.StructLocality(), nodeLocality) @@ -2041,7 +2044,8 @@ func TestAgentAntiEntropy_NodeInfo(t *testing.T) { addrs := services.NodeServices.Node.TaggedAddresses meta := services.NodeServices.Node.Meta nodeLocality := services.NodeServices.Node.Locality - delete(meta, structs.MetaSegmentKey) // Added later, not in config. 
+ delete(meta, structs.MetaSegmentKey) // Added later, not in config. + delete(meta, structs.MetaConsulVersion) // Added later, not in config. require.Equal(t, nodeID, id) require.Equal(t, a.Config.TaggedAddresses, addrs) require.Equal(t, a.Config.StructLocality(), nodeLocality) diff --git a/agent/structs/structs.go b/agent/structs/structs.go index 59385fa5ba44..096f767a4771 100644 --- a/agent/structs/structs.go +++ b/agent/structs/structs.go @@ -220,6 +220,9 @@ const ( // WildcardSpecifier is the string which should be used for specifying a wildcard // The exact semantics of the wildcard is left up to the code where its used. WildcardSpecifier = "*" + + // MetaConsulVersion is the node metadata key used to store the node's consul version + MetaConsulVersion = "consul-version" ) var allowedConsulMetaKeysForMeshGateway = map[string]struct{}{MetaWANFederationKey: {}} diff --git a/agent/ui_endpoint.go b/agent/ui_endpoint.go index 3de9eac15568..8f5184969693 100644 --- a/agent/ui_endpoint.go +++ b/agent/ui_endpoint.go @@ -13,9 +13,12 @@ import ( "strings" "github.com/hashicorp/go-hclog" + "github.com/hashicorp/serf/serf" "github.com/hashicorp/consul/acl" "github.com/hashicorp/consul/agent/config" + "github.com/hashicorp/consul/agent/consul" + "github.com/hashicorp/consul/agent/metadata" "github.com/hashicorp/consul/agent/structs" "github.com/hashicorp/consul/api" "github.com/hashicorp/consul/logging" @@ -110,7 +113,18 @@ RPC: return nil, err } + // Get version info for all serf members into a map of key-address,value-version. + // This logic of calling 'AgentMembersMapAddrVer()' and inserting version info in this func + // can be discarded in future releases ( may be after 3 or 4 minor releases), + // when all the nodes are registered with consul-version in nodemeta. + var err error + mapAddrVer, err := AgentMembersMapAddrVer(s, req) + if err != nil { + return nil, err + } + // Use empty list instead of nil + // Also check if consul-version exists in Meta, else add it for _, info := range out.Dump { if info.Services == nil { info.Services = make([]*structs.NodeService, 0) @@ -118,12 +132,24 @@ RPC: if info.Checks == nil { info.Checks = make([]*structs.HealthCheck, 0) } + // Check if Node Meta - 'consul-version' already exists by virtue of adding + // 'consul-version' during node registration itself. + // If not, get it from mapAddrVer. + if _, ok := info.Meta[structs.MetaConsulVersion]; !ok { + if _, okver := mapAddrVer[info.Address]; okver { + if info.Meta == nil { + info.Meta = make(map[string]string) + } + info.Meta[structs.MetaConsulVersion] = mapAddrVer[info.Address] + } + } } if out.Dump == nil { out.Dump = make(structs.NodeDump, 0) } // Use empty list instead of nil + // Also check if consul-version exists in Meta, else add it for _, info := range out.ImportedDump { if info.Services == nil { info.Services = make([]*structs.NodeService, 0) @@ -131,11 +157,60 @@ RPC: if info.Checks == nil { info.Checks = make([]*structs.HealthCheck, 0) } + // Check if Node Meta - 'consul-version' already exists by virtue of adding + // 'consul-version' during node registration itself. + // If not, get it from mapAddrVer. 
+ if _, ok := info.Meta[structs.MetaConsulVersion]; !ok { + if _, okver := mapAddrVer[info.Address]; okver { + if info.Meta == nil { + info.Meta = make(map[string]string) + } + info.Meta[structs.MetaConsulVersion] = mapAddrVer[info.Address] + } + } } return append(out.Dump, out.ImportedDump...), nil } +// AgentMembersMapAddrVer is used to get version info from all serf members into a +// map of key-address,value-version. +func AgentMembersMapAddrVer(s *HTTPHandlers, req *http.Request) (map[string]string, error) { + var members []serf.Member + + //Get WAN Members + wanMembers := s.agent.WANMembers() + + //Get LAN Members + //Get the request partition and default to that of the agent. + entMeta := s.agent.AgentEnterpriseMeta() + if err := s.parseEntMetaPartition(req, entMeta); err != nil { + return nil, err + } + filter := consul.LANMemberFilter{ + Partition: entMeta.PartitionOrDefault(), + } + filter.AllSegments = true + lanMembers, err := s.agent.delegate.LANMembers(filter) + if err != nil { + return nil, err + } + + //aggregate members + members = append(wanMembers, lanMembers...) + + //create a map with key as IPv4 address and value as consul-version + mapAddrVer := make(map[string]string, len(members)) + for i := range members { + buildVersion, err := metadata.Build(&members[i]) + if err == nil { + mapAddrVer[members[i].Addr.String()] = buildVersion.String() + } + } + + return mapAddrVer, nil +} + // UINodeInfo is used to get info on a single node in a given datacenter. We return a // NodeInfo which provides overview information for the node func (s *HTTPHandlers) UINodeInfo(resp http.ResponseWriter, req *http.Request) (interface{}, error) { @@ -172,6 +247,16 @@ RPC: return nil, err } + // Get version info for all serf members into a map of key-address,value-version. + // This logic of calling 'AgentMembersMapAddrVer()' and inserting version info in this func + // can be discarded in future releases ( may be after 3 or 4 minor releases), + // when all the nodes are registered with consul-version in nodemeta. + var err error + mapAddrVer, err := AgentMembersMapAddrVer(s, req) + if err != nil { + return nil, err + } + // Return only the first entry if len(out.Dump) > 0 { info := out.Dump[0] @@ -181,6 +266,17 @@ RPC: if info.Checks == nil { info.Checks = make([]*structs.HealthCheck, 0) } + // Check if Node Meta - 'consul-version' already exists by virtue of adding + // 'consul-version' during node registration itself. + // If not, get it from mapAddrVer. 
+ if _, ok := info.Meta[structs.MetaConsulVersion]; !ok { + if _, okver := mapAddrVer[info.Address]; okver { + if info.Meta == nil { + info.Meta = make(map[string]string) + } + info.Meta[structs.MetaConsulVersion] = mapAddrVer[info.Address] + } + } return info, nil } diff --git a/agent/ui_endpoint_test.go b/agent/ui_endpoint_test.go index 5fc2e06d3485..f6810db801d6 100644 --- a/agent/ui_endpoint_test.go +++ b/agent/ui_endpoint_test.go @@ -162,6 +162,9 @@ func TestUINodes(t *testing.T) { require.Len(t, nodes[2].Services, 0) require.NotNil(t, nodes[1].Checks) require.Len(t, nodes[2].Services, 0) + + // check for consul-version in node meta + require.Equal(t, nodes[0].Meta[structs.MetaConsulVersion], a.Config.Version) } func TestUINodes_Filter(t *testing.T) { @@ -260,6 +263,9 @@ func TestUINodeInfo(t *testing.T) { node.Checks == nil || len(node.Checks) != 0 { t.Fatalf("bad: %v", node) } + + // check for consul-version in node meta + require.Equal(t, node.Meta[structs.MetaConsulVersion], a.Config.Version) } func TestUIServices(t *testing.T) { diff --git a/api/catalog_test.go b/api/catalog_test.go index 6226691353f2..2b0a4097b332 100644 --- a/api/catalog_test.go +++ b/api/catalog_test.go @@ -65,6 +65,7 @@ func TestAPI_CatalogNodes(t *testing.T) { }, Meta: map[string]string{ "consul-network-segment": "", + "consul-version": s.Config.Version, }, } require.Equal(r, want, got) diff --git a/api/txn_test.go b/api/txn_test.go index 975f3e38163b..ea454976daea 100644 --- a/api/txn_test.go +++ b/api/txn_test.go @@ -361,7 +361,10 @@ func TestAPI_ClientTxn(t *testing.T) { "wan": s.Config.Bind, "wan_ipv4": s.Config.Bind, }, - Meta: map[string]string{"consul-network-segment": ""}, + Meta: map[string]string{ + "consul-network-segment": "", + "consul-version": s.Config.Version, + }, CreateIndex: ret.Results[1].Node.CreateIndex, ModifyIndex: ret.Results[1].Node.ModifyIndex, }, diff --git a/sdk/testutil/server.go b/sdk/testutil/server.go index d00850d5e17a..a20f95123aab 100644 --- a/sdk/testutil/server.go +++ b/sdk/testutil/server.go @@ -130,6 +130,7 @@ type TestServerConfig struct { Args []string `json:"-"` ReturnPorts func() `json:"-"` Audit *TestAuditConfig `json:"audit,omitempty"` + Version string `json:"version,omitempty"` } type TestACLs struct { @@ -212,6 +213,7 @@ func defaultServerConfig(t TestingTB, consulVersion *version.Version) *TestServe Stdout: logBuffer, Stderr: logBuffer, Peering: &TestPeeringConfig{Enabled: true}, + Version: consulVersion.String(), } // Add version-specific tweaks diff --git a/ui/packages/consul-ui/app/components/consul/node/list/index.hbs b/ui/packages/consul-ui/app/components/consul/node/list/index.hbs index c1392bd68682..f57d7d5ab052 100644 --- a/ui/packages/consul-ui/app/components/consul/node/list/index.hbs +++ b/ui/packages/consul-ui/app/components/consul/node/list/index.hbs @@ -50,5 +50,17 @@ as |item index|> {{item.Address}} +
+
+ ConsulVersion +
+
+ {{!-- Displaying consul version from node meta data --}} + {{#if item.Meta.consul-version}} + + v{{item.Meta.consul-version}} + {{/if}} +
+
diff --git a/ui/packages/consul-ui/app/components/consul/node/search-bar/index.hbs b/ui/packages/consul-ui/app/components/consul/node/search-bar/index.hbs index 3aa8681812c0..995f9e773bcb 100644 --- a/ui/packages/consul-ui/app/components/consul/node/search-bar/index.hbs +++ b/ui/packages/consul-ui/app/components/consul/node/search-bar/index.hbs @@ -19,12 +19,14 @@ ) ) - (t (concat "components.consul.node.search-bar." search.status.value) - default=(array - (concat "common.search." search.status.value) - (concat "common.consul." search.status.value) - (concat "common.brand." search.status.value) - ) + (if search.status.value + search.status.value + (t (concat "components.consul.node.search-bar." search.status.value) + default=(array + (concat "common.search." search.status.value) + (concat "common.consul." search.status.value) + (concat "common.brand." search.status.value) + )) ) as |key value|}} @@ -95,6 +97,27 @@ as |key value|}} {{/let}} + + + + {{t "common.consul.version"}} + + + + {{#let components.Optgroup components.Option as |Optgroup Option|}} + {{#each @versions as |version|}} + + {{/each}} + {{/let}} + + <:sort as |search|> {{t "common.sort.alpha.asc"}} + + + + {{/let}} diff --git a/ui/packages/consul-ui/app/filter/predicates/node.js b/ui/packages/consul-ui/app/filter/predicates/node.js index b5c752ab9bea..4927078bf988 100644 --- a/ui/packages/consul-ui/app/filter/predicates/node.js +++ b/ui/packages/consul-ui/app/filter/predicates/node.js @@ -9,4 +9,12 @@ export default { warning: (item, value) => item.Status === value, critical: (item, value) => item.Status === value, }, + version: (item, value) => { + for (const element of value) { + if (item.Version.includes(element + '.')) { + return true; + } + } + return false; + }, }; diff --git a/ui/packages/consul-ui/app/models/node.js b/ui/packages/consul-ui/app/models/node.js index e56f3e1fff86..b95ec283050a 100644 --- a/ui/packages/consul-ui/app/models/node.js +++ b/ui/packages/consul-ui/app/models/node.js @@ -67,4 +67,9 @@ export default class Node extends Model { get ChecksWarning() { return this.NodeChecks.filter((item) => item.Status === 'warning').length; } + + @computed('Meta') + get Version() { + return this.Meta?.['consul-version'] ?? 
''; + } } diff --git a/ui/packages/consul-ui/app/serializers/application.js b/ui/packages/consul-ui/app/serializers/application.js index 2e6bac137b27..d9e58b0ab0bb 100644 --- a/ui/packages/consul-ui/app/serializers/application.js +++ b/ui/packages/consul-ui/app/serializers/application.js @@ -156,6 +156,10 @@ export default class ApplicationSerializer extends Serializer { // ember-data methods so we have the opportunity to do this on a per-model // level const meta = this.normalizeMeta(store, modelClass, normalizedPayload, id, requestType); + // get distinct consul versions from list and add it as meta + if (modelClass.modelName === 'node' && requestType === 'query') { + meta.versions = this.getDistinctConsulVersions(normalizedPayload); + } if (requestType !== 'query') { normalizedPayload.meta = meta; } @@ -215,4 +219,45 @@ export default class ApplicationSerializer extends Serializer { normalizePayload(payload, id, requestType) { return payload; } + + // getDistinctConsulVersions will be called only for nodes and query request type + // the list of versions is to be added as meta to resp, without changing original response structure + // hence this function is added in application.js + getDistinctConsulVersions(payload) { + // create a Set and add version with only major.minor : ex-1.24.6 as 1.24 + let versionSet = new Set(); + payload.forEach(function (item) { + if (item.Meta && item.Meta['consul-version'] !== '') { + const split = item.Meta['consul-version'].split('.'); + versionSet.add(split[0] + '.' + split[1]); + } + }); + + const versionArray = Array.from(versionSet); + + // Sort the array in descending order using a custom comparison function + versionArray.sort((a, b) => { + // Split the versions into arrays of numbers + const versionA = a.split('.').map((part) => { + const number = Number(part); + return isNaN(number) ? 0 : number; + }); + const versionB = b.split('.').map((part) => { + const number = Number(part); + return isNaN(number) ? 0 : number; + }); + + const minLength = Math.min(versionA.length, versionB.length); + + // start with comparing major version num, if equal then compare minor + for (let i = 0; i < minLength; i++) { + if (versionA[i] !== versionB[i]) { + return versionB[i] - versionA[i]; + } + } + return versionB.length - versionA.length; + }); + + return versionArray; //sorted array + } } diff --git a/ui/packages/consul-ui/app/sort/comparators/node.js b/ui/packages/consul-ui/app/sort/comparators/node.js index c584456e3e87..fb70550750ea 100644 --- a/ui/packages/consul-ui/app/sort/comparators/node.js +++ b/ui/packages/consul-ui/app/sort/comparators/node.js @@ -38,6 +38,43 @@ export default ({ properties }) => return 0; } }; + } else if (key.startsWith('Version:')) { + return function (itemA, itemB) { + const [, dir] = key.split(':'); + let a, b; + if (dir === 'asc') { + a = itemA; + b = itemB; + } else { + b = itemA; + a = itemB; + } + + // Split the versions into arrays of numbers + const versionA = a.Version.split('.').map((part) => { + const number = Number(part); + return isNaN(number) ? 0 : number; + }); + const versionB = b.Version.split('.').map((part) => { + const number = Number(part); + return isNaN(number) ? 
0 : number; + }); + + const minLength = Math.min(versionA.length, versionB.length); + + for (let i = 0; i < minLength; i++) { + const diff = versionA[i] - versionB[i]; + switch (true) { + case diff > 0: + return 1; + case diff < 0: + return -1; + } + } + + return versionA.length - versionB.length; + }; } + return properties(['Node'])(key); }; diff --git a/ui/packages/consul-ui/app/templates/dc/nodes/index.hbs b/ui/packages/consul-ui/app/templates/dc/nodes/index.hbs index 4d0eb2e3abea..56f7676c4924 100644 --- a/ui/packages/consul-ui/app/templates/dc/nodes/index.hbs +++ b/ui/packages/consul-ui/app/templates/dc/nodes/index.hbs @@ -40,10 +40,15 @@ change=(action (mut searchproperty) value='target.selectedItems') default=this._searchProperties ) + version=(hash + value=(if this.version (split this.version ',') undefined) + change=(action (mut this.version) value='target.selectedItems') + ) ) api.data leader.data - as |sort filters items leader| + api.data.meta.versions + as |sort filters items leader versions| }} {{#let (reject-by 'Meta.synthetic-node' items) as |filtered|}} @@ -61,6 +66,7 @@ @onsearch={{action (mut search) value='target.value'}} @sort={{sort}} @filter={{filters}} + @versions={{versions}} /> {{/if}} diff --git a/ui/packages/consul-ui/mock-api/v1/internal/ui/node/_ b/ui/packages/consul-ui/mock-api/v1/internal/ui/node/_ index 27afd9c8ef4d..2adf163f7217 100644 --- a/ui/packages/consul-ui/mock-api/v1/internal/ui/node/_ +++ b/ui/packages/consul-ui/mock-api/v1/internal/ui/node/_ @@ -30,6 +30,7 @@ return ` "TaggedAddresses":{"lan":"${ip}","wan":"${ip}"}, "Meta":{ "consul-network-segment":"", + "consul-version": "${env('CONSUL_VERSION') ? fake.helpers.randomize([env('CONSUL_VERSION'),"1.10.4","1.15.2", "1.17.8","1.7.2","1.12.4", "1.17.2","1.0.9","2.0.2"]) : fake.helpers.randomize(["1.10.4","1.15.2", "1.17.8","1.7.2","1.12.4", "1.17.2","1.0.9","2.0.2"]) }", "consul-dashboard-url": "${fake.internet.protocol()}://${fake.internet.domainName()}/?id={{Node}}" }, "Services":[ diff --git a/ui/packages/consul-ui/mock-api/v1/internal/ui/nodes b/ui/packages/consul-ui/mock-api/v1/internal/ui/nodes index 2740d824a4f6..de64b0bb8eb0 100644 --- a/ui/packages/consul-ui/mock-api/v1/internal/ui/nodes +++ b/ui/packages/consul-ui/mock-api/v1/internal/ui/nodes @@ -25,6 +25,7 @@ }, "Meta": { "consul-network-segment":"", + "consul-version": "${env('CONSUL_VERSION') ? fake.helpers.randomize([env('CONSUL_VERSION'),"1.10.4","1.15.2", "1.17.8","1.7.2","1.12.4", "1.17.2","1.0.9","2.0.2"]) : fake.helpers.randomize(["1.10.4","1.15.2", "1.17.8","1.7.2","1.12.4", "1.17.2","1.0.9","2.0.2"]) }", "synthetic-node": ${env('CONSUL_AGENTLESS_ENABLED') ? fake.helpers.randomize([true, false, false, false]) : false} }, "Services":[ diff --git a/ui/packages/consul-ui/tests/unit/sort/comparators/node-test.js b/ui/packages/consul-ui/tests/unit/sort/comparators/node-test.js new file mode 100644 index 000000000000..9b5c5ea75680 --- /dev/null +++ b/ui/packages/consul-ui/tests/unit/sort/comparators/node-test.js @@ -0,0 +1,45 @@ +/** + * Copyright (c) HashiCorp, Inc. 
+ * SPDX-License-Identifier: MPL-2.0 + */ + +import comparators from 'consul-ui/sort/comparators/node'; +import { properties } from 'consul-ui/services/sort'; +import { module, test } from 'qunit'; + +module('Unit | Sort | Comparator | node', function () { + const comparator = comparators({ properties }); + test('items are sorted by a fake Version', function (assert) { + const items = [ + { + Version: '2.24.1', + }, + { + Version: '1.12.6', + }, + { + Version: '2.09.3', + }, + ]; + const comp = comparator('Version:asc'); + assert.equal(typeof comp, 'function'); + + const expected = [ + { + Version: '1.12.6', + }, + { + Version: '2.09.3', + }, + { + Version: '2.24.1', + }, + ]; + let actual = items.sort(comp); + assert.deepEqual(actual, expected); + + expected.reverse(); + actual = items.sort(comparator('Version:desc')); + assert.deepEqual(actual, expected); + }); +}); diff --git a/ui/packages/consul-ui/translations/common/en-us.yaml b/ui/packages/consul-ui/translations/common/en-us.yaml index fc7f542fef39..b0dc3bffeeda 100644 --- a/ui/packages/consul-ui/translations/common/en-us.yaml +++ b/ui/packages/consul-ui/translations/common/en-us.yaml @@ -53,6 +53,7 @@ consul: redundancyzone: Redundancy zone peername: Peer partition: Admin Partitions + version: Version search: search: Search searchproperty: Search Across @@ -77,6 +78,9 @@ sort: status: asc: Unhealthy to Healthy desc: Healthy to Unhealthy + version: + asc: Oldest to Latest + desc: Latest to Oldest validations: dns-hostname: help: | diff --git a/ui/packages/consul-ui/vendor/consul-ui/routes.js b/ui/packages/consul-ui/vendor/consul-ui/routes.js index 79aadcd6ad6a..ec25fb0a2428 100644 --- a/ui/packages/consul-ui/vendor/consul-ui/routes.js +++ b/ui/packages/consul-ui/vendor/consul-ui/routes.js @@ -218,6 +218,7 @@ queryParams: { sortBy: 'sort', status: 'status', + version: 'version', searchproperty: { as: 'searchproperty', empty: [['Node', 'Address', 'Meta', 'PeerName']], From d1f5d9b90535e9f348ba42c2e1bb8866667d15be Mon Sep 17 00:00:00 2001 From: Eddie Rowe <74205376+eddie-rowe@users.noreply.github.com> Date: Wed, 12 Jul 2023 16:43:22 -0500 Subject: [PATCH 21/43] api gw 1.16 updates (#18081) * api gw 1.16 updates * Apply suggestions from code review Co-authored-by: Jeff Boruszak <104028618+boruszak@users.noreply.github.com> * update CodeBlockConfig filename * Apply suggestions from code review Co-authored-by: trujillo-adam <47586768+trujillo-adam@users.noreply.github.com> Co-authored-by: Jeff Boruszak <104028618+boruszak@users.noreply.github.com> * remove non-standard intentions page * Update website/content/docs/api-gateway/configuration/index.mdx Co-authored-by: trujillo-adam <47586768+trujillo-adam@users.noreply.github.com> --------- Co-authored-by: Jeff Boruszak <104028618+boruszak@users.noreply.github.com> Co-authored-by: trujillo-adam <47586768+trujillo-adam@users.noreply.github.com> --- .../docs/api-gateway/configuration/gateway.mdx | 3 +-- .../docs/api-gateway/configuration/index.mdx | 3 ++- .../docs/api-gateway/configuration/routes.mdx | 4 ++-- website/content/docs/api-gateway/install.mdx | 2 ++ .../content/docs/api-gateway/usage/usage.mdx | 17 ++++++----------- 5 files changed, 13 insertions(+), 16 deletions(-) diff --git a/website/content/docs/api-gateway/configuration/gateway.mdx b/website/content/docs/api-gateway/configuration/gateway.mdx index fdbe468ba826..3c2a1f6fd6a6 100644 --- a/website/content/docs/api-gateway/configuration/gateway.mdx +++ b/website/content/docs/api-gateway/configuration/gateway.mdx @@ -26,7 +26,6 @@ 
Specify the following parameters to declare a `Gateway`: | `fields` | Specifies the configurations for the Gateway. The fields are listed in the [configuration model](#configuration-model). Details for each field are described in the [specification](#specification). | Required | - ## Configuration model The following outline shows how to format the configurations in the `Gateway` object. Click on a property name to view details about the configuration. @@ -196,7 +195,7 @@ The following example creates a `Gateway` named `example-gateway` in namespace ` name: example-gateway namespace: gateway-namespace spec: - gatewayClassName: consul-api-gateway + gatewayClassName: consul listeners: - protocol: HTTPS port: 443 diff --git a/website/content/docs/api-gateway/configuration/index.mdx b/website/content/docs/api-gateway/configuration/index.mdx index b2d0b19a52d3..06875ebd4b35 100644 --- a/website/content/docs/api-gateway/configuration/index.mdx +++ b/website/content/docs/api-gateway/configuration/index.mdx @@ -13,8 +13,9 @@ This topic provides an overview of the configuration items that enable Consul AP - [GatewayClass](/consul/docs/api-gateway/configuration/gatewayclass) defines a class of gateway resources that you can use as a template for creating gateways. - [GatewayClassConfig](/consul/docs/api-gateway/configuration/gatewayclassconfig) describes additional Consul API Gateway-related configuration parameters for the GatewayClass resource. - [Routes](/consul/docs/api-gateway/configuration/routes) specifies the path from the gateway to the backend service(s) client to the listener. +- [Intentions](/consul/docs/connect/config-entries/service-intentions) specify traffic communication rules between services in the mesh. Intentions also enforce rules for service-to-service traffic routed through a Consul API gateway. -You can create a basic Gateway object using the default [`gatewayClassName`](/consul/docs/api-gateway/configuration/gateway#gatewayclassname) (`consul-api-gateway`). If you want to create custom Gateways suitable for your environment, complete the following steps: +You can create a basic Gateway object using the default [`gatewayClassName`](/consul/docs/api-gateway/configuration/gateway#gatewayclassname) (`consul`). If you want to create custom Gateways suitable for your environment, complete the following steps: 1. Define a [GatewayClassConfig](/consul/docs/api-gateway/configuration/gatewayclassconfig) that contains your custom configurations. 1. Define a [GatewayClass](/consul/docs/api-gateway/configuration/gatewayclass) and configure the [`parametersRef.name`](/consul/docs/api-gateway/configuration/gatewayclass#parametersref-name) to reference the name of your [GatewayClassConfig](/consul/docs/api-gateway/configuration/gatewayclassconfig). 
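For illustration, the two steps above might come together in a GatewayClass similar to the following sketch. The `controllerName` and `parametersRef.group` values are assumptions rather than values taken from this patch, so confirm the exact strings in the GatewayClass and GatewayClassConfig reference pages before using them.

```yaml
# Illustrative sketch only: controllerName and parametersRef.group are assumed
# placeholder values, not confirmed by this patch.
apiVersion: gateway.networking.k8s.io/v1beta1
kind: GatewayClass
metadata:
  name: custom-gateway-class
spec:
  controllerName: "consul.hashicorp.com/gateway-controller"  # assumed controller name
  parametersRef:
    group: consul.hashicorp.com    # assumed API group of the GatewayClassConfig CRD
    kind: GatewayClassConfig
    name: my-gateway-class-config  # the GatewayClassConfig you defined in step 1
```

A Gateway can then opt into this class by setting `gatewayClassName: custom-gateway-class` instead of the default `consul`.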
diff --git a/website/content/docs/api-gateway/configuration/routes.mdx b/website/content/docs/api-gateway/configuration/routes.mdx index 27b520340248..973ebed0251b 100644 --- a/website/content/docs/api-gateway/configuration/routes.mdx +++ b/website/content/docs/api-gateway/configuration/routes.mdx @@ -22,7 +22,7 @@ The following example creates a route named `example-route` associated with a li ```yaml - apiVersion: gateway.networking.k8s.io/v1alpha2 + apiVersion: gateway.networking.k8s.io/v1beta1 kind: HTTPRoute metadata: name: example-route @@ -132,7 +132,7 @@ The following example creates a route named `example-route` in namespace `gatewa ```yaml - apiVersion: gateway.networking.k8s.io/v1alpha2 + apiVersion: gateway.networking.k8s.io/v1beta1 kind: HTTPRoute metadata: name: example-route diff --git a/website/content/docs/api-gateway/install.mdx b/website/content/docs/api-gateway/install.mdx index dff25ee3350d..3dc4ca7289ab 100644 --- a/website/content/docs/api-gateway/install.mdx +++ b/website/content/docs/api-gateway/install.mdx @@ -18,6 +18,8 @@ The Consul API gateway ships with Consul and is automatically installed when you name: consul connectInject: enabled: true + apiGateway: + manageExternalCRDs: true ``` diff --git a/website/content/docs/api-gateway/usage/usage.mdx b/website/content/docs/api-gateway/usage/usage.mdx index e815d21c5fda..cee7b0a7205e 100644 --- a/website/content/docs/api-gateway/usage/usage.mdx +++ b/website/content/docs/api-gateway/usage/usage.mdx @@ -14,28 +14,23 @@ This topic describes how to use Consul API Gateway. Complete the following steps to use Consul API Gateway in your network. 1. Verify that the [requirements](/consul/docs/api-gateway/tech-specs) have been met. -1. Verify that the Consul API Gateway CRDs and controller have been installed and applied. Refer to [Installation](/consul/docs/api-gateway/install) for details. +1. Verify that the Consul API Gateway CRDs were applied. Refer to [Installation](/consul/docs/api-gateway/install) for details. ## Configuration -Configure your [`Gateway`](/consul/docs/api-gateway/configuration/gateway) and [`Routes`](/consul/docs/api-gateway/configuration/routes) as described in [Consul API Gateway Configuration](/consul/docs/api-gateway/configuration). +Configure the following resources for your environment as described in [Consul API Gateway Configuration](/consul/docs/api-gateway/configuration). - +1. [`Gateway`](/consul/docs/api-gateway/configuration/gateway) +1. [`Routes`](/consul/docs/api-gateway/configuration/routes) +1. 
[`Intentions`](/consul/docs/connect/config-entries/service-intentions) - ```yaml - apiGateway: - enabled: true - managedGatewayClass: - ``` - - ## Apply configurations Issue the `kubectl apply` command to implement the configurations: ```shell-session -$ kubectl apply -f gateway.yaml routes.yaml +$ kubectl apply -f gateway.yaml routes.yaml intentions.yaml ``` From 3b3aa1f26064eb00cfee21340b0df54f860ab1d5 Mon Sep 17 00:00:00 2001 From: Dan Bond Date: Wed, 12 Jul 2023 16:10:34 -0700 Subject: [PATCH 22/43] [NET-4103] ci: build s390x (#18067) * ci: build s390x * ci: test s390x * ci: dev build s390x * no GOOS * ent only * build: publish s390x * fix syntax error * fix syntax error again * fix syntax error again x2 * test branch * Move s390x conditionals to step level * remove test branch --------- Co-authored-by: emilymianeil --- .github/workflows/build-distros.yml | 48 +++++--- .github/workflows/build.yml | 59 ++++++++- .github/workflows/go-tests.yml | 145 ++++++++++++++--------- .github/workflows/reusable-dev-build.yml | 2 +- 4 files changed, 182 insertions(+), 72 deletions(-) diff --git a/.github/workflows/build-distros.yml b/.github/workflows/build-distros.yml index 8b88345d2ee2..10c520893341 100644 --- a/.github/workflows/build-distros.yml +++ b/.github/workflows/build-distros.yml @@ -2,7 +2,7 @@ # It is aimed at checking new commits don't introduce any breaking build changes. name: build-distros -on: +on: pull_request: push: branches: @@ -33,7 +33,7 @@ jobs: run: ./.github/scripts/get_runner_classes.sh check-go-mod: - needs: + needs: - setup uses: ./.github/workflows/reusable-check-go-mod.yml with: @@ -43,8 +43,8 @@ jobs: elevated-github-token: ${{ secrets.ELEVATED_GITHUB_TOKEN }} build-386: - needs: - - setup + needs: + - setup - check-go-mod env: XC_OS: "freebsd linux windows" @@ -56,7 +56,7 @@ jobs: - name: Setup Git if: ${{ endsWith(github.repository, '-enterprise') }} run: git config --global url."https://${{ secrets.ELEVATED_GITHUB_TOKEN }}:@github.com".insteadOf "https://github.com" - + - uses: actions/setup-go@fac708d6674e30b6ba41289acaab6d4b75aa0753 # v4.0.1 with: go-version-file: 'go.mod' @@ -68,7 +68,7 @@ jobs: build-amd64: needs: - - setup + - setup - check-go-mod env: XC_OS: "darwin freebsd linux solaris windows" @@ -80,7 +80,7 @@ jobs: - name: Setup Git if: ${{ endsWith(github.repository, '-enterprise') }} run: git config --global url."https://${{ secrets.ELEVATED_GITHUB_TOKEN }}:@github.com".insteadOf "https://github.com" - + - uses: actions/setup-go@fac708d6674e30b6ba41289acaab6d4b75aa0753 # v4.0.1 with: go-version-file: 'go.mod' @@ -92,7 +92,7 @@ jobs: build-arm: needs: - - setup + - setup - check-go-mod runs-on: ${{ fromJSON(needs.setup.outputs.compute-xl) }} env: @@ -105,7 +105,7 @@ jobs: - name: Setup Git if: ${{ endsWith(github.repository, '-enterprise') }} run: git config --global url."https://${{ secrets.ELEVATED_GITHUB_TOKEN }}:@github.com".insteadOf "https://github.com" - + - uses: actions/setup-go@fac708d6674e30b6ba41289acaab6d4b75aa0753 # v4.0.1 with: @@ -117,6 +117,26 @@ jobs: - run: CC=arm-linux-gnueabihf-gcc GOARCH=arm GOARM=6 go build -tags "${{ env.GOTAGS }}" - run: CC=aarch64-linux-gnu-gcc GOARCH=arm64 go build -tags "${{ env.GOTAGS }}" + + build-s390x: + if: ${{ endsWith(github.repository, '-enterprise') }} + needs: + - setup + - check-go-mod + runs-on: ${{ fromJSON(needs.setup.outputs.compute-xl) }} + steps: + - uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2 + + # NOTE: This step is specifically needed for ENT. 
It allows us to access the required private HashiCorp repos. + - name: Setup Git + run: git config --global url."https://${{ secrets.ELEVATED_GITHUB_TOKEN }}:@github.com".insteadOf "https://github.com" + + - uses: actions/setup-go@fac708d6674e30b6ba41289acaab6d4b75aa0753 # v4.0.1 + with: + go-version-file: 'go.mod' + - name: Build + run: GOOS=linux GOARCH=s390x CGO_ENABLED=0 go build -tags "${{ env.GOTAGS }}" + # This is job is required for branch protection as a required gihub check # because GitHub actions show up as checks at the job level and not the # workflow level. This is currently a feature request: @@ -126,18 +146,18 @@ jobs: # - be placed after the fanout of a workflow so that everything fans back in # to this job. # - "need" any job that is part of the fan out / fan in - # - implement the if logic because we have conditional jobs - # (go-test-enteprise) that this job needs and this would potentially get - # skipped if a previous job got skipped. So we use the if clause to make + # - implement the if logic because we have conditional jobs + # (go-test-enteprise) that this job needs and this would potentially get + # skipped if a previous job got skipped. So we use the if clause to make # sure it does not get skipped. - build-distros-success: - needs: + needs: - setup - check-go-mod - build-386 - build-amd64 - build-arm + - build-s390x runs-on: ${{ fromJSON(needs.setup.outputs.compute-small) }} if: ${{ always() }} steps: diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 9186f12bfe25..c788a1536c25 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -57,6 +57,7 @@ jobs: echo "Product Date: ${{ needs.set-product-version.outputs.product-date }}" echo "Prerelease Version: ${{ needs.set-product-version.outputs.pre-version }}" echo "Ldflags: ${{ needs.set-product-version.outputs.shared-ldflags }}" + generate-metadata-file: needs: set-product-version runs-on: ubuntu-latest @@ -173,6 +174,57 @@ jobs: name: ${{ env.DEB_PACKAGE }} path: out/${{ env.DEB_PACKAGE }} + build-s390x: + needs: set-product-version + if: ${{ endsWith(github.repository, '-enterprise') }} + runs-on: ubuntu-latest + strategy: + matrix: + include: + - {go: "1.20.4", goos: "linux", goarch: "s390x"} + fail-fast: true + + name: Go ${{ matrix.go }} ${{ matrix.goos }} ${{ matrix.goarch }} build + steps: + - uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2 + + - name: Setup with node and yarn + uses: actions/setup-node@64ed1c7eab4cce3362f8c340dee64e5eaeef8f7c # v3.6.0 + with: + node-version: '14' + cache: 'yarn' + cache-dependency-path: 'ui/yarn.lock' + + - name: Build UI + run: | + CONSUL_VERSION=${{ needs.set-product-version.outputs.product-version }} + CONSUL_DATE=${{ needs.set-product-version.outputs.product-date }} + CONSUL_BINARY_TYPE=${CONSUL_BINARY_TYPE} + CONSUL_COPYRIGHT_YEAR=$(git show -s --format=%cd --date=format:%Y HEAD) + echo "consul_version is ${CONSUL_VERSION}" + echo "consul_date is ${CONSUL_DATE}" + echo "consul binary type is ${CONSUL_BINARY_TYPE}" + echo "consul copyright year is ${CONSUL_COPYRIGHT_YEAR}" + cd ui && make && cd .. 
+ rm -rf agent/uiserver/dist + mv ui/packages/consul-ui/dist agent/uiserver/ + - name: Go Build + env: + PRODUCT_VERSION: ${{ needs.set-product-version.outputs.product-version }} + PRERELEASE_VERSION: ${{ needs.set-product-version.outputs.pre-version }} + CGO_ENABLED: "0" + GOLDFLAGS: "${{needs.set-product-version.outputs.shared-ldflags}}" + uses: hashicorp/actions-go-build@v0.1.7 + with: + product_name: ${{ env.PKG_NAME }} + product_version: ${{ needs.set-product-version.outputs.product-version }} + go_version: ${{ matrix.go }} + os: ${{ matrix.goos }} + arch: ${{ matrix.goarch }} + reproducible: report + instructions: |- + go build -ldflags="$GOLDFLAGS" -o "$BIN_PATH" -trimpath -buildvcs=false + build-darwin: needs: set-product-version runs-on: macos-latest @@ -186,7 +238,7 @@ jobs: name: Go ${{ matrix.go }} ${{ matrix.goos }} ${{ matrix.goarch }} build steps: - uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2 - + - name: Setup with node and yarn uses: actions/setup-node@64ed1c7eab4cce3362f8c340dee64e5eaeef8f7c # v3.6.0 with: @@ -319,7 +371,7 @@ jobs: runs-on: ubuntu-latest strategy: matrix: - arch: ["386", "amd64", "arm", "arm64"] + arch: ["386", "amd64", "arm", "arm64", "s390x"] fail-fast: true env: version: ${{ needs.set-product-version.outputs.product-version }} @@ -328,8 +380,10 @@ jobs: name: Verify ${{ matrix.arch }} linux binary steps: - uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2 + if: ${{ endsWith(github.repository, '-enterprise') || matrix.arch != 's390x' }} - name: Download ${{ matrix.arch }} zip + if: ${{ endsWith(github.repository, '-enterprise') || matrix.arch != 's390x' }} uses: actions/download-artifact@9bc31d5ccc31df68ecc42ccf4149144866c47d8a # v3.0.2 with: name: ${{ env.zip_name }} @@ -342,6 +396,7 @@ jobs: platforms: arm,arm64 - name: Run verification for ${{ matrix.arch }} binary + if: ${{ endsWith(github.repository, '-enterprise') || matrix.arch != 's390x' }} run: .github/scripts/verify_artifact.sh ${{ env.zip_name }} v${{ env.version }} verify-darwin: diff --git a/.github/workflows/go-tests.yml b/.github/workflows/go-tests.yml index 831271f6f832..9baf90c505ed 100644 --- a/.github/workflows/go-tests.yml +++ b/.github/workflows/go-tests.yml @@ -1,6 +1,6 @@ name: go-tests -on: +on: pull_request: branches-ignore: - stable-website @@ -15,7 +15,7 @@ on: # Push events on the main branch - main - release/** - + permissions: contents: read @@ -39,7 +39,7 @@ jobs: run: ./.github/scripts/get_runner_classes.sh check-go-mod: - needs: + needs: - setup uses: ./.github/workflows/reusable-check-go-mod.yml with: @@ -49,8 +49,8 @@ jobs: elevated-github-token: ${{ secrets.ELEVATED_GITHUB_TOKEN }} check-generated-protobuf: - needs: - - setup + needs: + - setup runs-on: ${{ fromJSON(needs.setup.outputs.compute-medium) }} steps: - uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2 @@ -75,10 +75,10 @@ jobs: name: "Protobuf Lint" - name: Notify Slack if: ${{ failure() }} - run: .github/scripts/notify_slack.sh + run: .github/scripts/notify_slack.sh check-generated-deep-copy: - needs: - - setup + needs: + - setup runs-on: ${{ fromJSON(needs.setup.outputs.compute-large) }} steps: - uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2 @@ -100,8 +100,8 @@ jobs: run: .github/scripts/notify_slack.sh lint-enums: - needs: - - setup + needs: + - setup runs-on: ${{ fromJSON(needs.setup.outputs.compute-large) }} steps: - uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2 @@ -112,7 
+112,7 @@ jobs: - uses: actions/setup-go@fac708d6674e30b6ba41289acaab6d4b75aa0753 # v4.0.1 with: go-version-file: 'go.mod' - - run: go install github.com/reillywatson/enumcover/cmd/enumcover@master && enumcover ./... + - run: go install github.com/reillywatson/enumcover/cmd/enumcover@master && enumcover ./... - name: Notify Slack if: ${{ failure() }} run: .github/scripts/notify_slack.sh @@ -135,8 +135,8 @@ jobs: run: .github/scripts/notify_slack.sh lint-consul-retry: - needs: - - setup + needs: + - setup runs-on: ${{ fromJSON(needs.setup.outputs.compute-small) }} steps: - uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2 @@ -153,8 +153,8 @@ jobs: run: .github/scripts/notify_slack.sh lint: - needs: - - setup + needs: + - setup uses: ./.github/workflows/reusable-lint.yml with: runs-on: ${{ needs.setup.outputs.compute-xl }} @@ -163,8 +163,8 @@ jobs: elevated-github-token: ${{ secrets.ELEVATED_GITHUB_TOKEN }} lint-32bit: - needs: - - setup + needs: + - setup uses: ./.github/workflows/reusable-lint.yml with: go-arch: "386" @@ -175,20 +175,33 @@ jobs: # create a development build dev-build: - needs: - - setup + needs: + - setup uses: ./.github/workflows/reusable-dev-build.yml - with: + with: runs-on: ${{ needs.setup.outputs.compute-xl }} repository-name: ${{ github.repository }} secrets: elevated-github-token: ${{ secrets.ELEVATED_GITHUB_TOKEN }} + dev-build-s390x: + if: ${{ endsWith(github.repository, '-enterprise') }} + needs: + - setup + uses: ./.github/workflows/reusable-dev-build.yml + with: + uploaded-binary-name: 'consul-bin-s390x' + runs-on: ${{ needs.setup.outputs.compute-xl }} + go-arch: "s390x" + repository-name: ${{ github.repository }} + secrets: + elevated-github-token: ${{ secrets.ELEVATED_GITHUB_TOKEN }} + # dev-build-arm64: # # only run on enterprise because GHA does not have arm64 runners in OSS # if: ${{ endsWith(github.repository, '-enterprise') }} - # needs: - # - setup + # needs: + # - setup # uses: ./.github/workflows/reusable-dev-build.yml # with: # uploaded-binary-name: 'consul-bin-arm64' @@ -201,8 +214,8 @@ jobs: # go-test-arm64: # # only run on enterprise because GHA does not have arm64 runners in OSS # if: ${{ endsWith(github.repository, '-enterprise') }} - # needs: - # - setup + # needs: + # - setup # - dev-build-arm64 # uses: ./.github/workflows/reusable-unit-split.yml # with: @@ -218,8 +231,8 @@ jobs: # datadog-api-key: "${{ !endsWith(github.repository, '-enterprise') && secrets.DATADOG_API_KEY || '' }}" go-test-oss: - needs: - - setup + needs: + - setup - dev-build uses: ./.github/workflows/reusable-unit-split.yml with: @@ -230,7 +243,7 @@ jobs: go-tags: "" permissions: id-token: write # NOTE: this permission is explicitly required for Vault auth. - contents: read + contents: read secrets: elevated-github-token: ${{ secrets.ELEVATED_GITHUB_TOKEN }} consul-license: ${{secrets.CONSUL_LICENSE}} @@ -238,8 +251,8 @@ jobs: go-test-enterprise: if: ${{ endsWith(github.repository, '-enterprise') }} - needs: - - setup + needs: + - setup - dev-build uses: ./.github/workflows/reusable-unit-split.yml with: @@ -250,15 +263,15 @@ jobs: go-tags: "${{ github.event.repository.name == 'consul-enterprise' && 'consulent consulprem consuldev' || '' }}" permissions: id-token: write # NOTE: this permission is explicitly required for Vault auth. 
- contents: read + contents: read secrets: elevated-github-token: ${{ secrets.ELEVATED_GITHUB_TOKEN }} consul-license: ${{secrets.CONSUL_LICENSE}} datadog-api-key: "${{ !endsWith(github.repository, '-enterprise') && secrets.DATADOG_API_KEY || '' }}" go-test-race: - needs: - - setup + needs: + - setup - dev-build uses: ./.github/workflows/reusable-unit.yml with: @@ -270,15 +283,15 @@ jobs: go-tags: "${{ github.event.repository.name == 'consul-enterprise' && 'consulent consulprem consuldev' || '' }}" permissions: id-token: write # NOTE: this permission is explicitly required for Vault auth. - contents: read + contents: read secrets: elevated-github-token: ${{ secrets.ELEVATED_GITHUB_TOKEN }} consul-license: ${{secrets.CONSUL_LICENSE}} datadog-api-key: "${{ !endsWith(github.repository, '-enterprise') && secrets.DATADOG_API_KEY || '' }}" go-test-32bit: - needs: - - setup + needs: + - setup - dev-build uses: ./.github/workflows/reusable-unit.yml with: @@ -290,7 +303,28 @@ jobs: go-tags: "${{ github.event.repository.name == 'consul-enterprise' && 'consulent consulprem consuldev' || '' }}" permissions: id-token: write # NOTE: this permission is explicitly required for Vault auth. - contents: read + contents: read + secrets: + elevated-github-token: ${{ secrets.ELEVATED_GITHUB_TOKEN }} + consul-license: ${{secrets.CONSUL_LICENSE}} + datadog-api-key: "${{ !endsWith(github.repository, '-enterprise') && secrets.DATADOG_API_KEY || '' }}" + + go-test-s390x: + if: ${{ endsWith(github.repository, '-enterprise') }} + needs: + - setup + - dev-build-s390x + uses: ./.github/workflows/reusable-unit.yml + with: + uploaded-binary-name: 'consul-bin-s390x' + directory: . + go-test-flags: 'export GO_TEST_FLAGS="-short"' + runs-on: ${{ needs.setup.outputs.compute-xl }} + repository-name: ${{ github.repository }} + go-tags: "${{ github.event.repository.name == 'consul-enterprise' && 'consulent consulprem consuldev' || '' }}" + permissions: + id-token: write # NOTE: this permission is explicitly required for Vault auth. + contents: read secrets: elevated-github-token: ${{ secrets.ELEVATED_GITHUB_TOKEN }} consul-license: ${{secrets.CONSUL_LICENSE}} @@ -308,7 +342,7 @@ jobs: go-tags: "${{ github.event.repository.name == 'consul-enterprise' && 'consulent consulprem consuldev' || '' }}" permissions: id-token: write # NOTE: this permission is explicitly required for Vault auth. - contents: read + contents: read secrets: elevated-github-token: ${{ secrets.ELEVATED_GITHUB_TOKEN }} consul-license: ${{secrets.CONSUL_LICENSE}} @@ -326,15 +360,15 @@ jobs: go-tags: "${{ github.event.repository.name == 'consul-enterprise' && 'consulent consulprem consuldev' || '' }}" permissions: id-token: write # NOTE: this permission is explicitly required for Vault auth. - contents: read + contents: read secrets: elevated-github-token: ${{ secrets.ELEVATED_GITHUB_TOKEN }} consul-license: ${{secrets.CONSUL_LICENSE}} datadog-api-key: "${{ !endsWith(github.repository, '-enterprise') && secrets.DATADOG_API_KEY || '' }}" go-test-api-1-19: - needs: - - setup + needs: + - setup - dev-build uses: ./.github/workflows/reusable-unit.yml with: @@ -344,15 +378,15 @@ jobs: go-tags: "${{ github.event.repository.name == 'consul-enterprise' && 'consulent consulprem consuldev' || '' }}" permissions: id-token: write # NOTE: this permission is explicitly required for Vault auth. 
- contents: read + contents: read secrets: elevated-github-token: ${{ secrets.ELEVATED_GITHUB_TOKEN }} consul-license: ${{secrets.CONSUL_LICENSE}} datadog-api-key: "${{ !endsWith(github.repository, '-enterprise') && secrets.DATADOG_API_KEY || '' }}" go-test-api-1-20: - needs: - - setup + needs: + - setup - dev-build uses: ./.github/workflows/reusable-unit.yml with: @@ -362,15 +396,15 @@ jobs: go-tags: "${{ github.event.repository.name == 'consul-enterprise' && 'consulent consulprem consuldev' || '' }}" permissions: id-token: write # NOTE: this permission is explicitly required for Vault auth. - contents: read + contents: read secrets: elevated-github-token: ${{ secrets.ELEVATED_GITHUB_TOKEN }} consul-license: ${{secrets.CONSUL_LICENSE}} datadog-api-key: "${{ !endsWith(github.repository, '-enterprise') && secrets.DATADOG_API_KEY || '' }}" go-test-sdk-1-19: - needs: - - setup + needs: + - setup - dev-build uses: ./.github/workflows/reusable-unit.yml with: @@ -380,15 +414,15 @@ jobs: go-tags: "${{ github.event.repository.name == 'consul-enterprise' && 'consulent consulprem consuldev' || '' }}" permissions: id-token: write # NOTE: this permission is explicitly required for Vault auth. - contents: read + contents: read secrets: elevated-github-token: ${{ secrets.ELEVATED_GITHUB_TOKEN }} consul-license: ${{secrets.CONSUL_LICENSE}} datadog-api-key: "${{ !endsWith(github.repository, '-enterprise') && secrets.DATADOG_API_KEY || '' }}" go-test-sdk-1-20: - needs: - - setup + needs: + - setup - dev-build uses: ./.github/workflows/reusable-unit.yml with: @@ -398,7 +432,7 @@ jobs: go-tags: "${{ github.event.repository.name == 'consul-enterprise' && 'consulent consulprem consuldev' || '' }}" permissions: id-token: write # NOTE: this permission is explicitly required for Vault auth. - contents: read + contents: read secrets: elevated-github-token: ${{ secrets.ELEVATED_GITHUB_TOKEN }} consul-license: ${{secrets.CONSUL_LICENSE}} @@ -418,13 +452,13 @@ jobs: # - be placed after the fanout of a workflow so that everything fans back in # to this job. # - "need" any job that is part of the fan out / fan in - # - implement the if logic because we have conditional jobs - # (go-test-enteprise) that this job needs and this would potentially get - # skipped if a previous job got skipped. So we use the if clause to make + # - implement the if logic because we have conditional jobs + # (go-test-enteprise) that this job needs and this would potentially get + # skipped if a previous job got skipped. So we use the if clause to make # sure it does not get skipped. 
go-tests-success: - needs: + needs: - setup - check-generated-deep-copy - check-generated-protobuf @@ -436,7 +470,7 @@ jobs: - lint-32bit # - go-test-arm64 - go-test-enterprise - - go-test-oss + - go-test-oss - go-test-race - go-test-envoyextensions - go-test-troubleshoot @@ -445,6 +479,7 @@ jobs: - go-test-sdk-1-19 - go-test-sdk-1-20 - go-test-32bit + - go-test-s390x runs-on: ${{ fromJSON(needs.setup.outputs.compute-small) }} if: ${{ always() }} steps: diff --git a/.github/workflows/reusable-dev-build.yml b/.github/workflows/reusable-dev-build.yml index 3ca661398506..2db9670655e4 100644 --- a/.github/workflows/reusable-dev-build.yml +++ b/.github/workflows/reusable-dev-build.yml @@ -1,4 +1,4 @@ -name: reusable-dev-build +name: reusable-dev-build on: workflow_call: From efe981637b44d9e154474de2ba5ca6e6ecb4ca8d Mon Sep 17 00:00:00 2001 From: nv-hashi <80716011+nv-hashi@users.noreply.github.com> Date: Wed, 12 Jul 2023 19:46:16 -0600 Subject: [PATCH 23/43] :ermahgerd "Sevice Mesh" -> "Service Mesh" (#18116) Just a typo in the docs. --- website/content/docs/connect/nomad.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/content/docs/connect/nomad.mdx b/website/content/docs/connect/nomad.mdx index cc3a7c9034df..c65f07bc9162 100644 --- a/website/content/docs/connect/nomad.mdx +++ b/website/content/docs/connect/nomad.mdx @@ -1,6 +1,6 @@ --- layout: docs -page_title: Sevice Mesh - Nomad Integration +page_title: Service Mesh - Nomad Integration description: >- Consul's service mesh can be applied to provide secure communication between services managed by Nomad's scheduler and orchestrator functions, including Nomad jobs and task groups. Use the guide and reference documentation to learn more. --- From c328ba85bd2bfc934706a651c29e51907280d40a Mon Sep 17 00:00:00 2001 From: Iryna Shustava Date: Thu, 13 Jul 2023 13:06:56 -0600 Subject: [PATCH 24/43] Split pbmesh.UpstreamsConfiguration as a resource out of pbmesh.Upstreams (#17991) Configuration that previously was inlined into the Upstreams resource applies to both explicit and implicit upstreams and so it makes sense to split it out into its own resource. 
It also has other minor changes: - Renames `proxy.proto` proxy_configuration.proto` - Changes the type of `Upstream.destination_ref` from `pbresource.ID` to `pbresource.Reference` - Adds comments to fields that didn't have them --- internal/mesh/exports.go | 5 +- .../internal/types/proxy_configuration.go | 2 +- internal/mesh/internal/types/types.go | 1 + internal/mesh/internal/types/upstreams.go | 2 +- .../internal/types/upstreams_configuration.go | 32 + ...ry.go => proxy_configuration.pb.binary.go} | 2 +- ...{proxy.pb.go => proxy_configuration.pb.go} | 432 +++++------ ...{proxy.proto => proxy_configuration.proto} | 0 .../pbmesh/v1alpha1/upstreams.pb.binary.go | 34 +- proto-public/pbmesh/v1alpha1/upstreams.pb.go | 628 ++++------------ proto-public/pbmesh/v1alpha1/upstreams.proto | 82 +-- .../upstreams_configuration.pb.binary.go | 58 ++ .../v1alpha1/upstreams_configuration.pb.go | 682 ++++++++++++++++++ .../v1alpha1/upstreams_configuration.proto | 103 +++ 14 files changed, 1278 insertions(+), 785 deletions(-) create mode 100644 internal/mesh/internal/types/upstreams_configuration.go rename proto-public/pbmesh/v1alpha1/{proxy.pb.binary.go => proxy_configuration.pb.binary.go} (96%) rename proto-public/pbmesh/v1alpha1/{proxy.pb.go => proxy_configuration.pb.go} (51%) rename proto-public/pbmesh/v1alpha1/{proxy.proto => proxy_configuration.proto} (100%) create mode 100644 proto-public/pbmesh/v1alpha1/upstreams_configuration.pb.binary.go create mode 100644 proto-public/pbmesh/v1alpha1/upstreams_configuration.pb.go create mode 100644 proto-public/pbmesh/v1alpha1/upstreams_configuration.proto diff --git a/internal/mesh/exports.go b/internal/mesh/exports.go index 753c10a7ba93..c73ebdb097f5 100644 --- a/internal/mesh/exports.go +++ b/internal/mesh/exports.go @@ -22,8 +22,9 @@ var ( // Resource Types for the v1alpha1 version. 
- ProxyConfigurationV1Alpha1Type = types.ProxyConfigurationV1Alpha1Type - UpstreamsV1Alpha1Type = types.UpstreamsV1Alpha1Type + ProxyConfigurationV1Alpha1Type = types.ProxyConfigurationV1Alpha1Type + UpstreamsV1Alpha1Type = types.UpstreamsV1Alpha1Type + UpstreamsConfigurationV1Alpha1Type = types.UpstreamsConfigurationV1Alpha1Type ) // RegisterTypes adds all resource types within the "catalog" API group diff --git a/internal/mesh/internal/types/proxy_configuration.go b/internal/mesh/internal/types/proxy_configuration.go index 9205dc81b132..3349090b524a 100644 --- a/internal/mesh/internal/types/proxy_configuration.go +++ b/internal/mesh/internal/types/proxy_configuration.go @@ -16,7 +16,7 @@ const ( var ( ProxyConfigurationV1Alpha1Type = &pbresource.Type{ Group: GroupName, - GroupVersion: CurrentVersion, + GroupVersion: VersionV1Alpha1, Kind: ProxyConfigurationKind, } diff --git a/internal/mesh/internal/types/types.go b/internal/mesh/internal/types/types.go index 3eeb69bd101c..3a7c6a329ac4 100644 --- a/internal/mesh/internal/types/types.go +++ b/internal/mesh/internal/types/types.go @@ -16,4 +16,5 @@ const ( func Register(r resource.Registry) { RegisterProxyConfiguration(r) RegisterUpstreams(r) + RegisterUpstreamsConfiguration(r) } diff --git a/internal/mesh/internal/types/upstreams.go b/internal/mesh/internal/types/upstreams.go index 54fd14b098d5..8ccb1554593b 100644 --- a/internal/mesh/internal/types/upstreams.go +++ b/internal/mesh/internal/types/upstreams.go @@ -16,7 +16,7 @@ const ( var ( UpstreamsV1Alpha1Type = &pbresource.Type{ Group: GroupName, - GroupVersion: CurrentVersion, + GroupVersion: VersionV1Alpha1, Kind: UpstreamsKind, } diff --git a/internal/mesh/internal/types/upstreams_configuration.go b/internal/mesh/internal/types/upstreams_configuration.go new file mode 100644 index 000000000000..0f4db27d96e7 --- /dev/null +++ b/internal/mesh/internal/types/upstreams_configuration.go @@ -0,0 +1,32 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package types + +import ( + "github.com/hashicorp/consul/internal/resource" + pbmesh "github.com/hashicorp/consul/proto-public/pbmesh/v1alpha1" + "github.com/hashicorp/consul/proto-public/pbresource" +) + +const ( + UpstreamsConfigurationKind = "UpstreamsConfiguration" +) + +var ( + UpstreamsConfigurationV1Alpha1Type = &pbresource.Type{ + Group: GroupName, + GroupVersion: VersionV1Alpha1, + Kind: UpstreamsConfigurationKind, + } + + UpstreamsConfigurationType = UpstreamsConfigurationV1Alpha1Type +) + +func RegisterUpstreamsConfiguration(r resource.Registry) { + r.Register(resource.Registration{ + Type: UpstreamsConfigurationV1Alpha1Type, + Proto: &pbmesh.UpstreamsConfiguration{}, + Validate: nil, + }) +} diff --git a/proto-public/pbmesh/v1alpha1/proxy.pb.binary.go b/proto-public/pbmesh/v1alpha1/proxy_configuration.pb.binary.go similarity index 96% rename from proto-public/pbmesh/v1alpha1/proxy.pb.binary.go rename to proto-public/pbmesh/v1alpha1/proxy_configuration.pb.binary.go index f39ae6afec84..5238331fb7f9 100644 --- a/proto-public/pbmesh/v1alpha1/proxy.pb.binary.go +++ b/proto-public/pbmesh/v1alpha1/proxy_configuration.pb.binary.go @@ -1,5 +1,5 @@ // Code generated by protoc-gen-go-binary. DO NOT EDIT. 
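The hand-written changes above (exports.go, types.go, upstreams_configuration.go) are what actually expose the new UpstreamsConfiguration resource type; the rest of this patch is generated code. As a rough sketch of how the new type could be used once registered, the snippet below wraps an empty `pbmesh.UpstreamsConfiguration` in a `pbresource.Resource`. The inline `pbresource.Type` values mirror the constants added in this patch, but the helper name and the exact `Resource`/`ID` field set shown are assumptions for illustration only, not part of the change itself.

```go
package main

import (
	"fmt"

	"google.golang.org/protobuf/types/known/anypb"

	pbmesh "github.com/hashicorp/consul/proto-public/pbmesh/v1alpha1"
	"github.com/hashicorp/consul/proto-public/pbresource"
)

// buildDemoResource is a hypothetical helper: it shows the shape of a resource
// using the new type. Field names on pbresource.Resource/ID are assumed from
// the public proto and may differ in detail.
func buildDemoResource() (*pbresource.Resource, error) {
	data, err := anypb.New(&pbmesh.UpstreamsConfiguration{})
	if err != nil {
		return nil, err
	}
	return &pbresource.Resource{
		Id: &pbresource.ID{
			// These values mirror GroupName, VersionV1Alpha1, and
			// UpstreamsConfigurationKind from the types package above.
			Type: &pbresource.Type{
				Group:        "mesh",
				GroupVersion: "v1alpha1",
				Kind:         "UpstreamsConfiguration",
			},
			Name: "example-upstreams-configuration",
		},
		Data: data,
	}, nil
}

func main() {
	r, err := buildDemoResource()
	if err != nil {
		panic(err)
	}
	fmt.Println(r.Id.Type.Kind) // UpstreamsConfiguration
}
```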
-// source: pbmesh/v1alpha1/proxy.proto +// source: pbmesh/v1alpha1/proxy_configuration.proto package meshv1alpha1 diff --git a/proto-public/pbmesh/v1alpha1/proxy.pb.go b/proto-public/pbmesh/v1alpha1/proxy_configuration.pb.go similarity index 51% rename from proto-public/pbmesh/v1alpha1/proxy.pb.go rename to proto-public/pbmesh/v1alpha1/proxy_configuration.pb.go index b6ef2edf873b..6ce928b15384 100644 --- a/proto-public/pbmesh/v1alpha1/proxy.pb.go +++ b/proto-public/pbmesh/v1alpha1/proxy_configuration.pb.go @@ -5,7 +5,7 @@ // versions: // protoc-gen-go v1.30.0 // protoc (unknown) -// source: pbmesh/v1alpha1/proxy.proto +// source: pbmesh/v1alpha1/proxy_configuration.proto package meshv1alpha1 @@ -66,11 +66,11 @@ func (x ProxyMode) String() string { } func (ProxyMode) Descriptor() protoreflect.EnumDescriptor { - return file_pbmesh_v1alpha1_proxy_proto_enumTypes[0].Descriptor() + return file_pbmesh_v1alpha1_proxy_configuration_proto_enumTypes[0].Descriptor() } func (ProxyMode) Type() protoreflect.EnumType { - return &file_pbmesh_v1alpha1_proxy_proto_enumTypes[0] + return &file_pbmesh_v1alpha1_proxy_configuration_proto_enumTypes[0] } func (x ProxyMode) Number() protoreflect.EnumNumber { @@ -79,7 +79,7 @@ func (x ProxyMode) Number() protoreflect.EnumNumber { // Deprecated: Use ProxyMode.Descriptor instead. func (ProxyMode) EnumDescriptor() ([]byte, []int) { - return file_pbmesh_v1alpha1_proxy_proto_rawDescGZIP(), []int{0} + return file_pbmesh_v1alpha1_proxy_configuration_proto_rawDescGZIP(), []int{0} } type ProxyConfiguration struct { @@ -99,14 +99,14 @@ type ProxyConfiguration struct { // deprecated: prevent usage when using v2 APIs directly. // needed for backwards compatibility // - // Deprecated: Marked as deprecated in pbmesh/v1alpha1/proxy.proto. + // Deprecated: Marked as deprecated in pbmesh/v1alpha1/proxy_configuration.proto. OpaqueConfig *structpb.Struct `protobuf:"bytes,4,opt,name=opaque_config,json=opaqueConfig,proto3" json:"opaque_config,omitempty"` } func (x *ProxyConfiguration) Reset() { *x = ProxyConfiguration{} if protoimpl.UnsafeEnabled { - mi := &file_pbmesh_v1alpha1_proxy_proto_msgTypes[0] + mi := &file_pbmesh_v1alpha1_proxy_configuration_proto_msgTypes[0] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -119,7 +119,7 @@ func (x *ProxyConfiguration) String() string { func (*ProxyConfiguration) ProtoMessage() {} func (x *ProxyConfiguration) ProtoReflect() protoreflect.Message { - mi := &file_pbmesh_v1alpha1_proxy_proto_msgTypes[0] + mi := &file_pbmesh_v1alpha1_proxy_configuration_proto_msgTypes[0] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -132,7 +132,7 @@ func (x *ProxyConfiguration) ProtoReflect() protoreflect.Message { // Deprecated: Use ProxyConfiguration.ProtoReflect.Descriptor instead. func (*ProxyConfiguration) Descriptor() ([]byte, []int) { - return file_pbmesh_v1alpha1_proxy_proto_rawDescGZIP(), []int{0} + return file_pbmesh_v1alpha1_proxy_configuration_proto_rawDescGZIP(), []int{0} } func (x *ProxyConfiguration) GetWorkloads() *v1alpha1.WorkloadSelector { @@ -156,7 +156,7 @@ func (x *ProxyConfiguration) GetBootstrapConfig() *BootstrapConfig { return nil } -// Deprecated: Marked as deprecated in pbmesh/v1alpha1/proxy.proto. +// Deprecated: Marked as deprecated in pbmesh/v1alpha1/proxy_configuration.proto. 
func (x *ProxyConfiguration) GetOpaqueConfig() *structpb.Struct { if x != nil { return x.OpaqueConfig @@ -187,18 +187,18 @@ type DynamicConfig struct { // local_workload_address, local_workload_port, and local_workload_socket_path // are deprecated and are only needed for migration of existing resources. // - // Deprecated: Marked as deprecated in pbmesh/v1alpha1/proxy.proto. + // Deprecated: Marked as deprecated in pbmesh/v1alpha1/proxy_configuration.proto. LocalWorkloadAddress string `protobuf:"bytes,10,opt,name=local_workload_address,json=localWorkloadAddress,proto3" json:"local_workload_address,omitempty"` - // Deprecated: Marked as deprecated in pbmesh/v1alpha1/proxy.proto. + // Deprecated: Marked as deprecated in pbmesh/v1alpha1/proxy_configuration.proto. LocalWorkloadPort uint32 `protobuf:"varint,11,opt,name=local_workload_port,json=localWorkloadPort,proto3" json:"local_workload_port,omitempty"` - // Deprecated: Marked as deprecated in pbmesh/v1alpha1/proxy.proto. + // Deprecated: Marked as deprecated in pbmesh/v1alpha1/proxy_configuration.proto. LocalWorkloadSocketPath string `protobuf:"bytes,12,opt,name=local_workload_socket_path,json=localWorkloadSocketPath,proto3" json:"local_workload_socket_path,omitempty"` } func (x *DynamicConfig) Reset() { *x = DynamicConfig{} if protoimpl.UnsafeEnabled { - mi := &file_pbmesh_v1alpha1_proxy_proto_msgTypes[1] + mi := &file_pbmesh_v1alpha1_proxy_configuration_proto_msgTypes[1] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -211,7 +211,7 @@ func (x *DynamicConfig) String() string { func (*DynamicConfig) ProtoMessage() {} func (x *DynamicConfig) ProtoReflect() protoreflect.Message { - mi := &file_pbmesh_v1alpha1_proxy_proto_msgTypes[1] + mi := &file_pbmesh_v1alpha1_proxy_configuration_proto_msgTypes[1] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -224,7 +224,7 @@ func (x *DynamicConfig) ProtoReflect() protoreflect.Message { // Deprecated: Use DynamicConfig.ProtoReflect.Descriptor instead. func (*DynamicConfig) Descriptor() ([]byte, []int) { - return file_pbmesh_v1alpha1_proxy_proto_rawDescGZIP(), []int{1} + return file_pbmesh_v1alpha1_proxy_configuration_proto_rawDescGZIP(), []int{1} } func (x *DynamicConfig) GetMode() ProxyMode { @@ -290,7 +290,7 @@ func (x *DynamicConfig) GetLocalClusterJson() string { return "" } -// Deprecated: Marked as deprecated in pbmesh/v1alpha1/proxy.proto. +// Deprecated: Marked as deprecated in pbmesh/v1alpha1/proxy_configuration.proto. func (x *DynamicConfig) GetLocalWorkloadAddress() string { if x != nil { return x.LocalWorkloadAddress @@ -298,7 +298,7 @@ func (x *DynamicConfig) GetLocalWorkloadAddress() string { return "" } -// Deprecated: Marked as deprecated in pbmesh/v1alpha1/proxy.proto. +// Deprecated: Marked as deprecated in pbmesh/v1alpha1/proxy_configuration.proto. func (x *DynamicConfig) GetLocalWorkloadPort() uint32 { if x != nil { return x.LocalWorkloadPort @@ -306,7 +306,7 @@ func (x *DynamicConfig) GetLocalWorkloadPort() uint32 { return 0 } -// Deprecated: Marked as deprecated in pbmesh/v1alpha1/proxy.proto. +// Deprecated: Marked as deprecated in pbmesh/v1alpha1/proxy_configuration.proto. 
func (x *DynamicConfig) GetLocalWorkloadSocketPath() string { if x != nil { return x.LocalWorkloadSocketPath @@ -330,7 +330,7 @@ type TransparentProxy struct { func (x *TransparentProxy) Reset() { *x = TransparentProxy{} if protoimpl.UnsafeEnabled { - mi := &file_pbmesh_v1alpha1_proxy_proto_msgTypes[2] + mi := &file_pbmesh_v1alpha1_proxy_configuration_proto_msgTypes[2] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -343,7 +343,7 @@ func (x *TransparentProxy) String() string { func (*TransparentProxy) ProtoMessage() {} func (x *TransparentProxy) ProtoReflect() protoreflect.Message { - mi := &file_pbmesh_v1alpha1_proxy_proto_msgTypes[2] + mi := &file_pbmesh_v1alpha1_proxy_configuration_proto_msgTypes[2] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -356,7 +356,7 @@ func (x *TransparentProxy) ProtoReflect() protoreflect.Message { // Deprecated: Use TransparentProxy.ProtoReflect.Descriptor instead. func (*TransparentProxy) Descriptor() ([]byte, []int) { - return file_pbmesh_v1alpha1_proxy_proto_rawDescGZIP(), []int{2} + return file_pbmesh_v1alpha1_proxy_configuration_proto_rawDescGZIP(), []int{2} } func (x *TransparentProxy) GetOutboundListenerPort() uint32 { @@ -398,7 +398,7 @@ type BootstrapConfig struct { func (x *BootstrapConfig) Reset() { *x = BootstrapConfig{} if protoimpl.UnsafeEnabled { - mi := &file_pbmesh_v1alpha1_proxy_proto_msgTypes[3] + mi := &file_pbmesh_v1alpha1_proxy_configuration_proto_msgTypes[3] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -411,7 +411,7 @@ func (x *BootstrapConfig) String() string { func (*BootstrapConfig) ProtoMessage() {} func (x *BootstrapConfig) ProtoReflect() protoreflect.Message { - mi := &file_pbmesh_v1alpha1_proxy_proto_msgTypes[3] + mi := &file_pbmesh_v1alpha1_proxy_configuration_proto_msgTypes[3] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -424,7 +424,7 @@ func (x *BootstrapConfig) ProtoReflect() protoreflect.Message { // Deprecated: Use BootstrapConfig.ProtoReflect.Descriptor instead. 
func (*BootstrapConfig) Descriptor() ([]byte, []int) { - return file_pbmesh_v1alpha1_proxy_proto_rawDescGZIP(), []int{3} + return file_pbmesh_v1alpha1_proxy_configuration_proto_rawDescGZIP(), []int{3} } func (x *BootstrapConfig) GetStatsdUrl() string { @@ -518,192 +518,194 @@ func (x *BootstrapConfig) GetTracingConfigJson() string { return "" } -var File_pbmesh_v1alpha1_proxy_proto protoreflect.FileDescriptor - -var file_pbmesh_v1alpha1_proxy_proto_rawDesc = []byte{ - 0x0a, 0x1b, 0x70, 0x62, 0x6d, 0x65, 0x73, 0x68, 0x2f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, - 0x31, 0x2f, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x1e, 0x68, - 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, - 0x6d, 0x65, 0x73, 0x68, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x1a, 0x1c, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x73, - 0x74, 0x72, 0x75, 0x63, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x70, 0x62, 0x63, - 0x61, 0x74, 0x61, 0x6c, 0x6f, 0x67, 0x2f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2f, - 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x20, - 0x70, 0x62, 0x6d, 0x65, 0x73, 0x68, 0x2f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2f, - 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x1a, 0x1c, 0x70, 0x62, 0x6d, 0x65, 0x73, 0x68, 0x2f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, - 0x31, 0x2f, 0x65, 0x78, 0x70, 0x6f, 0x73, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, +var File_pbmesh_v1alpha1_proxy_configuration_proto protoreflect.FileDescriptor + +var file_pbmesh_v1alpha1_proxy_configuration_proto_rawDesc = []byte{ + 0x0a, 0x29, 0x70, 0x62, 0x6d, 0x65, 0x73, 0x68, 0x2f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, + 0x31, 0x2f, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x1e, 0x68, 0x61, 0x73, + 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x6d, 0x65, + 0x73, 0x68, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x73, 0x74, 0x72, + 0x75, 0x63, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x70, 0x62, 0x63, 0x61, 0x74, + 0x61, 0x6c, 0x6f, 0x67, 0x2f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2f, 0x73, 0x65, + 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x20, 0x70, 0x62, + 0x6d, 0x65, 0x73, 0x68, 0x2f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2f, 0x63, 0x6f, + 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1c, 0x70, 0x62, 0x6d, 0x65, 0x73, 0x68, 0x2f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2f, - 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xdb, 0x02, - 0x0a, 0x12, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x51, 0x0a, 0x09, 0x77, 0x6f, 0x72, 0x6b, 0x6c, 0x6f, 0x61, 0x64, - 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, - 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x63, 0x61, 0x74, 0x61, 0x6c, - 0x6f, 0x67, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x57, 0x6f, 0x72, 
0x6b, - 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x52, 0x09, 0x77, 0x6f, - 0x72, 0x6b, 0x6c, 0x6f, 0x61, 0x64, 0x73, 0x12, 0x54, 0x0a, 0x0e, 0x64, 0x79, 0x6e, 0x61, 0x6d, - 0x69, 0x63, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x2d, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, - 0x75, 0x6c, 0x2e, 0x6d, 0x65, 0x73, 0x68, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, - 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0d, - 0x64, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x5a, 0x0a, - 0x10, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, - 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x6d, 0x65, 0x73, 0x68, 0x2e, - 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x42, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, - 0x61, 0x70, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0f, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, - 0x72, 0x61, 0x70, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x40, 0x0a, 0x0d, 0x6f, 0x70, 0x61, - 0x71, 0x75, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x42, 0x02, 0x18, 0x01, 0x52, 0x0c, 0x6f, - 0x70, 0x61, 0x71, 0x75, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0xf0, 0x07, 0x0a, 0x0d, - 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x3d, 0x0a, - 0x04, 0x6d, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x29, 0x2e, 0x68, 0x61, - 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x6d, - 0x65, 0x73, 0x68, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x50, 0x72, 0x6f, - 0x78, 0x79, 0x4d, 0x6f, 0x64, 0x65, 0x52, 0x04, 0x6d, 0x6f, 0x64, 0x65, 0x12, 0x5d, 0x0a, 0x11, - 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x70, 0x72, 0x6f, 0x78, - 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, - 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x6d, 0x65, 0x73, 0x68, 0x2e, - 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x61, - 0x72, 0x65, 0x6e, 0x74, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x52, 0x10, 0x74, 0x72, 0x61, 0x6e, 0x73, - 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x12, 0x6d, 0x0a, 0x10, 0x6c, - 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, - 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x42, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, + 0x65, 0x78, 0x70, 0x6f, 0x73, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x70, 0x62, + 0x6d, 0x65, 0x73, 0x68, 0x2f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2f, 0x72, 0x6f, + 0x75, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xdb, 0x02, 0x0a, 0x12, + 0x50, 0x72, 0x6f, 0x78, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x12, 0x51, 0x0a, 0x09, 0x77, 0x6f, 0x72, 0x6b, 0x6c, 0x6f, 0x61, 0x64, 0x73, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, + 0x70, 0x2e, 0x63, 
0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x63, 0x61, 0x74, 0x61, 0x6c, 0x6f, 0x67, + 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x6c, 0x6f, + 0x61, 0x64, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x52, 0x09, 0x77, 0x6f, 0x72, 0x6b, + 0x6c, 0x6f, 0x61, 0x64, 0x73, 0x12, 0x54, 0x0a, 0x0e, 0x64, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, + 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2d, 0x2e, + 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, + 0x2e, 0x6d, 0x65, 0x73, 0x68, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x44, + 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0d, 0x64, 0x79, + 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x5a, 0x0a, 0x10, 0x62, + 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x6d, 0x65, 0x73, 0x68, 0x2e, 0x76, 0x31, - 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x43, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, - 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0f, 0x6c, 0x6f, 0x63, 0x61, 0x6c, - 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x69, 0x0a, 0x13, 0x69, 0x6e, - 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x38, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, + 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x42, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0f, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, + 0x70, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x40, 0x0a, 0x0d, 0x6f, 0x70, 0x61, 0x71, 0x75, + 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x42, 0x02, 0x18, 0x01, 0x52, 0x0c, 0x6f, 0x70, 0x61, + 0x71, 0x75, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0xf0, 0x07, 0x0a, 0x0d, 0x44, 0x79, + 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x3d, 0x0a, 0x04, 0x6d, + 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x29, 0x2e, 0x68, 0x61, 0x73, 0x68, + 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x6d, 0x65, 0x73, + 0x68, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x50, 0x72, 0x6f, 0x78, 0x79, + 0x4d, 0x6f, 0x64, 0x65, 0x52, 0x04, 0x6d, 0x6f, 0x64, 0x65, 0x12, 0x5d, 0x0a, 0x11, 0x74, 0x72, + 0x61, 0x6e, 0x73, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, + 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x6d, 0x65, 0x73, 0x68, 0x2e, 0x76, 0x31, + 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x61, 0x72, 0x65, + 0x6e, 0x74, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x52, 0x10, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x61, + 0x72, 0x65, 0x6e, 0x74, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x12, 0x6d, 0x0a, 0x10, 0x6c, 0x6f, 0x63, + 0x61, 0x6c, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 
0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x42, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, + 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x6d, 0x65, 0x73, 0x68, 0x2e, 0x76, 0x31, 0x61, 0x6c, + 0x70, 0x68, 0x61, 0x31, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0f, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x43, 0x6f, + 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x69, 0x0a, 0x13, 0x69, 0x6e, 0x62, 0x6f, + 0x75, 0x6e, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x38, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, + 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x6d, 0x65, 0x73, 0x68, 0x2e, 0x76, 0x31, + 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x49, 0x6e, 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x43, 0x6f, + 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, + 0x12, 0x69, 0x6e, 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x12, 0x5b, 0x0a, 0x11, 0x6d, 0x65, 0x73, 0x68, 0x5f, 0x67, 0x61, 0x74, 0x65, + 0x77, 0x61, 0x79, 0x5f, 0x6d, 0x6f, 0x64, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2f, + 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, + 0x6c, 0x2e, 0x6d, 0x65, 0x73, 0x68, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, + 0x4d, 0x65, 0x73, 0x68, 0x47, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x4d, 0x6f, 0x64, 0x65, 0x52, + 0x0f, 0x6d, 0x65, 0x73, 0x68, 0x47, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x4d, 0x6f, 0x64, 0x65, + 0x12, 0x51, 0x0a, 0x0d, 0x65, 0x78, 0x70, 0x6f, 0x73, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x6d, 0x65, 0x73, 0x68, 0x2e, - 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x49, 0x6e, 0x62, 0x6f, 0x75, 0x6e, 0x64, - 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x52, 0x12, 0x69, 0x6e, 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x5b, 0x0a, 0x11, 0x6d, 0x65, 0x73, 0x68, 0x5f, 0x67, 0x61, - 0x74, 0x65, 0x77, 0x61, 0x79, 0x5f, 0x6d, 0x6f, 0x64, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, - 0x32, 0x2f, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, + 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x45, 0x78, 0x70, 0x6f, 0x73, 0x65, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0c, 0x65, 0x78, 0x70, 0x6f, 0x73, 0x65, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x12, 0x30, 0x0a, 0x14, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x5f, 0x6c, 0x69, + 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x5f, 0x6a, 0x73, 0x6f, 0x6e, 0x18, 0x07, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x12, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, + 0x72, 0x4a, 0x73, 0x6f, 0x6e, 0x12, 0x32, 0x0a, 0x15, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, + 0x72, 0x5f, 0x74, 0x72, 0x61, 0x63, 0x69, 0x6e, 0x67, 0x5f, 0x6a, 0x73, 0x6f, 0x6e, 0x18, 0x08, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x54, 0x72, + 0x61, 0x63, 0x69, 0x6e, 0x67, 0x4a, 0x73, 0x6f, 0x6e, 0x12, 0x2c, 
0x0a, 0x12, 0x6c, 0x6f, 0x63, + 0x61, 0x6c, 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x6a, 0x73, 0x6f, 0x6e, 0x18, + 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x43, 0x6c, 0x75, 0x73, + 0x74, 0x65, 0x72, 0x4a, 0x73, 0x6f, 0x6e, 0x12, 0x38, 0x0a, 0x16, 0x6c, 0x6f, 0x63, 0x61, 0x6c, + 0x5f, 0x77, 0x6f, 0x72, 0x6b, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, + 0x73, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x42, 0x02, 0x18, 0x01, 0x52, 0x14, 0x6c, 0x6f, 0x63, + 0x61, 0x6c, 0x57, 0x6f, 0x72, 0x6b, 0x6c, 0x6f, 0x61, 0x64, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, + 0x73, 0x12, 0x32, 0x0a, 0x13, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x77, 0x6f, 0x72, 0x6b, 0x6c, + 0x6f, 0x61, 0x64, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0d, 0x42, 0x02, + 0x18, 0x01, 0x52, 0x11, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x57, 0x6f, 0x72, 0x6b, 0x6c, 0x6f, 0x61, + 0x64, 0x50, 0x6f, 0x72, 0x74, 0x12, 0x3f, 0x0a, 0x1a, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x77, + 0x6f, 0x72, 0x6b, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x70, + 0x61, 0x74, 0x68, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x09, 0x42, 0x02, 0x18, 0x01, 0x52, 0x17, 0x6c, + 0x6f, 0x63, 0x61, 0x6c, 0x57, 0x6f, 0x72, 0x6b, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x6f, 0x63, 0x6b, + 0x65, 0x74, 0x50, 0x61, 0x74, 0x68, 0x1a, 0x74, 0x0a, 0x14, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x43, + 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, + 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, + 0x12, 0x46, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x30, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, + 0x75, 0x6c, 0x2e, 0x6d, 0x65, 0x73, 0x68, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, + 0x2e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x71, 0x0a, 0x10, + 0x54, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x50, 0x72, 0x6f, 0x78, 0x79, + 0x12, 0x34, 0x0a, 0x16, 0x6f, 0x75, 0x74, 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x5f, 0x6c, 0x69, 0x73, + 0x74, 0x65, 0x6e, 0x65, 0x72, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, + 0x52, 0x14, 0x6f, 0x75, 0x74, 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, + 0x65, 0x72, 0x50, 0x6f, 0x72, 0x74, 0x12, 0x27, 0x0a, 0x0f, 0x64, 0x69, 0x61, 0x6c, 0x65, 0x64, + 0x5f, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6c, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x0e, 0x64, 0x69, 0x61, 0x6c, 0x65, 0x64, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6c, 0x79, 0x22, + 0xc0, 0x04, 0x0a, 0x0f, 0x42, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x74, 0x61, 0x74, 0x73, 0x64, 0x5f, 0x75, 0x72, + 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x74, 0x61, 0x74, 0x73, 0x64, 0x55, + 0x72, 0x6c, 0x12, 0x23, 0x0a, 0x0d, 0x64, 0x6f, 0x67, 0x73, 0x74, 0x61, 0x74, 0x73, 0x64, 0x5f, + 0x75, 0x72, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x64, 0x6f, 0x67, 0x73, 0x74, + 0x61, 0x74, 0x73, 0x64, 0x55, 0x72, 0x6c, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x74, 0x61, 0x74, 0x73, + 0x5f, 0x74, 0x61, 0x67, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x09, 0x73, 0x74, 0x61, + 0x74, 0x73, 0x54, 0x61, 0x67, 0x73, 0x12, 0x30, 0x0a, 0x14, 0x70, 0x72, 0x6f, 0x6d, 0x65, 
0x74, + 0x68, 0x65, 0x75, 0x73, 0x5f, 0x62, 0x69, 0x6e, 0x64, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, + 0x42, 0x69, 0x6e, 0x64, 0x41, 0x64, 0x64, 0x72, 0x12, 0x26, 0x0a, 0x0f, 0x73, 0x74, 0x61, 0x74, + 0x73, 0x5f, 0x62, 0x69, 0x6e, 0x64, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0d, 0x73, 0x74, 0x61, 0x74, 0x73, 0x42, 0x69, 0x6e, 0x64, 0x41, 0x64, 0x64, 0x72, + 0x12, 0x26, 0x0a, 0x0f, 0x72, 0x65, 0x61, 0x64, 0x79, 0x5f, 0x62, 0x69, 0x6e, 0x64, 0x5f, 0x61, + 0x64, 0x64, 0x72, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x72, 0x65, 0x61, 0x64, 0x79, + 0x42, 0x69, 0x6e, 0x64, 0x41, 0x64, 0x64, 0x72, 0x12, 0x2a, 0x0a, 0x11, 0x6f, 0x76, 0x65, 0x72, + 0x72, 0x69, 0x64, 0x65, 0x5f, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x74, 0x70, 0x6c, 0x18, 0x07, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x0f, 0x6f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x4a, 0x73, 0x6f, + 0x6e, 0x54, 0x70, 0x6c, 0x12, 0x30, 0x0a, 0x14, 0x73, 0x74, 0x61, 0x74, 0x69, 0x63, 0x5f, 0x63, + 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x5f, 0x6a, 0x73, 0x6f, 0x6e, 0x18, 0x08, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x12, 0x73, 0x74, 0x61, 0x74, 0x69, 0x63, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, + 0x72, 0x73, 0x4a, 0x73, 0x6f, 0x6e, 0x12, 0x32, 0x0a, 0x15, 0x73, 0x74, 0x61, 0x74, 0x69, 0x63, + 0x5f, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x73, 0x5f, 0x6a, 0x73, 0x6f, 0x6e, 0x18, + 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, 0x73, 0x74, 0x61, 0x74, 0x69, 0x63, 0x4c, 0x69, 0x73, + 0x74, 0x65, 0x6e, 0x65, 0x72, 0x73, 0x4a, 0x73, 0x6f, 0x6e, 0x12, 0x28, 0x0a, 0x10, 0x73, 0x74, + 0x61, 0x74, 0x73, 0x5f, 0x73, 0x69, 0x6e, 0x6b, 0x73, 0x5f, 0x6a, 0x73, 0x6f, 0x6e, 0x18, 0x0a, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x73, 0x74, 0x61, 0x74, 0x73, 0x53, 0x69, 0x6e, 0x6b, 0x73, + 0x4a, 0x73, 0x6f, 0x6e, 0x12, 0x2a, 0x0a, 0x11, 0x73, 0x74, 0x61, 0x74, 0x73, 0x5f, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x6a, 0x73, 0x6f, 0x6e, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0f, 0x73, 0x74, 0x61, 0x74, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x4a, 0x73, 0x6f, 0x6e, + 0x12, 0x30, 0x0a, 0x14, 0x73, 0x74, 0x61, 0x74, 0x73, 0x5f, 0x66, 0x6c, 0x75, 0x73, 0x68, 0x5f, + 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, + 0x73, 0x74, 0x61, 0x74, 0x73, 0x46, 0x6c, 0x75, 0x73, 0x68, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, + 0x61, 0x6c, 0x12, 0x2e, 0x0a, 0x13, 0x74, 0x72, 0x61, 0x63, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x6a, 0x73, 0x6f, 0x6e, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x11, 0x74, 0x72, 0x61, 0x63, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x4a, 0x73, + 0x6f, 0x6e, 0x2a, 0x56, 0x0a, 0x09, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x4d, 0x6f, 0x64, 0x65, 0x12, + 0x16, 0x0a, 0x12, 0x50, 0x52, 0x4f, 0x58, 0x59, 0x5f, 0x4d, 0x4f, 0x44, 0x45, 0x5f, 0x44, 0x45, + 0x46, 0x41, 0x55, 0x4c, 0x54, 0x10, 0x00, 0x12, 0x1a, 0x0a, 0x16, 0x50, 0x52, 0x4f, 0x58, 0x59, + 0x5f, 0x4d, 0x4f, 0x44, 0x45, 0x5f, 0x54, 0x52, 0x41, 0x4e, 0x53, 0x50, 0x41, 0x52, 0x45, 0x4e, + 0x54, 0x10, 0x01, 0x12, 0x15, 0x0a, 0x11, 0x50, 0x52, 0x4f, 0x58, 0x59, 0x5f, 0x4d, 0x4f, 0x44, + 0x45, 0x5f, 0x44, 0x49, 0x52, 0x45, 0x43, 0x54, 0x10, 0x02, 0x42, 0x9f, 0x02, 0x0a, 0x22, 0x63, + 0x6f, 0x6d, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x6d, 0x65, 0x73, 0x68, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, - 0x31, 0x2e, 0x4d, 
0x65, 0x73, 0x68, 0x47, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x4d, 0x6f, 0x64, - 0x65, 0x52, 0x0f, 0x6d, 0x65, 0x73, 0x68, 0x47, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x4d, 0x6f, - 0x64, 0x65, 0x12, 0x51, 0x0a, 0x0d, 0x65, 0x78, 0x70, 0x6f, 0x73, 0x65, 0x5f, 0x63, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x68, 0x61, 0x73, 0x68, - 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x6d, 0x65, 0x73, - 0x68, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x45, 0x78, 0x70, 0x6f, 0x73, - 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0c, 0x65, 0x78, 0x70, 0x6f, 0x73, 0x65, 0x43, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x30, 0x0a, 0x14, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x5f, - 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x5f, 0x6a, 0x73, 0x6f, 0x6e, 0x18, 0x07, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x12, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4c, 0x69, 0x73, 0x74, 0x65, - 0x6e, 0x65, 0x72, 0x4a, 0x73, 0x6f, 0x6e, 0x12, 0x32, 0x0a, 0x15, 0x6c, 0x69, 0x73, 0x74, 0x65, - 0x6e, 0x65, 0x72, 0x5f, 0x74, 0x72, 0x61, 0x63, 0x69, 0x6e, 0x67, 0x5f, 0x6a, 0x73, 0x6f, 0x6e, - 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, - 0x54, 0x72, 0x61, 0x63, 0x69, 0x6e, 0x67, 0x4a, 0x73, 0x6f, 0x6e, 0x12, 0x2c, 0x0a, 0x12, 0x6c, - 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x6a, 0x73, 0x6f, - 0x6e, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x43, 0x6c, - 0x75, 0x73, 0x74, 0x65, 0x72, 0x4a, 0x73, 0x6f, 0x6e, 0x12, 0x38, 0x0a, 0x16, 0x6c, 0x6f, 0x63, - 0x61, 0x6c, 0x5f, 0x77, 0x6f, 0x72, 0x6b, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x61, 0x64, 0x64, 0x72, - 0x65, 0x73, 0x73, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x42, 0x02, 0x18, 0x01, 0x52, 0x14, 0x6c, - 0x6f, 0x63, 0x61, 0x6c, 0x57, 0x6f, 0x72, 0x6b, 0x6c, 0x6f, 0x61, 0x64, 0x41, 0x64, 0x64, 0x72, - 0x65, 0x73, 0x73, 0x12, 0x32, 0x0a, 0x13, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x77, 0x6f, 0x72, - 0x6b, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0d, - 0x42, 0x02, 0x18, 0x01, 0x52, 0x11, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x57, 0x6f, 0x72, 0x6b, 0x6c, - 0x6f, 0x61, 0x64, 0x50, 0x6f, 0x72, 0x74, 0x12, 0x3f, 0x0a, 0x1a, 0x6c, 0x6f, 0x63, 0x61, 0x6c, - 0x5f, 0x77, 0x6f, 0x72, 0x6b, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, - 0x5f, 0x70, 0x61, 0x74, 0x68, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x09, 0x42, 0x02, 0x18, 0x01, 0x52, - 0x17, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x57, 0x6f, 0x72, 0x6b, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x6f, - 0x63, 0x6b, 0x65, 0x74, 0x50, 0x61, 0x74, 0x68, 0x1a, 0x74, 0x0a, 0x14, 0x4c, 0x6f, 0x63, 0x61, - 0x6c, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x6e, 0x74, 0x72, 0x79, - 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, - 0x65, 0x79, 0x12, 0x46, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x30, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, - 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x6d, 0x65, 0x73, 0x68, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, - 0x61, 0x31, 0x2e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x71, - 0x0a, 0x10, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x50, 0x72, 0x6f, - 0x78, 0x79, 0x12, 0x34, 0x0a, 0x16, 0x6f, 
0x75, 0x74, 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x5f, 0x6c, - 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x0d, 0x52, 0x14, 0x6f, 0x75, 0x74, 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x4c, 0x69, 0x73, 0x74, - 0x65, 0x6e, 0x65, 0x72, 0x50, 0x6f, 0x72, 0x74, 0x12, 0x27, 0x0a, 0x0f, 0x64, 0x69, 0x61, 0x6c, - 0x65, 0x64, 0x5f, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6c, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x08, 0x52, 0x0e, 0x64, 0x69, 0x61, 0x6c, 0x65, 0x64, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6c, - 0x79, 0x22, 0xc0, 0x04, 0x0a, 0x0f, 0x42, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x43, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x74, 0x61, 0x74, 0x73, 0x64, 0x5f, - 0x75, 0x72, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x74, 0x61, 0x74, 0x73, - 0x64, 0x55, 0x72, 0x6c, 0x12, 0x23, 0x0a, 0x0d, 0x64, 0x6f, 0x67, 0x73, 0x74, 0x61, 0x74, 0x73, - 0x64, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x64, 0x6f, 0x67, - 0x73, 0x74, 0x61, 0x74, 0x73, 0x64, 0x55, 0x72, 0x6c, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x74, 0x61, - 0x74, 0x73, 0x5f, 0x74, 0x61, 0x67, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x09, 0x73, - 0x74, 0x61, 0x74, 0x73, 0x54, 0x61, 0x67, 0x73, 0x12, 0x30, 0x0a, 0x14, 0x70, 0x72, 0x6f, 0x6d, - 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x5f, 0x62, 0x69, 0x6e, 0x64, 0x5f, 0x61, 0x64, 0x64, 0x72, - 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, - 0x75, 0x73, 0x42, 0x69, 0x6e, 0x64, 0x41, 0x64, 0x64, 0x72, 0x12, 0x26, 0x0a, 0x0f, 0x73, 0x74, - 0x61, 0x74, 0x73, 0x5f, 0x62, 0x69, 0x6e, 0x64, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x18, 0x05, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x0d, 0x73, 0x74, 0x61, 0x74, 0x73, 0x42, 0x69, 0x6e, 0x64, 0x41, 0x64, - 0x64, 0x72, 0x12, 0x26, 0x0a, 0x0f, 0x72, 0x65, 0x61, 0x64, 0x79, 0x5f, 0x62, 0x69, 0x6e, 0x64, - 0x5f, 0x61, 0x64, 0x64, 0x72, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x72, 0x65, 0x61, - 0x64, 0x79, 0x42, 0x69, 0x6e, 0x64, 0x41, 0x64, 0x64, 0x72, 0x12, 0x2a, 0x0a, 0x11, 0x6f, 0x76, - 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x5f, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x74, 0x70, 0x6c, 0x18, - 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x6f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x4a, - 0x73, 0x6f, 0x6e, 0x54, 0x70, 0x6c, 0x12, 0x30, 0x0a, 0x14, 0x73, 0x74, 0x61, 0x74, 0x69, 0x63, - 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x5f, 0x6a, 0x73, 0x6f, 0x6e, 0x18, 0x08, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x73, 0x74, 0x61, 0x74, 0x69, 0x63, 0x43, 0x6c, 0x75, 0x73, - 0x74, 0x65, 0x72, 0x73, 0x4a, 0x73, 0x6f, 0x6e, 0x12, 0x32, 0x0a, 0x15, 0x73, 0x74, 0x61, 0x74, - 0x69, 0x63, 0x5f, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x73, 0x5f, 0x6a, 0x73, 0x6f, - 0x6e, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, 0x73, 0x74, 0x61, 0x74, 0x69, 0x63, 0x4c, - 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x73, 0x4a, 0x73, 0x6f, 0x6e, 0x12, 0x28, 0x0a, 0x10, - 0x73, 0x74, 0x61, 0x74, 0x73, 0x5f, 0x73, 0x69, 0x6e, 0x6b, 0x73, 0x5f, 0x6a, 0x73, 0x6f, 0x6e, - 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x73, 0x74, 0x61, 0x74, 0x73, 0x53, 0x69, 0x6e, - 0x6b, 0x73, 0x4a, 0x73, 0x6f, 0x6e, 0x12, 0x2a, 0x0a, 0x11, 0x73, 0x74, 0x61, 0x74, 0x73, 0x5f, - 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x6a, 0x73, 0x6f, 0x6e, 0x18, 0x0b, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x0f, 0x73, 0x74, 0x61, 0x74, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x4a, 0x73, - 0x6f, 0x6e, 0x12, 0x30, 0x0a, 0x14, 0x73, 0x74, 0x61, 0x74, 0x73, 
0x5f, 0x66, 0x6c, 0x75, 0x73, - 0x68, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x12, 0x73, 0x74, 0x61, 0x74, 0x73, 0x46, 0x6c, 0x75, 0x73, 0x68, 0x49, 0x6e, 0x74, 0x65, - 0x72, 0x76, 0x61, 0x6c, 0x12, 0x2e, 0x0a, 0x13, 0x74, 0x72, 0x61, 0x63, 0x69, 0x6e, 0x67, 0x5f, - 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x6a, 0x73, 0x6f, 0x6e, 0x18, 0x0d, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x11, 0x74, 0x72, 0x61, 0x63, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x4a, 0x73, 0x6f, 0x6e, 0x2a, 0x56, 0x0a, 0x09, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x4d, 0x6f, 0x64, - 0x65, 0x12, 0x16, 0x0a, 0x12, 0x50, 0x52, 0x4f, 0x58, 0x59, 0x5f, 0x4d, 0x4f, 0x44, 0x45, 0x5f, - 0x44, 0x45, 0x46, 0x41, 0x55, 0x4c, 0x54, 0x10, 0x00, 0x12, 0x1a, 0x0a, 0x16, 0x50, 0x52, 0x4f, - 0x58, 0x59, 0x5f, 0x4d, 0x4f, 0x44, 0x45, 0x5f, 0x54, 0x52, 0x41, 0x4e, 0x53, 0x50, 0x41, 0x52, - 0x45, 0x4e, 0x54, 0x10, 0x01, 0x12, 0x15, 0x0a, 0x11, 0x50, 0x52, 0x4f, 0x58, 0x59, 0x5f, 0x4d, - 0x4f, 0x44, 0x45, 0x5f, 0x44, 0x49, 0x52, 0x45, 0x43, 0x54, 0x10, 0x02, 0x42, 0x92, 0x02, 0x0a, - 0x22, 0x63, 0x6f, 0x6d, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, - 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x6d, 0x65, 0x73, 0x68, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, - 0x68, 0x61, 0x31, 0x42, 0x0a, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, - 0x01, 0x5a, 0x45, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x68, 0x61, - 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2f, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2f, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x2d, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x2f, 0x70, 0x62, 0x6d, 0x65, - 0x73, 0x68, 0x2f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x3b, 0x6d, 0x65, 0x73, 0x68, - 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0xa2, 0x02, 0x03, 0x48, 0x43, 0x4d, 0xaa, 0x02, - 0x1e, 0x48, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x43, 0x6f, 0x6e, 0x73, 0x75, - 0x6c, 0x2e, 0x4d, 0x65, 0x73, 0x68, 0x2e, 0x56, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0xca, - 0x02, 0x1e, 0x48, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x5c, 0x43, 0x6f, 0x6e, 0x73, - 0x75, 0x6c, 0x5c, 0x4d, 0x65, 0x73, 0x68, 0x5c, 0x56, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, - 0xe2, 0x02, 0x2a, 0x48, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x5c, 0x43, 0x6f, 0x6e, - 0x73, 0x75, 0x6c, 0x5c, 0x4d, 0x65, 0x73, 0x68, 0x5c, 0x56, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, - 0x31, 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, 0x02, 0x21, - 0x48, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x3a, 0x3a, 0x43, 0x6f, 0x6e, 0x73, 0x75, - 0x6c, 0x3a, 0x3a, 0x4d, 0x65, 0x73, 0x68, 0x3a, 0x3a, 0x56, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, - 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x31, 0x42, 0x17, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x45, 0x67, 0x69, + 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, + 0x72, 0x70, 0x2f, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2d, + 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x2f, 0x70, 0x62, 0x6d, 0x65, 0x73, 0x68, 0x2f, 0x76, 0x31, + 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x3b, 0x6d, 0x65, 0x73, 0x68, 0x76, 0x31, 0x61, 0x6c, 0x70, + 0x68, 0x61, 0x31, 0xa2, 0x02, 0x03, 0x48, 0x43, 0x4d, 0xaa, 0x02, 0x1e, 0x48, 0x61, 0x73, 0x68, + 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 
0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x4d, 0x65, 0x73, + 0x68, 0x2e, 0x56, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0xca, 0x02, 0x1e, 0x48, 0x61, 0x73, + 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x5c, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x5c, 0x4d, 0x65, + 0x73, 0x68, 0x5c, 0x56, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0xe2, 0x02, 0x2a, 0x48, 0x61, + 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x5c, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x5c, 0x4d, + 0x65, 0x73, 0x68, 0x5c, 0x56, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x5c, 0x47, 0x50, 0x42, + 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, 0x02, 0x21, 0x48, 0x61, 0x73, 0x68, 0x69, + 0x63, 0x6f, 0x72, 0x70, 0x3a, 0x3a, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x3a, 0x3a, 0x4d, 0x65, + 0x73, 0x68, 0x3a, 0x3a, 0x56, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x62, 0x06, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x33, } var ( - file_pbmesh_v1alpha1_proxy_proto_rawDescOnce sync.Once - file_pbmesh_v1alpha1_proxy_proto_rawDescData = file_pbmesh_v1alpha1_proxy_proto_rawDesc + file_pbmesh_v1alpha1_proxy_configuration_proto_rawDescOnce sync.Once + file_pbmesh_v1alpha1_proxy_configuration_proto_rawDescData = file_pbmesh_v1alpha1_proxy_configuration_proto_rawDesc ) -func file_pbmesh_v1alpha1_proxy_proto_rawDescGZIP() []byte { - file_pbmesh_v1alpha1_proxy_proto_rawDescOnce.Do(func() { - file_pbmesh_v1alpha1_proxy_proto_rawDescData = protoimpl.X.CompressGZIP(file_pbmesh_v1alpha1_proxy_proto_rawDescData) +func file_pbmesh_v1alpha1_proxy_configuration_proto_rawDescGZIP() []byte { + file_pbmesh_v1alpha1_proxy_configuration_proto_rawDescOnce.Do(func() { + file_pbmesh_v1alpha1_proxy_configuration_proto_rawDescData = protoimpl.X.CompressGZIP(file_pbmesh_v1alpha1_proxy_configuration_proto_rawDescData) }) - return file_pbmesh_v1alpha1_proxy_proto_rawDescData + return file_pbmesh_v1alpha1_proxy_configuration_proto_rawDescData } -var file_pbmesh_v1alpha1_proxy_proto_enumTypes = make([]protoimpl.EnumInfo, 1) -var file_pbmesh_v1alpha1_proxy_proto_msgTypes = make([]protoimpl.MessageInfo, 5) -var file_pbmesh_v1alpha1_proxy_proto_goTypes = []interface{}{ +var file_pbmesh_v1alpha1_proxy_configuration_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_pbmesh_v1alpha1_proxy_configuration_proto_msgTypes = make([]protoimpl.MessageInfo, 5) +var file_pbmesh_v1alpha1_proxy_configuration_proto_goTypes = []interface{}{ (ProxyMode)(0), // 0: hashicorp.consul.mesh.v1alpha1.ProxyMode (*ProxyConfiguration)(nil), // 1: hashicorp.consul.mesh.v1alpha1.ProxyConfiguration (*DynamicConfig)(nil), // 2: hashicorp.consul.mesh.v1alpha1.DynamicConfig @@ -717,7 +719,7 @@ var file_pbmesh_v1alpha1_proxy_proto_goTypes = []interface{}{ (*ExposeConfig)(nil), // 10: hashicorp.consul.mesh.v1alpha1.ExposeConfig (*ConnectionConfig)(nil), // 11: hashicorp.consul.mesh.v1alpha1.ConnectionConfig } -var file_pbmesh_v1alpha1_proxy_proto_depIdxs = []int32{ +var file_pbmesh_v1alpha1_proxy_configuration_proto_depIdxs = []int32{ 6, // 0: hashicorp.consul.mesh.v1alpha1.ProxyConfiguration.workloads:type_name -> hashicorp.consul.catalog.v1alpha1.WorkloadSelector 2, // 1: hashicorp.consul.mesh.v1alpha1.ProxyConfiguration.dynamic_config:type_name -> hashicorp.consul.mesh.v1alpha1.DynamicConfig 4, // 2: hashicorp.consul.mesh.v1alpha1.ProxyConfiguration.bootstrap_config:type_name -> hashicorp.consul.mesh.v1alpha1.BootstrapConfig @@ -736,16 +738,16 @@ var file_pbmesh_v1alpha1_proxy_proto_depIdxs = []int32{ 0, // [0:11] is the sub-list for field type_name } -func init() { file_pbmesh_v1alpha1_proxy_proto_init() } -func 
file_pbmesh_v1alpha1_proxy_proto_init() { - if File_pbmesh_v1alpha1_proxy_proto != nil { +func init() { file_pbmesh_v1alpha1_proxy_configuration_proto_init() } +func file_pbmesh_v1alpha1_proxy_configuration_proto_init() { + if File_pbmesh_v1alpha1_proxy_configuration_proto != nil { return } file_pbmesh_v1alpha1_connection_proto_init() file_pbmesh_v1alpha1_expose_proto_init() file_pbmesh_v1alpha1_routing_proto_init() if !protoimpl.UnsafeEnabled { - file_pbmesh_v1alpha1_proxy_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_pbmesh_v1alpha1_proxy_configuration_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ProxyConfiguration); i { case 0: return &v.state @@ -757,7 +759,7 @@ func file_pbmesh_v1alpha1_proxy_proto_init() { return nil } } - file_pbmesh_v1alpha1_proxy_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + file_pbmesh_v1alpha1_proxy_configuration_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*DynamicConfig); i { case 0: return &v.state @@ -769,7 +771,7 @@ func file_pbmesh_v1alpha1_proxy_proto_init() { return nil } } - file_pbmesh_v1alpha1_proxy_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + file_pbmesh_v1alpha1_proxy_configuration_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*TransparentProxy); i { case 0: return &v.state @@ -781,7 +783,7 @@ func file_pbmesh_v1alpha1_proxy_proto_init() { return nil } } - file_pbmesh_v1alpha1_proxy_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + file_pbmesh_v1alpha1_proxy_configuration_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*BootstrapConfig); i { case 0: return &v.state @@ -798,19 +800,19 @@ func file_pbmesh_v1alpha1_proxy_proto_init() { out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_pbmesh_v1alpha1_proxy_proto_rawDesc, + RawDescriptor: file_pbmesh_v1alpha1_proxy_configuration_proto_rawDesc, NumEnums: 1, NumMessages: 5, NumExtensions: 0, NumServices: 0, }, - GoTypes: file_pbmesh_v1alpha1_proxy_proto_goTypes, - DependencyIndexes: file_pbmesh_v1alpha1_proxy_proto_depIdxs, - EnumInfos: file_pbmesh_v1alpha1_proxy_proto_enumTypes, - MessageInfos: file_pbmesh_v1alpha1_proxy_proto_msgTypes, + GoTypes: file_pbmesh_v1alpha1_proxy_configuration_proto_goTypes, + DependencyIndexes: file_pbmesh_v1alpha1_proxy_configuration_proto_depIdxs, + EnumInfos: file_pbmesh_v1alpha1_proxy_configuration_proto_enumTypes, + MessageInfos: file_pbmesh_v1alpha1_proxy_configuration_proto_msgTypes, }.Build() - File_pbmesh_v1alpha1_proxy_proto = out.File - file_pbmesh_v1alpha1_proxy_proto_rawDesc = nil - file_pbmesh_v1alpha1_proxy_proto_goTypes = nil - file_pbmesh_v1alpha1_proxy_proto_depIdxs = nil + File_pbmesh_v1alpha1_proxy_configuration_proto = out.File + file_pbmesh_v1alpha1_proxy_configuration_proto_rawDesc = nil + file_pbmesh_v1alpha1_proxy_configuration_proto_goTypes = nil + file_pbmesh_v1alpha1_proxy_configuration_proto_depIdxs = nil } diff --git a/proto-public/pbmesh/v1alpha1/proxy.proto b/proto-public/pbmesh/v1alpha1/proxy_configuration.proto similarity index 100% rename from proto-public/pbmesh/v1alpha1/proxy.proto rename to proto-public/pbmesh/v1alpha1/proxy_configuration.proto diff --git a/proto-public/pbmesh/v1alpha1/upstreams.pb.binary.go b/proto-public/pbmesh/v1alpha1/upstreams.pb.binary.go index cc8214e75a68..d67b41e3a9d0 100644 --- 
a/proto-public/pbmesh/v1alpha1/upstreams.pb.binary.go +++ b/proto-public/pbmesh/v1alpha1/upstreams.pb.binary.go @@ -28,12 +28,12 @@ func (msg *Upstream) UnmarshalBinary(b []byte) error { } // MarshalBinary implements encoding.BinaryMarshaler -func (msg *TCPAddress) MarshalBinary() ([]byte, error) { +func (msg *IPPortAddress) MarshalBinary() ([]byte, error) { return proto.Marshal(msg) } // UnmarshalBinary implements encoding.BinaryUnmarshaler -func (msg *TCPAddress) UnmarshalBinary(b []byte) error { +func (msg *IPPortAddress) UnmarshalBinary(b []byte) error { return proto.Unmarshal(b, msg) } @@ -56,33 +56,3 @@ func (msg *PreparedQueryUpstream) MarshalBinary() ([]byte, error) { func (msg *PreparedQueryUpstream) UnmarshalBinary(b []byte) error { return proto.Unmarshal(b, msg) } - -// MarshalBinary implements encoding.BinaryMarshaler -func (msg *UpstreamConfig) MarshalBinary() ([]byte, error) { - return proto.Marshal(msg) -} - -// UnmarshalBinary implements encoding.BinaryUnmarshaler -func (msg *UpstreamConfig) UnmarshalBinary(b []byte) error { - return proto.Unmarshal(b, msg) -} - -// MarshalBinary implements encoding.BinaryMarshaler -func (msg *UpstreamLimits) MarshalBinary() ([]byte, error) { - return proto.Marshal(msg) -} - -// UnmarshalBinary implements encoding.BinaryUnmarshaler -func (msg *UpstreamLimits) UnmarshalBinary(b []byte) error { - return proto.Unmarshal(b, msg) -} - -// MarshalBinary implements encoding.BinaryMarshaler -func (msg *PassiveHealthCheck) MarshalBinary() ([]byte, error) { - return proto.Marshal(msg) -} - -// UnmarshalBinary implements encoding.BinaryUnmarshaler -func (msg *PassiveHealthCheck) UnmarshalBinary(b []byte) error { - return proto.Unmarshal(b, msg) -} diff --git a/proto-public/pbmesh/v1alpha1/upstreams.pb.go b/proto-public/pbmesh/v1alpha1/upstreams.pb.go index 93b151a3366d..e8e9e4f9cf89 100644 --- a/proto-public/pbmesh/v1alpha1/upstreams.pb.go +++ b/proto-public/pbmesh/v1alpha1/upstreams.pb.go @@ -14,7 +14,6 @@ import ( pbresource "github.com/hashicorp/consul/proto-public/pbresource" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" - durationpb "google.golang.org/protobuf/types/known/durationpb" reflect "reflect" sync "sync" ) @@ -33,10 +32,12 @@ type Upstreams struct { // Selection of workloads these upstreams should apply to. // These can be prefixes or specific workload names. - Workloads *v1alpha1.WorkloadSelector `protobuf:"bytes,1,opt,name=workloads,proto3" json:"workloads,omitempty"` - Upstreams []*Upstream `protobuf:"bytes,2,rep,name=upstreams,proto3" json:"upstreams,omitempty"` - PqUpstreams []*PreparedQueryUpstream `protobuf:"bytes,3,rep,name=pq_upstreams,json=pqUpstreams,proto3" json:"pq_upstreams,omitempty"` - UpstreamConfig *UpstreamConfig `protobuf:"bytes,4,opt,name=upstream_config,json=upstreamConfig,proto3" json:"upstream_config,omitempty"` + Workloads *v1alpha1.WorkloadSelector `protobuf:"bytes,1,opt,name=workloads,proto3" json:"workloads,omitempty"` + // upstreams is the list of explicit upstreams to define for the selected workloads. + Upstreams []*Upstream `protobuf:"bytes,2,rep,name=upstreams,proto3" json:"upstreams,omitempty"` + // pq_upstreams is the list of prepared query upstreams. This field is not supported directly in v2 + // and should only be used for migration reasons. 
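The generated `*.pb.binary.go` wrappers shown in this hunk simply delegate to `proto.Marshal`/`proto.Unmarshal`, so the renamed `IPPortAddress` still satisfies `encoding.BinaryMarshaler`/`BinaryUnmarshaler`. A minimal round-trip sketch, assuming the generated package compiles as in this patch:

```go
package main

import (
	"fmt"

	pbmesh "github.com/hashicorp/consul/proto-public/pbmesh/v1alpha1"
)

func main() {
	in := &pbmesh.IPPortAddress{Ip: "127.0.0.1", Port: 9090}

	// MarshalBinary/UnmarshalBinary are the generated wrappers around
	// proto.Marshal and proto.Unmarshal from upstreams.pb.binary.go.
	raw, err := in.MarshalBinary()
	if err != nil {
		panic(err)
	}

	out := &pbmesh.IPPortAddress{}
	if err := out.UnmarshalBinary(raw); err != nil {
		panic(err)
	}
	fmt.Println(out.Ip, out.Port) // 127.0.0.1 9090
}
```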
+ PqUpstreams []*PreparedQueryUpstream `protobuf:"bytes,3,rep,name=pq_upstreams,json=pqUpstreams,proto3" json:"pq_upstreams,omitempty"` } func (x *Upstreams) Reset() { @@ -92,27 +93,26 @@ func (x *Upstreams) GetPqUpstreams() []*PreparedQueryUpstream { return nil } -func (x *Upstreams) GetUpstreamConfig() *UpstreamConfig { - if x != nil { - return x.UpstreamConfig - } - return nil -} - type Upstream struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - DestinationRef *pbresource.ID `protobuf:"bytes,1,opt,name=destination_ref,json=destinationRef,proto3" json:"destination_ref,omitempty"` - DestinationPort string `protobuf:"bytes,2,opt,name=destination_port,json=destinationPort,proto3" json:"destination_port,omitempty"` - Datacenter string `protobuf:"bytes,3,opt,name=datacenter,proto3" json:"datacenter,omitempty"` + // destination_ref is the reference to an upstream service. This has to be pbcatalog.Service type. + DestinationRef *pbresource.Reference `protobuf:"bytes,1,opt,name=destination_ref,json=destinationRef,proto3" json:"destination_ref,omitempty"` + // destination_port is the port name of the upstream service. This should be the name + // of the service's target port. + DestinationPort string `protobuf:"bytes,2,opt,name=destination_port,json=destinationPort,proto3" json:"destination_port,omitempty"` + // datacenter is the datacenter for where this upstream service lives. + Datacenter string `protobuf:"bytes,3,opt,name=datacenter,proto3" json:"datacenter,omitempty"` + // listen_addr is the address where Envoy will listen for requests to this upstream. + // It can provided either as an ip:port or as a Unix domain socket. + // // Types that are assignable to ListenAddr: // - // *Upstream_Tcp + // *Upstream_IpPort // *Upstream_Unix - ListenAddr isUpstream_ListenAddr `protobuf_oneof:"listen_addr"` - UpstreamConfig *UpstreamConfig `protobuf:"bytes,7,opt,name=upstream_config,json=upstreamConfig,proto3" json:"upstream_config,omitempty"` + ListenAddr isUpstream_ListenAddr `protobuf_oneof:"listen_addr"` } func (x *Upstream) Reset() { @@ -147,7 +147,7 @@ func (*Upstream) Descriptor() ([]byte, []int) { return file_pbmesh_v1alpha1_upstreams_proto_rawDescGZIP(), []int{1} } -func (x *Upstream) GetDestinationRef() *pbresource.ID { +func (x *Upstream) GetDestinationRef() *pbresource.Reference { if x != nil { return x.DestinationRef } @@ -175,9 +175,9 @@ func (m *Upstream) GetListenAddr() isUpstream_ListenAddr { return nil } -func (x *Upstream) GetTcp() *TCPAddress { - if x, ok := x.GetListenAddr().(*Upstream_Tcp); ok { - return x.Tcp +func (x *Upstream) GetIpPort() *IPPortAddress { + if x, ok := x.GetListenAddr().(*Upstream_IpPort); ok { + return x.IpPort } return nil } @@ -189,40 +189,35 @@ func (x *Upstream) GetUnix() *UnixSocketAddress { return nil } -func (x *Upstream) GetUpstreamConfig() *UpstreamConfig { - if x != nil { - return x.UpstreamConfig - } - return nil -} - type isUpstream_ListenAddr interface { isUpstream_ListenAddr() } -type Upstream_Tcp struct { - Tcp *TCPAddress `protobuf:"bytes,4,opt,name=tcp,proto3,oneof"` +type Upstream_IpPort struct { + IpPort *IPPortAddress `protobuf:"bytes,4,opt,name=ip_port,json=ipPort,proto3,oneof"` } type Upstream_Unix struct { Unix *UnixSocketAddress `protobuf:"bytes,5,opt,name=unix,proto3,oneof"` } -func (*Upstream_Tcp) isUpstream_ListenAddr() {} +func (*Upstream_IpPort) isUpstream_ListenAddr() {} func (*Upstream_Unix) isUpstream_ListenAddr() {} -type TCPAddress struct { +type IPPortAddress struct { 
state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Ip string `protobuf:"bytes,1,opt,name=ip,proto3" json:"ip,omitempty"` + // ip is an IPv4 or an IPv6 address. + Ip string `protobuf:"bytes,1,opt,name=ip,proto3" json:"ip,omitempty"` + // port is the port number. Port uint32 `protobuf:"varint,2,opt,name=port,proto3" json:"port,omitempty"` } -func (x *TCPAddress) Reset() { - *x = TCPAddress{} +func (x *IPPortAddress) Reset() { + *x = IPPortAddress{} if protoimpl.UnsafeEnabled { mi := &file_pbmesh_v1alpha1_upstreams_proto_msgTypes[2] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -230,13 +225,13 @@ func (x *TCPAddress) Reset() { } } -func (x *TCPAddress) String() string { +func (x *IPPortAddress) String() string { return protoimpl.X.MessageStringOf(x) } -func (*TCPAddress) ProtoMessage() {} +func (*IPPortAddress) ProtoMessage() {} -func (x *TCPAddress) ProtoReflect() protoreflect.Message { +func (x *IPPortAddress) ProtoReflect() protoreflect.Message { mi := &file_pbmesh_v1alpha1_upstreams_proto_msgTypes[2] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -248,19 +243,19 @@ func (x *TCPAddress) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use TCPAddress.ProtoReflect.Descriptor instead. -func (*TCPAddress) Descriptor() ([]byte, []int) { +// Deprecated: Use IPPortAddress.ProtoReflect.Descriptor instead. +func (*IPPortAddress) Descriptor() ([]byte, []int) { return file_pbmesh_v1alpha1_upstreams_proto_rawDescGZIP(), []int{2} } -func (x *TCPAddress) GetIp() string { +func (x *IPPortAddress) GetIp() string { if x != nil { return x.Ip } return "" } -func (x *TCPAddress) GetPort() uint32 { +func (x *IPPortAddress) GetPort() uint32 { if x != nil { return x.Port } @@ -272,7 +267,10 @@ type UnixSocketAddress struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields + // path is the file system path at which to bind a Unix domain socket listener. Path string `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"` + // mode is the Unix file mode for the socket file. It should be provided + // in the numeric notation, for example, "0600". Mode string `protobuf:"bytes,2,opt,name=mode,proto3" json:"mode,omitempty"` } @@ -327,8 +325,13 @@ type PreparedQueryUpstream struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // name is the name of the prepared query to use as an upstream. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // datacenter is the datacenter for where this upstream service lives. Datacenter string `protobuf:"bytes,2,opt,name=datacenter,proto3" json:"datacenter,omitempty"` + // listen_addr is the address where Envoy will listen for requests to this upstream. + // It can provided either as an ip:port or as a Unix domain socket. 
+ // // Types that are assignable to ListenAddr: // // *PreparedQueryUpstream_Tcp @@ -390,7 +393,7 @@ func (m *PreparedQueryUpstream) GetListenAddr() isPreparedQueryUpstream_ListenAd return nil } -func (x *PreparedQueryUpstream) GetTcp() *TCPAddress { +func (x *PreparedQueryUpstream) GetTcp() *IPPortAddress { if x, ok := x.GetListenAddr().(*PreparedQueryUpstream_Tcp); ok { return x.Tcp } @@ -416,7 +419,7 @@ type isPreparedQueryUpstream_ListenAddr interface { } type PreparedQueryUpstream_Tcp struct { - Tcp *TCPAddress `protobuf:"bytes,4,opt,name=tcp,proto3,oneof"` + Tcp *IPPortAddress `protobuf:"bytes,4,opt,name=tcp,proto3,oneof"` } type PreparedQueryUpstream_Unix struct { @@ -427,229 +430,6 @@ func (*PreparedQueryUpstream_Tcp) isPreparedQueryUpstream_ListenAddr() {} func (*PreparedQueryUpstream_Unix) isPreparedQueryUpstream_ListenAddr() {} -type UpstreamConfig struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - ConnectTimeoutMs uint64 `protobuf:"varint,2,opt,name=connect_timeout_ms,json=connectTimeoutMs,proto3" json:"connect_timeout_ms,omitempty"` - Limits *UpstreamLimits `protobuf:"bytes,3,opt,name=limits,proto3" json:"limits,omitempty"` - PassiveHealthCheck *PassiveHealthCheck `protobuf:"bytes,4,opt,name=passive_health_check,json=passiveHealthCheck,proto3" json:"passive_health_check,omitempty"` - BalanceOutboundConnections BalanceConnections `protobuf:"varint,5,opt,name=balance_outbound_connections,json=balanceOutboundConnections,proto3,enum=hashicorp.consul.mesh.v1alpha1.BalanceConnections" json:"balance_outbound_connections,omitempty"` - MeshGatewayMode MeshGatewayMode `protobuf:"varint,6,opt,name=mesh_gateway_mode,json=meshGatewayMode,proto3,enum=hashicorp.consul.mesh.v1alpha1.MeshGatewayMode" json:"mesh_gateway_mode,omitempty"` -} - -func (x *UpstreamConfig) Reset() { - *x = UpstreamConfig{} - if protoimpl.UnsafeEnabled { - mi := &file_pbmesh_v1alpha1_upstreams_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *UpstreamConfig) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*UpstreamConfig) ProtoMessage() {} - -func (x *UpstreamConfig) ProtoReflect() protoreflect.Message { - mi := &file_pbmesh_v1alpha1_upstreams_proto_msgTypes[5] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use UpstreamConfig.ProtoReflect.Descriptor instead. 
-func (*UpstreamConfig) Descriptor() ([]byte, []int) { - return file_pbmesh_v1alpha1_upstreams_proto_rawDescGZIP(), []int{5} -} - -func (x *UpstreamConfig) GetConnectTimeoutMs() uint64 { - if x != nil { - return x.ConnectTimeoutMs - } - return 0 -} - -func (x *UpstreamConfig) GetLimits() *UpstreamLimits { - if x != nil { - return x.Limits - } - return nil -} - -func (x *UpstreamConfig) GetPassiveHealthCheck() *PassiveHealthCheck { - if x != nil { - return x.PassiveHealthCheck - } - return nil -} - -func (x *UpstreamConfig) GetBalanceOutboundConnections() BalanceConnections { - if x != nil { - return x.BalanceOutboundConnections - } - return BalanceConnections_BALANCE_CONNECTIONS_DEFAULT -} - -func (x *UpstreamConfig) GetMeshGatewayMode() MeshGatewayMode { - if x != nil { - return x.MeshGatewayMode - } - return MeshGatewayMode_MESH_GATEWAY_MODE_UNSPECIFIED -} - -// UpstreamLimits describes the limits that are associated with a specific -// upstream of a service instance. -type UpstreamLimits struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // MaxConnections is the maximum number of connections the local proxy can - // make to the upstream service. - MaxConnections int32 `protobuf:"varint,1,opt,name=max_connections,json=maxConnections,proto3" json:"max_connections,omitempty"` - // MaxPendingRequests is the maximum number of requests that will be queued - // waiting for an available connection. This is mostly applicable to HTTP/1.1 - // clusters since all HTTP/2 requests are streamed over a single - // connection. - MaxPendingRequests int32 `protobuf:"varint,2,opt,name=max_pending_requests,json=maxPendingRequests,proto3" json:"max_pending_requests,omitempty"` - // MaxConcurrentRequests is the maximum number of in-flight requests that will be allowed - // to the upstream cluster at a point in time. This is mostly applicable to HTTP/2 - // clusters since all HTTP/1.1 requests are limited by MaxConnections. - MaxConcurrentRequests int32 `protobuf:"varint,3,opt,name=max_concurrent_requests,json=maxConcurrentRequests,proto3" json:"max_concurrent_requests,omitempty"` -} - -func (x *UpstreamLimits) Reset() { - *x = UpstreamLimits{} - if protoimpl.UnsafeEnabled { - mi := &file_pbmesh_v1alpha1_upstreams_proto_msgTypes[6] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *UpstreamLimits) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*UpstreamLimits) ProtoMessage() {} - -func (x *UpstreamLimits) ProtoReflect() protoreflect.Message { - mi := &file_pbmesh_v1alpha1_upstreams_proto_msgTypes[6] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use UpstreamLimits.ProtoReflect.Descriptor instead. 
-func (*UpstreamLimits) Descriptor() ([]byte, []int) { - return file_pbmesh_v1alpha1_upstreams_proto_rawDescGZIP(), []int{6} -} - -func (x *UpstreamLimits) GetMaxConnections() int32 { - if x != nil { - return x.MaxConnections - } - return 0 -} - -func (x *UpstreamLimits) GetMaxPendingRequests() int32 { - if x != nil { - return x.MaxPendingRequests - } - return 0 -} - -func (x *UpstreamLimits) GetMaxConcurrentRequests() int32 { - if x != nil { - return x.MaxConcurrentRequests - } - return 0 -} - -type PassiveHealthCheck struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Interval between health check analysis sweeps. Each sweep may remove - // hosts or return hosts to the pool. - Interval *durationpb.Duration `protobuf:"bytes,1,opt,name=interval,proto3" json:"interval,omitempty"` - // MaxFailures is the count of consecutive failures that results in a host - // being removed from the pool. - MaxFailures uint32 `protobuf:"varint,2,opt,name=max_failures,json=maxFailures,proto3" json:"max_failures,omitempty"` - // EnforcingConsecutive5xx is the % chance that a host will be actually ejected - // when an outlier status is detected through consecutive 5xx. - // This setting can be used to disable ejection or to ramp it up slowly. Defaults to 100. - EnforcingConsecutive_5Xx uint32 `protobuf:"varint,3,opt,name=enforcing_consecutive_5xx,json=enforcingConsecutive5xx,proto3" json:"enforcing_consecutive_5xx,omitempty"` -} - -func (x *PassiveHealthCheck) Reset() { - *x = PassiveHealthCheck{} - if protoimpl.UnsafeEnabled { - mi := &file_pbmesh_v1alpha1_upstreams_proto_msgTypes[7] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *PassiveHealthCheck) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*PassiveHealthCheck) ProtoMessage() {} - -func (x *PassiveHealthCheck) ProtoReflect() protoreflect.Message { - mi := &file_pbmesh_v1alpha1_upstreams_proto_msgTypes[7] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use PassiveHealthCheck.ProtoReflect.Descriptor instead. 
-func (*PassiveHealthCheck) Descriptor() ([]byte, []int) { - return file_pbmesh_v1alpha1_upstreams_proto_rawDescGZIP(), []int{7} -} - -func (x *PassiveHealthCheck) GetInterval() *durationpb.Duration { - if x != nil { - return x.Interval - } - return nil -} - -func (x *PassiveHealthCheck) GetMaxFailures() uint32 { - if x != nil { - return x.MaxFailures - } - return 0 -} - -func (x *PassiveHealthCheck) GetEnforcingConsecutive_5Xx() uint32 { - if x != nil { - return x.EnforcingConsecutive_5Xx - } - return 0 -} - var File_pbmesh_v1alpha1_upstreams_proto protoreflect.FileDescriptor var file_pbmesh_v1alpha1_upstreams_proto_rawDesc = []byte{ @@ -657,158 +437,95 @@ var file_pbmesh_v1alpha1_upstreams_proto_rawDesc = []byte{ 0x31, 0x2f, 0x75, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x1e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x6d, 0x65, 0x73, 0x68, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, - 0x31, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x1a, 0x21, 0x70, 0x62, 0x63, 0x61, 0x74, 0x61, 0x6c, 0x6f, 0x67, 0x2f, 0x76, 0x31, 0x61, + 0x31, 0x1a, 0x21, 0x70, 0x62, 0x63, 0x61, 0x74, 0x61, 0x6c, 0x6f, 0x67, 0x2f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2f, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x20, 0x70, 0x62, 0x6d, 0x65, 0x73, 0x68, 0x2f, 0x76, 0x31, 0x61, - 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x70, 0x62, 0x6d, 0x65, 0x73, 0x68, 0x2f, 0x76, - 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2f, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x19, 0x70, 0x62, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, - 0x65, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x22, 0xd9, 0x02, 0x0a, 0x09, 0x55, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73, 0x12, 0x51, - 0x0a, 0x09, 0x77, 0x6f, 0x72, 0x6b, 0x6c, 0x6f, 0x61, 0x64, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x33, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, - 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x63, 0x61, 0x74, 0x61, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x31, 0x61, - 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x65, - 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x52, 0x09, 0x77, 0x6f, 0x72, 0x6b, 0x6c, 0x6f, 0x61, 0x64, - 0x73, 0x12, 0x46, 0x0a, 0x09, 0x75, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73, 0x18, 0x02, - 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, - 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x6d, 0x65, 0x73, 0x68, 0x2e, 0x76, 0x31, 0x61, - 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x55, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52, 0x09, - 0x75, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73, 0x12, 0x58, 0x0a, 0x0c, 0x70, 0x71, 0x5f, - 0x75, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x35, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, - 0x75, 0x6c, 0x2e, 0x6d, 0x65, 0x73, 0x68, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, - 0x2e, 0x50, 0x72, 0x65, 0x70, 0x61, 0x72, 0x65, 0x64, 0x51, 0x75, 0x65, 0x72, 0x79, 0x55, 
0x70, - 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52, 0x0b, 0x70, 0x71, 0x55, 0x70, 0x73, 0x74, 0x72, 0x65, - 0x61, 0x6d, 0x73, 0x12, 0x57, 0x0a, 0x0f, 0x75, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, - 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x68, - 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, - 0x6d, 0x65, 0x73, 0x68, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x55, 0x70, - 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0e, 0x75, 0x70, - 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0x8e, 0x03, 0x0a, - 0x08, 0x55, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x12, 0x46, 0x0a, 0x0f, 0x64, 0x65, 0x73, - 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x66, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, - 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x49, - 0x44, 0x52, 0x0e, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, - 0x66, 0x12, 0x29, 0x0a, 0x10, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x64, 0x65, 0x73, - 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x72, 0x74, 0x12, 0x1e, 0x0a, 0x0a, - 0x64, 0x61, 0x74, 0x61, 0x63, 0x65, 0x6e, 0x74, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x0a, 0x64, 0x61, 0x74, 0x61, 0x63, 0x65, 0x6e, 0x74, 0x65, 0x72, 0x12, 0x3e, 0x0a, 0x03, - 0x74, 0x63, 0x70, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x68, 0x61, 0x73, 0x68, + 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x2d, 0x70, 0x62, 0x6d, 0x65, 0x73, 0x68, 0x2f, 0x76, 0x31, 0x61, + 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2f, 0x75, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73, 0x5f, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x1a, 0x19, 0x70, 0x62, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2f, + 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x80, + 0x02, 0x0a, 0x09, 0x55, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73, 0x12, 0x51, 0x0a, 0x09, + 0x77, 0x6f, 0x72, 0x6b, 0x6c, 0x6f, 0x61, 0x64, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x33, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, + 0x75, 0x6c, 0x2e, 0x63, 0x61, 0x74, 0x61, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, + 0x68, 0x61, 0x31, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x65, 0x6c, 0x65, + 0x63, 0x74, 0x6f, 0x72, 0x52, 0x09, 0x77, 0x6f, 0x72, 0x6b, 0x6c, 0x6f, 0x61, 0x64, 0x73, 0x12, + 0x46, 0x0a, 0x09, 0x75, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73, 0x18, 0x02, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, + 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x6d, 0x65, 0x73, 0x68, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, + 0x68, 0x61, 0x31, 0x2e, 0x55, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52, 0x09, 0x75, 0x70, + 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73, 0x12, 0x58, 0x0a, 0x0c, 0x70, 0x71, 0x5f, 0x75, 0x70, + 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x35, 0x2e, + 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, + 0x2e, 0x6d, 0x65, 
0x73, 0x68, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x50, + 0x72, 0x65, 0x70, 0x61, 0x72, 0x65, 0x64, 0x51, 0x75, 0x65, 0x72, 0x79, 0x55, 0x70, 0x73, 0x74, + 0x72, 0x65, 0x61, 0x6d, 0x52, 0x0b, 0x70, 0x71, 0x55, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, + 0x73, 0x22, 0xc6, 0x02, 0x0a, 0x08, 0x55, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x12, 0x4d, + 0x0a, 0x0f, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, + 0x66, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, + 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x72, 0x65, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x2e, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x52, 0x0e, 0x64, + 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x66, 0x12, 0x29, 0x0a, + 0x10, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x6f, 0x72, + 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x72, 0x74, 0x12, 0x1e, 0x0a, 0x0a, 0x64, 0x61, 0x74, 0x61, + 0x63, 0x65, 0x6e, 0x74, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x64, 0x61, + 0x74, 0x61, 0x63, 0x65, 0x6e, 0x74, 0x65, 0x72, 0x12, 0x48, 0x0a, 0x07, 0x69, 0x70, 0x5f, 0x70, + 0x6f, 0x72, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x6d, 0x65, 0x73, - 0x68, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x54, 0x43, 0x50, 0x41, 0x64, - 0x64, 0x72, 0x65, 0x73, 0x73, 0x48, 0x00, 0x52, 0x03, 0x74, 0x63, 0x70, 0x12, 0x47, 0x0a, 0x04, - 0x75, 0x6e, 0x69, 0x78, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x68, 0x61, 0x73, - 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x6d, 0x65, - 0x73, 0x68, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x55, 0x6e, 0x69, 0x78, - 0x53, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x48, 0x00, 0x52, - 0x04, 0x75, 0x6e, 0x69, 0x78, 0x12, 0x57, 0x0a, 0x0f, 0x75, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, - 0x6d, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2e, - 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, - 0x6c, 0x2e, 0x6d, 0x65, 0x73, 0x68, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, - 0x55, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0e, - 0x75, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x0d, - 0x0a, 0x0b, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x22, 0x30, 0x0a, - 0x0a, 0x54, 0x43, 0x50, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x0e, 0x0a, 0x02, 0x69, + 0x68, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x49, 0x50, 0x50, 0x6f, 0x72, + 0x74, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x48, 0x00, 0x52, 0x06, 0x69, 0x70, 0x50, 0x6f, + 0x72, 0x74, 0x12, 0x47, 0x0a, 0x04, 0x75, 0x6e, 0x69, 0x78, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x31, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, + 0x73, 0x75, 0x6c, 0x2e, 0x6d, 0x65, 0x73, 0x68, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, + 0x31, 0x2e, 0x55, 0x6e, 0x69, 0x78, 0x53, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x41, 0x64, 0x64, 0x72, + 0x65, 0x73, 0x73, 0x48, 0x00, 0x52, 0x04, 
0x75, 0x6e, 0x69, 0x78, 0x42, 0x0d, 0x0a, 0x0b, 0x6c, + 0x69, 0x73, 0x74, 0x65, 0x6e, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x22, 0x33, 0x0a, 0x0d, 0x49, 0x50, + 0x50, 0x6f, 0x72, 0x74, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x70, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x70, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x04, 0x70, 0x6f, 0x72, 0x74, 0x22, 0x3b, 0x0a, 0x11, 0x55, 0x6e, 0x69, 0x78, 0x53, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x12, 0x0a, 0x04, 0x6d, 0x6f, 0x64, 0x65, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6d, 0x6f, 0x64, 0x65, 0x22, 0xbc, 0x02, 0x0a, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6d, 0x6f, 0x64, 0x65, 0x22, 0xbf, 0x02, 0x0a, 0x15, 0x50, 0x72, 0x65, 0x70, 0x61, 0x72, 0x65, 0x64, 0x51, 0x75, 0x65, 0x72, 0x79, 0x55, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1e, 0x0a, 0x0a, 0x64, 0x61, 0x74, 0x61, 0x63, 0x65, 0x6e, 0x74, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, - 0x64, 0x61, 0x74, 0x61, 0x63, 0x65, 0x6e, 0x74, 0x65, 0x72, 0x12, 0x3e, 0x0a, 0x03, 0x74, 0x63, - 0x70, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, + 0x64, 0x61, 0x74, 0x61, 0x63, 0x65, 0x6e, 0x74, 0x65, 0x72, 0x12, 0x41, 0x0a, 0x03, 0x74, 0x63, + 0x70, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x6d, 0x65, 0x73, 0x68, 0x2e, - 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x54, 0x43, 0x50, 0x41, 0x64, 0x64, 0x72, - 0x65, 0x73, 0x73, 0x48, 0x00, 0x52, 0x03, 0x74, 0x63, 0x70, 0x12, 0x47, 0x0a, 0x04, 0x75, 0x6e, - 0x69, 0x78, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, - 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x6d, 0x65, 0x73, 0x68, - 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x55, 0x6e, 0x69, 0x78, 0x53, 0x6f, - 0x63, 0x6b, 0x65, 0x74, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x48, 0x00, 0x52, 0x04, 0x75, - 0x6e, 0x69, 0x78, 0x12, 0x57, 0x0a, 0x0f, 0x75, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, - 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x68, - 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, - 0x6d, 0x65, 0x73, 0x68, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x55, 0x70, - 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0e, 0x75, 0x70, - 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x0d, 0x0a, 0x0b, - 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x22, 0xbf, 0x03, 0x0a, 0x0e, - 0x55, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x2c, - 0x0a, 0x12, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, - 0x74, 0x5f, 0x6d, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x10, 0x63, 0x6f, 0x6e, 0x6e, - 0x65, 0x63, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x4d, 0x73, 0x12, 0x46, 0x0a, 0x06, - 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2e, 
0x2e, 0x68, - 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, - 0x6d, 0x65, 0x73, 0x68, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x55, 0x70, - 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x73, 0x52, 0x06, 0x6c, 0x69, - 0x6d, 0x69, 0x74, 0x73, 0x12, 0x64, 0x0a, 0x14, 0x70, 0x61, 0x73, 0x73, 0x69, 0x76, 0x65, 0x5f, - 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x18, 0x04, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, - 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x6d, 0x65, 0x73, 0x68, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, - 0x68, 0x61, 0x31, 0x2e, 0x50, 0x61, 0x73, 0x73, 0x69, 0x76, 0x65, 0x48, 0x65, 0x61, 0x6c, 0x74, - 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x12, 0x70, 0x61, 0x73, 0x73, 0x69, 0x76, 0x65, 0x48, - 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x12, 0x74, 0x0a, 0x1c, 0x62, 0x61, - 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x5f, 0x6f, 0x75, 0x74, 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x5f, 0x63, - 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, - 0x32, 0x32, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, - 0x73, 0x75, 0x6c, 0x2e, 0x6d, 0x65, 0x73, 0x68, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, - 0x31, 0x2e, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x1a, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x4f, 0x75, 0x74, - 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x12, 0x5b, 0x0a, 0x11, 0x6d, 0x65, 0x73, 0x68, 0x5f, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, - 0x5f, 0x6d, 0x6f, 0x64, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2f, 0x2e, 0x68, 0x61, + 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x49, 0x50, 0x50, 0x6f, 0x72, 0x74, 0x41, + 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x48, 0x00, 0x52, 0x03, 0x74, 0x63, 0x70, 0x12, 0x47, 0x0a, + 0x04, 0x75, 0x6e, 0x69, 0x78, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x6d, - 0x65, 0x73, 0x68, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x4d, 0x65, 0x73, - 0x68, 0x47, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x4d, 0x6f, 0x64, 0x65, 0x52, 0x0f, 0x6d, 0x65, - 0x73, 0x68, 0x47, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x4d, 0x6f, 0x64, 0x65, 0x22, 0xa3, 0x01, - 0x0a, 0x0e, 0x55, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x73, - 0x12, 0x27, 0x0a, 0x0f, 0x6d, 0x61, 0x78, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0e, 0x6d, 0x61, 0x78, 0x43, 0x6f, - 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x30, 0x0a, 0x14, 0x6d, 0x61, 0x78, - 0x5f, 0x70, 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x12, 0x6d, 0x61, 0x78, 0x50, 0x65, 0x6e, 0x64, - 0x69, 0x6e, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x12, 0x36, 0x0a, 0x17, 0x6d, - 0x61, 0x78, 0x5f, 0x63, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x72, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x15, 0x6d, 0x61, - 0x78, 0x43, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 
0x73, 0x22, 0xaa, 0x01, 0x0a, 0x12, 0x50, 0x61, 0x73, 0x73, 0x69, 0x76, 0x65, 0x48, - 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x12, 0x35, 0x0a, 0x08, 0x69, 0x6e, - 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, - 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x08, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, - 0x6c, 0x12, 0x21, 0x0a, 0x0c, 0x6d, 0x61, 0x78, 0x5f, 0x66, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, - 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0b, 0x6d, 0x61, 0x78, 0x46, 0x61, 0x69, 0x6c, - 0x75, 0x72, 0x65, 0x73, 0x12, 0x3a, 0x0a, 0x19, 0x65, 0x6e, 0x66, 0x6f, 0x72, 0x63, 0x69, 0x6e, - 0x67, 0x5f, 0x63, 0x6f, 0x6e, 0x73, 0x65, 0x63, 0x75, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x35, 0x78, - 0x78, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x17, 0x65, 0x6e, 0x66, 0x6f, 0x72, 0x63, 0x69, - 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x73, 0x65, 0x63, 0x75, 0x74, 0x69, 0x76, 0x65, 0x35, 0x78, 0x78, - 0x42, 0x96, 0x02, 0x0a, 0x22, 0x63, 0x6f, 0x6d, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, - 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x6d, 0x65, 0x73, 0x68, 0x2e, 0x76, - 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x42, 0x0e, 0x55, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, - 0x6d, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x45, 0x67, 0x69, 0x74, 0x68, 0x75, - 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2f, - 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2d, 0x70, 0x75, 0x62, - 0x6c, 0x69, 0x63, 0x2f, 0x70, 0x62, 0x6d, 0x65, 0x73, 0x68, 0x2f, 0x76, 0x31, 0x61, 0x6c, 0x70, - 0x68, 0x61, 0x31, 0x3b, 0x6d, 0x65, 0x73, 0x68, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, - 0xa2, 0x02, 0x03, 0x48, 0x43, 0x4d, 0xaa, 0x02, 0x1e, 0x48, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, - 0x72, 0x70, 0x2e, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x4d, 0x65, 0x73, 0x68, 0x2e, 0x56, - 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0xca, 0x02, 0x1e, 0x48, 0x61, 0x73, 0x68, 0x69, 0x63, - 0x6f, 0x72, 0x70, 0x5c, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x5c, 0x4d, 0x65, 0x73, 0x68, 0x5c, - 0x56, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0xe2, 0x02, 0x2a, 0x48, 0x61, 0x73, 0x68, 0x69, - 0x63, 0x6f, 0x72, 0x70, 0x5c, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x5c, 0x4d, 0x65, 0x73, 0x68, - 0x5c, 0x56, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, - 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, 0x02, 0x21, 0x48, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, - 0x70, 0x3a, 0x3a, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x3a, 0x3a, 0x4d, 0x65, 0x73, 0x68, 0x3a, - 0x3a, 0x56, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x33, + 0x65, 0x73, 0x68, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x55, 0x6e, 0x69, + 0x78, 0x53, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x48, 0x00, + 0x52, 0x04, 0x75, 0x6e, 0x69, 0x78, 0x12, 0x57, 0x0a, 0x0f, 0x75, 0x70, 0x73, 0x74, 0x72, 0x65, + 0x61, 0x6d, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x2e, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, + 0x75, 0x6c, 0x2e, 0x6d, 0x65, 0x73, 0x68, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, + 0x2e, 0x55, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, + 0x0e, 0x75, 0x70, 0x73, 0x74, 
0x72, 0x65, 0x61, 0x6d, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, + 0x0d, 0x0a, 0x0b, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x42, 0x96, + 0x02, 0x0a, 0x22, 0x63, 0x6f, 0x6d, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, + 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x6d, 0x65, 0x73, 0x68, 0x2e, 0x76, 0x31, 0x61, + 0x6c, 0x70, 0x68, 0x61, 0x31, 0x42, 0x0e, 0x55, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73, + 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x45, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, + 0x63, 0x6f, 0x6d, 0x2f, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2f, 0x63, 0x6f, + 0x6e, 0x73, 0x75, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2d, 0x70, 0x75, 0x62, 0x6c, 0x69, + 0x63, 0x2f, 0x70, 0x62, 0x6d, 0x65, 0x73, 0x68, 0x2f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, + 0x31, 0x3b, 0x6d, 0x65, 0x73, 0x68, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0xa2, 0x02, + 0x03, 0x48, 0x43, 0x4d, 0xaa, 0x02, 0x1e, 0x48, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, + 0x2e, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x4d, 0x65, 0x73, 0x68, 0x2e, 0x56, 0x31, 0x61, + 0x6c, 0x70, 0x68, 0x61, 0x31, 0xca, 0x02, 0x1e, 0x48, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, + 0x70, 0x5c, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x5c, 0x4d, 0x65, 0x73, 0x68, 0x5c, 0x56, 0x31, + 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0xe2, 0x02, 0x2a, 0x48, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, + 0x72, 0x70, 0x5c, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x5c, 0x4d, 0x65, 0x73, 0x68, 0x5c, 0x56, + 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, + 0x61, 0x74, 0x61, 0xea, 0x02, 0x21, 0x48, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x3a, + 0x3a, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x3a, 0x3a, 0x4d, 0x65, 0x73, 0x68, 0x3a, 0x3a, 0x56, + 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -823,44 +540,32 @@ func file_pbmesh_v1alpha1_upstreams_proto_rawDescGZIP() []byte { return file_pbmesh_v1alpha1_upstreams_proto_rawDescData } -var file_pbmesh_v1alpha1_upstreams_proto_msgTypes = make([]protoimpl.MessageInfo, 8) +var file_pbmesh_v1alpha1_upstreams_proto_msgTypes = make([]protoimpl.MessageInfo, 5) var file_pbmesh_v1alpha1_upstreams_proto_goTypes = []interface{}{ (*Upstreams)(nil), // 0: hashicorp.consul.mesh.v1alpha1.Upstreams (*Upstream)(nil), // 1: hashicorp.consul.mesh.v1alpha1.Upstream - (*TCPAddress)(nil), // 2: hashicorp.consul.mesh.v1alpha1.TCPAddress + (*IPPortAddress)(nil), // 2: hashicorp.consul.mesh.v1alpha1.IPPortAddress (*UnixSocketAddress)(nil), // 3: hashicorp.consul.mesh.v1alpha1.UnixSocketAddress (*PreparedQueryUpstream)(nil), // 4: hashicorp.consul.mesh.v1alpha1.PreparedQueryUpstream - (*UpstreamConfig)(nil), // 5: hashicorp.consul.mesh.v1alpha1.UpstreamConfig - (*UpstreamLimits)(nil), // 6: hashicorp.consul.mesh.v1alpha1.UpstreamLimits - (*PassiveHealthCheck)(nil), // 7: hashicorp.consul.mesh.v1alpha1.PassiveHealthCheck - (*v1alpha1.WorkloadSelector)(nil), // 8: hashicorp.consul.catalog.v1alpha1.WorkloadSelector - (*pbresource.ID)(nil), // 9: hashicorp.consul.resource.ID - (BalanceConnections)(0), // 10: hashicorp.consul.mesh.v1alpha1.BalanceConnections - (MeshGatewayMode)(0), // 11: hashicorp.consul.mesh.v1alpha1.MeshGatewayMode - (*durationpb.Duration)(nil), // 12: google.protobuf.Duration + (*v1alpha1.WorkloadSelector)(nil), // 5: hashicorp.consul.catalog.v1alpha1.WorkloadSelector + (*pbresource.Reference)(nil), // 6: hashicorp.consul.resource.Reference + 
(*UpstreamConfig)(nil), // 7: hashicorp.consul.mesh.v1alpha1.UpstreamConfig } var file_pbmesh_v1alpha1_upstreams_proto_depIdxs = []int32{ - 8, // 0: hashicorp.consul.mesh.v1alpha1.Upstreams.workloads:type_name -> hashicorp.consul.catalog.v1alpha1.WorkloadSelector - 1, // 1: hashicorp.consul.mesh.v1alpha1.Upstreams.upstreams:type_name -> hashicorp.consul.mesh.v1alpha1.Upstream - 4, // 2: hashicorp.consul.mesh.v1alpha1.Upstreams.pq_upstreams:type_name -> hashicorp.consul.mesh.v1alpha1.PreparedQueryUpstream - 5, // 3: hashicorp.consul.mesh.v1alpha1.Upstreams.upstream_config:type_name -> hashicorp.consul.mesh.v1alpha1.UpstreamConfig - 9, // 4: hashicorp.consul.mesh.v1alpha1.Upstream.destination_ref:type_name -> hashicorp.consul.resource.ID - 2, // 5: hashicorp.consul.mesh.v1alpha1.Upstream.tcp:type_name -> hashicorp.consul.mesh.v1alpha1.TCPAddress - 3, // 6: hashicorp.consul.mesh.v1alpha1.Upstream.unix:type_name -> hashicorp.consul.mesh.v1alpha1.UnixSocketAddress - 5, // 7: hashicorp.consul.mesh.v1alpha1.Upstream.upstream_config:type_name -> hashicorp.consul.mesh.v1alpha1.UpstreamConfig - 2, // 8: hashicorp.consul.mesh.v1alpha1.PreparedQueryUpstream.tcp:type_name -> hashicorp.consul.mesh.v1alpha1.TCPAddress - 3, // 9: hashicorp.consul.mesh.v1alpha1.PreparedQueryUpstream.unix:type_name -> hashicorp.consul.mesh.v1alpha1.UnixSocketAddress - 5, // 10: hashicorp.consul.mesh.v1alpha1.PreparedQueryUpstream.upstream_config:type_name -> hashicorp.consul.mesh.v1alpha1.UpstreamConfig - 6, // 11: hashicorp.consul.mesh.v1alpha1.UpstreamConfig.limits:type_name -> hashicorp.consul.mesh.v1alpha1.UpstreamLimits - 7, // 12: hashicorp.consul.mesh.v1alpha1.UpstreamConfig.passive_health_check:type_name -> hashicorp.consul.mesh.v1alpha1.PassiveHealthCheck - 10, // 13: hashicorp.consul.mesh.v1alpha1.UpstreamConfig.balance_outbound_connections:type_name -> hashicorp.consul.mesh.v1alpha1.BalanceConnections - 11, // 14: hashicorp.consul.mesh.v1alpha1.UpstreamConfig.mesh_gateway_mode:type_name -> hashicorp.consul.mesh.v1alpha1.MeshGatewayMode - 12, // 15: hashicorp.consul.mesh.v1alpha1.PassiveHealthCheck.interval:type_name -> google.protobuf.Duration - 16, // [16:16] is the sub-list for method output_type - 16, // [16:16] is the sub-list for method input_type - 16, // [16:16] is the sub-list for extension type_name - 16, // [16:16] is the sub-list for extension extendee - 0, // [0:16] is the sub-list for field type_name + 5, // 0: hashicorp.consul.mesh.v1alpha1.Upstreams.workloads:type_name -> hashicorp.consul.catalog.v1alpha1.WorkloadSelector + 1, // 1: hashicorp.consul.mesh.v1alpha1.Upstreams.upstreams:type_name -> hashicorp.consul.mesh.v1alpha1.Upstream + 4, // 2: hashicorp.consul.mesh.v1alpha1.Upstreams.pq_upstreams:type_name -> hashicorp.consul.mesh.v1alpha1.PreparedQueryUpstream + 6, // 3: hashicorp.consul.mesh.v1alpha1.Upstream.destination_ref:type_name -> hashicorp.consul.resource.Reference + 2, // 4: hashicorp.consul.mesh.v1alpha1.Upstream.ip_port:type_name -> hashicorp.consul.mesh.v1alpha1.IPPortAddress + 3, // 5: hashicorp.consul.mesh.v1alpha1.Upstream.unix:type_name -> hashicorp.consul.mesh.v1alpha1.UnixSocketAddress + 2, // 6: hashicorp.consul.mesh.v1alpha1.PreparedQueryUpstream.tcp:type_name -> hashicorp.consul.mesh.v1alpha1.IPPortAddress + 3, // 7: hashicorp.consul.mesh.v1alpha1.PreparedQueryUpstream.unix:type_name -> hashicorp.consul.mesh.v1alpha1.UnixSocketAddress + 7, // 8: hashicorp.consul.mesh.v1alpha1.PreparedQueryUpstream.upstream_config:type_name -> hashicorp.consul.mesh.v1alpha1.UpstreamConfig + 
9, // [9:9] is the sub-list for method output_type + 9, // [9:9] is the sub-list for method input_type + 9, // [9:9] is the sub-list for extension type_name + 9, // [9:9] is the sub-list for extension extendee + 0, // [0:9] is the sub-list for field type_name } func init() { file_pbmesh_v1alpha1_upstreams_proto_init() } @@ -868,8 +573,7 @@ func file_pbmesh_v1alpha1_upstreams_proto_init() { if File_pbmesh_v1alpha1_upstreams_proto != nil { return } - file_pbmesh_v1alpha1_connection_proto_init() - file_pbmesh_v1alpha1_routing_proto_init() + file_pbmesh_v1alpha1_upstreams_configuration_proto_init() if !protoimpl.UnsafeEnabled { file_pbmesh_v1alpha1_upstreams_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*Upstreams); i { @@ -896,7 +600,7 @@ func file_pbmesh_v1alpha1_upstreams_proto_init() { } } file_pbmesh_v1alpha1_upstreams_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*TCPAddress); i { + switch v := v.(*IPPortAddress); i { case 0: return &v.state case 1: @@ -931,45 +635,9 @@ func file_pbmesh_v1alpha1_upstreams_proto_init() { return nil } } - file_pbmesh_v1alpha1_upstreams_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*UpstreamConfig); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pbmesh_v1alpha1_upstreams_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*UpstreamLimits); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pbmesh_v1alpha1_upstreams_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*PassiveHealthCheck); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } } file_pbmesh_v1alpha1_upstreams_proto_msgTypes[1].OneofWrappers = []interface{}{ - (*Upstream_Tcp)(nil), + (*Upstream_IpPort)(nil), (*Upstream_Unix)(nil), } file_pbmesh_v1alpha1_upstreams_proto_msgTypes[4].OneofWrappers = []interface{}{ @@ -982,7 +650,7 @@ func file_pbmesh_v1alpha1_upstreams_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_pbmesh_v1alpha1_upstreams_proto_rawDesc, NumEnums: 0, - NumMessages: 8, + NumMessages: 5, NumExtensions: 0, NumServices: 0, }, diff --git a/proto-public/pbmesh/v1alpha1/upstreams.proto b/proto-public/pbmesh/v1alpha1/upstreams.proto index c1f444e9ccc2..a78d8c3f9792 100644 --- a/proto-public/pbmesh/v1alpha1/upstreams.proto +++ b/proto-public/pbmesh/v1alpha1/upstreams.proto @@ -5,10 +5,8 @@ syntax = "proto3"; package hashicorp.consul.mesh.v1alpha1; -import "google/protobuf/duration.proto"; import "pbcatalog/v1alpha1/selector.proto"; -import "pbmesh/v1alpha1/connection.proto"; -import "pbmesh/v1alpha1/routing.proto"; +import "pbmesh/v1alpha1/upstreams_configuration.proto"; import "pbresource/resource.proto"; message Upstreams { @@ -16,85 +14,63 @@ message Upstreams { // These can be prefixes or specific workload names. hashicorp.consul.catalog.v1alpha1.WorkloadSelector workloads = 1; + // upstreams is the list of explicit upstreams to define for the selected workloads. repeated Upstream upstreams = 2; - repeated PreparedQueryUpstream pq_upstreams = 3; - UpstreamConfig upstream_config = 4; + // pq_upstreams is the list of prepared query upstreams. 
This field is not supported directly in v2 + // and should only be used for migration reasons. + repeated PreparedQueryUpstream pq_upstreams = 3; } message Upstream { - hashicorp.consul.resource.ID destination_ref = 1; + // destination_ref is the reference to an upstream service. This has to be pbcatalog.Service type. + hashicorp.consul.resource.Reference destination_ref = 1; + + // destination_port is the port name of the upstream service. This should be the name + // of the service's target port. string destination_port = 2; + + // datacenter is the datacenter for where this upstream service lives. string datacenter = 3; + // listen_addr is the address where Envoy will listen for requests to this upstream. + // It can be provided either as an ip:port or as a Unix domain socket. oneof listen_addr { - TCPAddress tcp = 4; + IPPortAddress ip_port = 4; UnixSocketAddress unix = 5; } - - UpstreamConfig upstream_config = 7; } -message TCPAddress { +message IPPortAddress { + // ip is an IPv4 or an IPv6 address. string ip = 1; + + // port is the port number. uint32 port = 2; } message UnixSocketAddress { + // path is the file system path at which to bind a Unix domain socket listener. string path = 1; + + // mode is the Unix file mode for the socket file. It should be provided + // in the numeric notation, for example, "0600". string mode = 2; } message PreparedQueryUpstream { + // name is the name of the prepared query to use as an upstream. string name = 1; + + // datacenter is the datacenter for where this upstream service lives. string datacenter = 2; + // listen_addr is the address where Envoy will listen for requests to this upstream. + // It can be provided either as an ip:port or as a Unix domain socket. oneof listen_addr { - TCPAddress tcp = 4; + IPPortAddress tcp = 4; UnixSocketAddress unix = 5; } UpstreamConfig upstream_config = 6; } - -message UpstreamConfig { - uint64 connect_timeout_ms = 2; - UpstreamLimits limits = 3; - PassiveHealthCheck passive_health_check = 4; - BalanceConnections balance_outbound_connections = 5; - MeshGatewayMode mesh_gateway_mode = 6; -} - -// UpstreamLimits describes the limits that are associated with a specific -// upstream of a service instance. -message UpstreamLimits { - // MaxConnections is the maximum number of connections the local proxy can - // make to the upstream service. - int32 max_connections = 1; - - // MaxPendingRequests is the maximum number of requests that will be queued - // waiting for an available connection. This is mostly applicable to HTTP/1.1 - // clusters since all HTTP/2 requests are streamed over a single - // connection. - int32 max_pending_requests = 2; - - // MaxConcurrentRequests is the maximum number of in-flight requests that will be allowed - // to the upstream cluster at a point in time. This is mostly applicable to HTTP/2 - // clusters since all HTTP/1.1 requests are limited by MaxConnections. - int32 max_concurrent_requests = 3; -} - -message PassiveHealthCheck { - // Interval between health check analysis sweeps. Each sweep may remove - // hosts or return hosts to the pool. - google.protobuf.Duration interval = 1; - - // MaxFailures is the count of consecutive failures that results in a host - // being removed from the pool. - uint32 max_failures = 2; - - // EnforcingConsecutive5xx is the % chance that a host will be actually ejected - // when an outlier status is detected through consecutive 5xx. - // This setting can be used to disable ejection or to ramp it up slowly. Defaults to 100.
- uint32 enforcing_consecutive_5xx = 3; -} diff --git a/proto-public/pbmesh/v1alpha1/upstreams_configuration.pb.binary.go b/proto-public/pbmesh/v1alpha1/upstreams_configuration.pb.binary.go new file mode 100644 index 000000000000..f4a5db281324 --- /dev/null +++ b/proto-public/pbmesh/v1alpha1/upstreams_configuration.pb.binary.go @@ -0,0 +1,58 @@ +// Code generated by protoc-gen-go-binary. DO NOT EDIT. +// source: pbmesh/v1alpha1/upstreams_configuration.proto + +package meshv1alpha1 + +import ( + "google.golang.org/protobuf/proto" +) + +// MarshalBinary implements encoding.BinaryMarshaler +func (msg *UpstreamsConfiguration) MarshalBinary() ([]byte, error) { + return proto.Marshal(msg) +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler +func (msg *UpstreamsConfiguration) UnmarshalBinary(b []byte) error { + return proto.Unmarshal(b, msg) +} + +// MarshalBinary implements encoding.BinaryMarshaler +func (msg *UpstreamConfigOverrides) MarshalBinary() ([]byte, error) { + return proto.Marshal(msg) +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler +func (msg *UpstreamConfigOverrides) UnmarshalBinary(b []byte) error { + return proto.Unmarshal(b, msg) +} + +// MarshalBinary implements encoding.BinaryMarshaler +func (msg *UpstreamConfig) MarshalBinary() ([]byte, error) { + return proto.Marshal(msg) +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler +func (msg *UpstreamConfig) UnmarshalBinary(b []byte) error { + return proto.Unmarshal(b, msg) +} + +// MarshalBinary implements encoding.BinaryMarshaler +func (msg *UpstreamLimits) MarshalBinary() ([]byte, error) { + return proto.Marshal(msg) +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler +func (msg *UpstreamLimits) UnmarshalBinary(b []byte) error { + return proto.Unmarshal(b, msg) +} + +// MarshalBinary implements encoding.BinaryMarshaler +func (msg *PassiveHealthCheck) MarshalBinary() ([]byte, error) { + return proto.Marshal(msg) +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler +func (msg *PassiveHealthCheck) UnmarshalBinary(b []byte) error { + return proto.Unmarshal(b, msg) +} diff --git a/proto-public/pbmesh/v1alpha1/upstreams_configuration.pb.go b/proto-public/pbmesh/v1alpha1/upstreams_configuration.pb.go new file mode 100644 index 000000000000..a4e2cc66c82e --- /dev/null +++ b/proto-public/pbmesh/v1alpha1/upstreams_configuration.pb.go @@ -0,0 +1,682 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc (unknown) +// source: pbmesh/v1alpha1/upstreams_configuration.proto + +package meshv1alpha1 + +import ( + v1alpha1 "github.com/hashicorp/consul/proto-public/pbcatalog/v1alpha1" + pbresource "github.com/hashicorp/consul/proto-public/pbresource" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + durationpb "google.golang.org/protobuf/types/known/durationpb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type UpstreamsConfiguration struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Selection of workloads these upstreams should apply to. 
+ // These can be prefixes or specific workload names. + Workloads *v1alpha1.WorkloadSelector `protobuf:"bytes,1,opt,name=workloads,proto3" json:"workloads,omitempty"` + // default_config applies to all upstreams for the workloads selected by this resource. + DefaultConfig *UpstreamConfig `protobuf:"bytes,2,opt,name=default_config,json=defaultConfig,proto3" json:"default_config,omitempty"` + // config_overrides provides per-upstream or per-upstream-port config overrides. + ConfigOverrides []*UpstreamConfigOverrides `protobuf:"bytes,3,rep,name=config_overrides,json=configOverrides,proto3" json:"config_overrides,omitempty"` +} + +func (x *UpstreamsConfiguration) Reset() { + *x = UpstreamsConfiguration{} + if protoimpl.UnsafeEnabled { + mi := &file_pbmesh_v1alpha1_upstreams_configuration_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UpstreamsConfiguration) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UpstreamsConfiguration) ProtoMessage() {} + +func (x *UpstreamsConfiguration) ProtoReflect() protoreflect.Message { + mi := &file_pbmesh_v1alpha1_upstreams_configuration_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UpstreamsConfiguration.ProtoReflect.Descriptor instead. +func (*UpstreamsConfiguration) Descriptor() ([]byte, []int) { + return file_pbmesh_v1alpha1_upstreams_configuration_proto_rawDescGZIP(), []int{0} +} + +func (x *UpstreamsConfiguration) GetWorkloads() *v1alpha1.WorkloadSelector { + if x != nil { + return x.Workloads + } + return nil +} + +func (x *UpstreamsConfiguration) GetDefaultConfig() *UpstreamConfig { + if x != nil { + return x.DefaultConfig + } + return nil +} + +func (x *UpstreamsConfiguration) GetConfigOverrides() []*UpstreamConfigOverrides { + if x != nil { + return x.ConfigOverrides + } + return nil +} + +// UpstreamConfigOverrides allow overriding upstream configuration per destination_ref/port/datacenter. +// In that sense, those three fields (destination_ref, destination_port and datacenter) are treated +// sort of like map keys and config is like a map value for that key. +type UpstreamConfigOverrides struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // destination_ref is the reference to an upstream service that this configuration applies to. + // This has to be pbcatalog.Service type. + DestinationRef *pbresource.Reference `protobuf:"bytes,1,opt,name=destination_ref,json=destinationRef,proto3" json:"destination_ref,omitempty"` + // destination_port is the port name of the upstream service. This should be the name + // of the service's target port. If not provided, this configuration will apply to all ports of an upstream. + DestinationPort string `protobuf:"bytes,2,opt,name=destination_port,json=destinationPort,proto3" json:"destination_port,omitempty"` + // datacenter is the datacenter for where this upstream service lives. + Datacenter string `protobuf:"bytes,3,opt,name=datacenter,proto3" json:"datacenter,omitempty"` + // config is the configuration that should apply to this upstream.
+ Config *UpstreamConfig `protobuf:"bytes,4,opt,name=config,proto3" json:"config,omitempty"` +} + +func (x *UpstreamConfigOverrides) Reset() { + *x = UpstreamConfigOverrides{} + if protoimpl.UnsafeEnabled { + mi := &file_pbmesh_v1alpha1_upstreams_configuration_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UpstreamConfigOverrides) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UpstreamConfigOverrides) ProtoMessage() {} + +func (x *UpstreamConfigOverrides) ProtoReflect() protoreflect.Message { + mi := &file_pbmesh_v1alpha1_upstreams_configuration_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UpstreamConfigOverrides.ProtoReflect.Descriptor instead. +func (*UpstreamConfigOverrides) Descriptor() ([]byte, []int) { + return file_pbmesh_v1alpha1_upstreams_configuration_proto_rawDescGZIP(), []int{1} +} + +func (x *UpstreamConfigOverrides) GetDestinationRef() *pbresource.Reference { + if x != nil { + return x.DestinationRef + } + return nil +} + +func (x *UpstreamConfigOverrides) GetDestinationPort() string { + if x != nil { + return x.DestinationPort + } + return "" +} + +func (x *UpstreamConfigOverrides) GetDatacenter() string { + if x != nil { + return x.Datacenter + } + return "" +} + +func (x *UpstreamConfigOverrides) GetConfig() *UpstreamConfig { + if x != nil { + return x.Config + } + return nil +} + +type UpstreamConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // protocol overrides upstream's port protocol. If no port for an upstream is specified + // or if used in the default configuration, this protocol will be used for all ports + // or for all ports of all upstreams respectively. + Protocol v1alpha1.Protocol `protobuf:"varint,1,opt,name=protocol,proto3,enum=hashicorp.consul.catalog.v1alpha1.Protocol" json:"protocol,omitempty"` + // connect_timeout is the timeout used when making a new + // connection to this upstream. Defaults to 5 seconds if not set. + ConnectTimeout *durationpb.Duration `protobuf:"bytes,2,opt,name=connect_timeout,json=connectTimeout,proto3" json:"connect_timeout,omitempty"` + // limits are the set of limits that are applied to the proxy for a specific upstream. + Limits *UpstreamLimits `protobuf:"bytes,3,opt,name=limits,proto3" json:"limits,omitempty"` + // passive_health_check configuration determines how upstream proxy instances will + // be monitored for removal from the load balancing pool. + PassiveHealthCheck *PassiveHealthCheck `protobuf:"bytes,4,opt,name=passive_health_check,json=passiveHealthCheck,proto3" json:"passive_health_check,omitempty"` + // balance_outbound_connections indicates how the proxy should attempt to distribute + // connections across worker threads. + BalanceOutboundConnections BalanceConnections `protobuf:"varint,5,opt,name=balance_outbound_connections,json=balanceOutboundConnections,proto3,enum=hashicorp.consul.mesh.v1alpha1.BalanceConnections" json:"balance_outbound_connections,omitempty"` + // MeshGatewayMode is the Mesh Gateway routing mode. 
+ MeshGatewayMode MeshGatewayMode `protobuf:"varint,6,opt,name=mesh_gateway_mode,json=meshGatewayMode,proto3,enum=hashicorp.consul.mesh.v1alpha1.MeshGatewayMode" json:"mesh_gateway_mode,omitempty"` +} + +func (x *UpstreamConfig) Reset() { + *x = UpstreamConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_pbmesh_v1alpha1_upstreams_configuration_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UpstreamConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UpstreamConfig) ProtoMessage() {} + +func (x *UpstreamConfig) ProtoReflect() protoreflect.Message { + mi := &file_pbmesh_v1alpha1_upstreams_configuration_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UpstreamConfig.ProtoReflect.Descriptor instead. +func (*UpstreamConfig) Descriptor() ([]byte, []int) { + return file_pbmesh_v1alpha1_upstreams_configuration_proto_rawDescGZIP(), []int{2} +} + +func (x *UpstreamConfig) GetProtocol() v1alpha1.Protocol { + if x != nil { + return x.Protocol + } + return v1alpha1.Protocol(0) +} + +func (x *UpstreamConfig) GetConnectTimeout() *durationpb.Duration { + if x != nil { + return x.ConnectTimeout + } + return nil +} + +func (x *UpstreamConfig) GetLimits() *UpstreamLimits { + if x != nil { + return x.Limits + } + return nil +} + +func (x *UpstreamConfig) GetPassiveHealthCheck() *PassiveHealthCheck { + if x != nil { + return x.PassiveHealthCheck + } + return nil +} + +func (x *UpstreamConfig) GetBalanceOutboundConnections() BalanceConnections { + if x != nil { + return x.BalanceOutboundConnections + } + return BalanceConnections_BALANCE_CONNECTIONS_DEFAULT +} + +func (x *UpstreamConfig) GetMeshGatewayMode() MeshGatewayMode { + if x != nil { + return x.MeshGatewayMode + } + return MeshGatewayMode_MESH_GATEWAY_MODE_UNSPECIFIED +} + +// UpstreamLimits describes the limits that are associated with a specific +// upstream of a service instance. +type UpstreamLimits struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // max_connections is the maximum number of connections the local proxy can + // make to the upstream service. + MaxConnections int32 `protobuf:"varint,1,opt,name=max_connections,json=maxConnections,proto3" json:"max_connections,omitempty"` + // max_pending_requests is the maximum number of requests that will be queued + // waiting for an available connection. This is mostly applicable to HTTP/1.1 + // clusters since all HTTP/2 requests are streamed over a single + // connection. + MaxPendingRequests int32 `protobuf:"varint,2,opt,name=max_pending_requests,json=maxPendingRequests,proto3" json:"max_pending_requests,omitempty"` + // max_concurrent_requests is the maximum number of in-flight requests that will be allowed + // to the upstream cluster at a point in time. This is mostly applicable to HTTP/2 + // clusters since all HTTP/1.1 requests are limited by MaxConnections. 
+ MaxConcurrentRequests int32 `protobuf:"varint,3,opt,name=max_concurrent_requests,json=maxConcurrentRequests,proto3" json:"max_concurrent_requests,omitempty"` +} + +func (x *UpstreamLimits) Reset() { + *x = UpstreamLimits{} + if protoimpl.UnsafeEnabled { + mi := &file_pbmesh_v1alpha1_upstreams_configuration_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UpstreamLimits) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UpstreamLimits) ProtoMessage() {} + +func (x *UpstreamLimits) ProtoReflect() protoreflect.Message { + mi := &file_pbmesh_v1alpha1_upstreams_configuration_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UpstreamLimits.ProtoReflect.Descriptor instead. +func (*UpstreamLimits) Descriptor() ([]byte, []int) { + return file_pbmesh_v1alpha1_upstreams_configuration_proto_rawDescGZIP(), []int{3} +} + +func (x *UpstreamLimits) GetMaxConnections() int32 { + if x != nil { + return x.MaxConnections + } + return 0 +} + +func (x *UpstreamLimits) GetMaxPendingRequests() int32 { + if x != nil { + return x.MaxPendingRequests + } + return 0 +} + +func (x *UpstreamLimits) GetMaxConcurrentRequests() int32 { + if x != nil { + return x.MaxConcurrentRequests + } + return 0 +} + +type PassiveHealthCheck struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // interval between health check analysis sweeps. Each sweep may remove + // hosts or return hosts to the pool. + Interval *durationpb.Duration `protobuf:"bytes,1,opt,name=interval,proto3" json:"interval,omitempty"` + // max_failures is the count of consecutive failures that results in a host + // being removed from the pool. + MaxFailures uint32 `protobuf:"varint,2,opt,name=max_failures,json=maxFailures,proto3" json:"max_failures,omitempty"` + // enforcing_consecutive_5xx is the % chance that a host will be actually ejected + // when an outlier status is detected through consecutive 5xx. + // This setting can be used to disable ejection or to ramp it up slowly. Defaults to 100. + EnforcingConsecutive_5Xx uint32 `protobuf:"varint,3,opt,name=enforcing_consecutive_5xx,json=enforcingConsecutive5xx,proto3" json:"enforcing_consecutive_5xx,omitempty"` +} + +func (x *PassiveHealthCheck) Reset() { + *x = PassiveHealthCheck{} + if protoimpl.UnsafeEnabled { + mi := &file_pbmesh_v1alpha1_upstreams_configuration_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PassiveHealthCheck) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PassiveHealthCheck) ProtoMessage() {} + +func (x *PassiveHealthCheck) ProtoReflect() protoreflect.Message { + mi := &file_pbmesh_v1alpha1_upstreams_configuration_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PassiveHealthCheck.ProtoReflect.Descriptor instead. 
+func (*PassiveHealthCheck) Descriptor() ([]byte, []int) { + return file_pbmesh_v1alpha1_upstreams_configuration_proto_rawDescGZIP(), []int{4} +} + +func (x *PassiveHealthCheck) GetInterval() *durationpb.Duration { + if x != nil { + return x.Interval + } + return nil +} + +func (x *PassiveHealthCheck) GetMaxFailures() uint32 { + if x != nil { + return x.MaxFailures + } + return 0 +} + +func (x *PassiveHealthCheck) GetEnforcingConsecutive_5Xx() uint32 { + if x != nil { + return x.EnforcingConsecutive_5Xx + } + return 0 +} + +var File_pbmesh_v1alpha1_upstreams_configuration_proto protoreflect.FileDescriptor + +var file_pbmesh_v1alpha1_upstreams_configuration_proto_rawDesc = []byte{ + 0x0a, 0x2d, 0x70, 0x62, 0x6d, 0x65, 0x73, 0x68, 0x2f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, + 0x31, 0x2f, 0x75, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73, 0x5f, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, + 0x1e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, + 0x6c, 0x2e, 0x6d, 0x65, 0x73, 0x68, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x1a, + 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, + 0x21, 0x70, 0x62, 0x63, 0x61, 0x74, 0x61, 0x6c, 0x6f, 0x67, 0x2f, 0x76, 0x31, 0x61, 0x6c, 0x70, + 0x68, 0x61, 0x31, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x1a, 0x21, 0x70, 0x62, 0x63, 0x61, 0x74, 0x61, 0x6c, 0x6f, 0x67, 0x2f, 0x76, 0x31, + 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2f, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x20, 0x70, 0x62, 0x6d, 0x65, 0x73, 0x68, 0x2f, 0x76, 0x31, + 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x70, 0x62, 0x6d, 0x65, 0x73, 0x68, 0x2f, + 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2f, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x19, 0x70, 0x62, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x22, 0xa6, 0x02, 0x0a, 0x16, 0x55, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x51, 0x0a, 0x09, + 0x77, 0x6f, 0x72, 0x6b, 0x6c, 0x6f, 0x61, 0x64, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x33, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, + 0x75, 0x6c, 0x2e, 0x63, 0x61, 0x74, 0x61, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, + 0x68, 0x61, 0x31, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x65, 0x6c, 0x65, + 0x63, 0x74, 0x6f, 0x72, 0x52, 0x09, 0x77, 0x6f, 0x72, 0x6b, 0x6c, 0x6f, 0x61, 0x64, 0x73, 0x12, + 0x55, 0x0a, 0x0e, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, + 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x6d, 0x65, 0x73, 0x68, 0x2e, + 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x55, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, + 0x6d, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0d, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, + 0x43, 0x6f, 0x6e, 
0x66, 0x69, 0x67, 0x12, 0x62, 0x0a, 0x10, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x5f, 0x6f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x37, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, + 0x73, 0x75, 0x6c, 0x2e, 0x6d, 0x65, 0x73, 0x68, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, + 0x31, 0x2e, 0x55, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x73, 0x52, 0x0f, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x73, 0x22, 0xfb, 0x01, 0x0a, 0x17, 0x55, + 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x4f, 0x76, 0x65, + 0x72, 0x72, 0x69, 0x64, 0x65, 0x73, 0x12, 0x4d, 0x0a, 0x0f, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x66, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x24, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, + 0x75, 0x6c, 0x2e, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x52, 0x65, 0x66, 0x65, + 0x72, 0x65, 0x6e, 0x63, 0x65, 0x52, 0x0e, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x52, 0x65, 0x66, 0x12, 0x29, 0x0a, 0x10, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0f, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x72, 0x74, + 0x12, 0x1e, 0x0a, 0x0a, 0x64, 0x61, 0x74, 0x61, 0x63, 0x65, 0x6e, 0x74, 0x65, 0x72, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x64, 0x61, 0x74, 0x61, 0x63, 0x65, 0x6e, 0x74, 0x65, 0x72, + 0x12, 0x46, 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x2e, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, + 0x73, 0x75, 0x6c, 0x2e, 0x6d, 0x65, 0x73, 0x68, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, + 0x31, 0x2e, 0x55, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0x9e, 0x04, 0x0a, 0x0e, 0x55, 0x70, 0x73, + 0x74, 0x72, 0x65, 0x61, 0x6d, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x47, 0x0a, 0x08, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2b, 0x2e, + 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, + 0x2e, 0x63, 0x61, 0x74, 0x61, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, + 0x31, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x52, 0x08, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x42, 0x0a, 0x0f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x5f, + 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, + 0x74, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x12, 0x46, 0x0a, 0x06, 0x6c, 0x69, 0x6d, 0x69, + 0x74, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, + 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x6d, 0x65, 0x73, 0x68, + 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x55, 0x70, 0x73, 0x74, 0x72, 0x65, + 0x61, 0x6d, 0x4c, 0x69, 0x6d, 0x69, 0x74, 
0x73, 0x52, 0x06, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x73, + 0x12, 0x64, 0x0a, 0x14, 0x70, 0x61, 0x73, 0x73, 0x69, 0x76, 0x65, 0x5f, 0x68, 0x65, 0x61, 0x6c, + 0x74, 0x68, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x32, + 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, + 0x6c, 0x2e, 0x6d, 0x65, 0x73, 0x68, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, + 0x50, 0x61, 0x73, 0x73, 0x69, 0x76, 0x65, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, + 0x63, 0x6b, 0x52, 0x12, 0x70, 0x61, 0x73, 0x73, 0x69, 0x76, 0x65, 0x48, 0x65, 0x61, 0x6c, 0x74, + 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x12, 0x74, 0x0a, 0x1c, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, + 0x65, 0x5f, 0x6f, 0x75, 0x74, 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x32, 0x2e, 0x68, + 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, + 0x6d, 0x65, 0x73, 0x68, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x42, 0x61, + 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x52, 0x1a, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x4f, 0x75, 0x74, 0x62, 0x6f, 0x75, 0x6e, + 0x64, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x5b, 0x0a, 0x11, + 0x6d, 0x65, 0x73, 0x68, 0x5f, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x5f, 0x6d, 0x6f, 0x64, + 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2f, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, + 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x6d, 0x65, 0x73, 0x68, 0x2e, + 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x4d, 0x65, 0x73, 0x68, 0x47, 0x61, 0x74, + 0x65, 0x77, 0x61, 0x79, 0x4d, 0x6f, 0x64, 0x65, 0x52, 0x0f, 0x6d, 0x65, 0x73, 0x68, 0x47, 0x61, + 0x74, 0x65, 0x77, 0x61, 0x79, 0x4d, 0x6f, 0x64, 0x65, 0x22, 0xa3, 0x01, 0x0a, 0x0e, 0x55, 0x70, + 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x73, 0x12, 0x27, 0x0a, 0x0f, + 0x6d, 0x61, 0x78, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0e, 0x6d, 0x61, 0x78, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x30, 0x0a, 0x14, 0x6d, 0x61, 0x78, 0x5f, 0x70, 0x65, 0x6e, + 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x05, 0x52, 0x12, 0x6d, 0x61, 0x78, 0x50, 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x12, 0x36, 0x0a, 0x17, 0x6d, 0x61, 0x78, 0x5f, 0x63, + 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x15, 0x6d, 0x61, 0x78, 0x43, 0x6f, 0x6e, + 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x22, + 0xaa, 0x01, 0x0a, 0x12, 0x50, 0x61, 0x73, 0x73, 0x69, 0x76, 0x65, 0x48, 0x65, 0x61, 0x6c, 0x74, + 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x12, 0x35, 0x0a, 0x08, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, + 0x61, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x52, 0x08, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x12, 0x21, 0x0a, + 0x0c, 0x6d, 0x61, 0x78, 0x5f, 0x66, 0x61, 0x69, 0x6c, 0x75, 0x72, 
0x65, 0x73, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0d, 0x52, 0x0b, 0x6d, 0x61, 0x78, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x73, + 0x12, 0x3a, 0x0a, 0x19, 0x65, 0x6e, 0x66, 0x6f, 0x72, 0x63, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x6f, + 0x6e, 0x73, 0x65, 0x63, 0x75, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x35, 0x78, 0x78, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x0d, 0x52, 0x17, 0x65, 0x6e, 0x66, 0x6f, 0x72, 0x63, 0x69, 0x6e, 0x67, 0x43, 0x6f, + 0x6e, 0x73, 0x65, 0x63, 0x75, 0x74, 0x69, 0x76, 0x65, 0x35, 0x78, 0x78, 0x42, 0xa3, 0x02, 0x0a, + 0x22, 0x63, 0x6f, 0x6d, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, + 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x6d, 0x65, 0x73, 0x68, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, + 0x68, 0x61, 0x31, 0x42, 0x1b, 0x55, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, + 0x50, 0x01, 0x5a, 0x45, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x68, + 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2f, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2f, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2d, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x2f, 0x70, 0x62, 0x6d, + 0x65, 0x73, 0x68, 0x2f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x3b, 0x6d, 0x65, 0x73, + 0x68, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0xa2, 0x02, 0x03, 0x48, 0x43, 0x4d, 0xaa, + 0x02, 0x1e, 0x48, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x43, 0x6f, 0x6e, 0x73, + 0x75, 0x6c, 0x2e, 0x4d, 0x65, 0x73, 0x68, 0x2e, 0x56, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, + 0xca, 0x02, 0x1e, 0x48, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x5c, 0x43, 0x6f, 0x6e, + 0x73, 0x75, 0x6c, 0x5c, 0x4d, 0x65, 0x73, 0x68, 0x5c, 0x56, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, + 0x31, 0xe2, 0x02, 0x2a, 0x48, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x5c, 0x43, 0x6f, + 0x6e, 0x73, 0x75, 0x6c, 0x5c, 0x4d, 0x65, 0x73, 0x68, 0x5c, 0x56, 0x31, 0x61, 0x6c, 0x70, 0x68, + 0x61, 0x31, 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, 0x02, + 0x21, 0x48, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x3a, 0x3a, 0x43, 0x6f, 0x6e, 0x73, + 0x75, 0x6c, 0x3a, 0x3a, 0x4d, 0x65, 0x73, 0x68, 0x3a, 0x3a, 0x56, 0x31, 0x61, 0x6c, 0x70, 0x68, + 0x61, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_pbmesh_v1alpha1_upstreams_configuration_proto_rawDescOnce sync.Once + file_pbmesh_v1alpha1_upstreams_configuration_proto_rawDescData = file_pbmesh_v1alpha1_upstreams_configuration_proto_rawDesc +) + +func file_pbmesh_v1alpha1_upstreams_configuration_proto_rawDescGZIP() []byte { + file_pbmesh_v1alpha1_upstreams_configuration_proto_rawDescOnce.Do(func() { + file_pbmesh_v1alpha1_upstreams_configuration_proto_rawDescData = protoimpl.X.CompressGZIP(file_pbmesh_v1alpha1_upstreams_configuration_proto_rawDescData) + }) + return file_pbmesh_v1alpha1_upstreams_configuration_proto_rawDescData +} + +var file_pbmesh_v1alpha1_upstreams_configuration_proto_msgTypes = make([]protoimpl.MessageInfo, 5) +var file_pbmesh_v1alpha1_upstreams_configuration_proto_goTypes = []interface{}{ + (*UpstreamsConfiguration)(nil), // 0: hashicorp.consul.mesh.v1alpha1.UpstreamsConfiguration + (*UpstreamConfigOverrides)(nil), // 1: hashicorp.consul.mesh.v1alpha1.UpstreamConfigOverrides + (*UpstreamConfig)(nil), // 2: hashicorp.consul.mesh.v1alpha1.UpstreamConfig + (*UpstreamLimits)(nil), // 3: hashicorp.consul.mesh.v1alpha1.UpstreamLimits + (*PassiveHealthCheck)(nil), // 4: 
hashicorp.consul.mesh.v1alpha1.PassiveHealthCheck + (*v1alpha1.WorkloadSelector)(nil), // 5: hashicorp.consul.catalog.v1alpha1.WorkloadSelector + (*pbresource.Reference)(nil), // 6: hashicorp.consul.resource.Reference + (v1alpha1.Protocol)(0), // 7: hashicorp.consul.catalog.v1alpha1.Protocol + (*durationpb.Duration)(nil), // 8: google.protobuf.Duration + (BalanceConnections)(0), // 9: hashicorp.consul.mesh.v1alpha1.BalanceConnections + (MeshGatewayMode)(0), // 10: hashicorp.consul.mesh.v1alpha1.MeshGatewayMode +} +var file_pbmesh_v1alpha1_upstreams_configuration_proto_depIdxs = []int32{ + 5, // 0: hashicorp.consul.mesh.v1alpha1.UpstreamsConfiguration.workloads:type_name -> hashicorp.consul.catalog.v1alpha1.WorkloadSelector + 2, // 1: hashicorp.consul.mesh.v1alpha1.UpstreamsConfiguration.default_config:type_name -> hashicorp.consul.mesh.v1alpha1.UpstreamConfig + 1, // 2: hashicorp.consul.mesh.v1alpha1.UpstreamsConfiguration.config_overrides:type_name -> hashicorp.consul.mesh.v1alpha1.UpstreamConfigOverrides + 6, // 3: hashicorp.consul.mesh.v1alpha1.UpstreamConfigOverrides.destination_ref:type_name -> hashicorp.consul.resource.Reference + 2, // 4: hashicorp.consul.mesh.v1alpha1.UpstreamConfigOverrides.config:type_name -> hashicorp.consul.mesh.v1alpha1.UpstreamConfig + 7, // 5: hashicorp.consul.mesh.v1alpha1.UpstreamConfig.protocol:type_name -> hashicorp.consul.catalog.v1alpha1.Protocol + 8, // 6: hashicorp.consul.mesh.v1alpha1.UpstreamConfig.connect_timeout:type_name -> google.protobuf.Duration + 3, // 7: hashicorp.consul.mesh.v1alpha1.UpstreamConfig.limits:type_name -> hashicorp.consul.mesh.v1alpha1.UpstreamLimits + 4, // 8: hashicorp.consul.mesh.v1alpha1.UpstreamConfig.passive_health_check:type_name -> hashicorp.consul.mesh.v1alpha1.PassiveHealthCheck + 9, // 9: hashicorp.consul.mesh.v1alpha1.UpstreamConfig.balance_outbound_connections:type_name -> hashicorp.consul.mesh.v1alpha1.BalanceConnections + 10, // 10: hashicorp.consul.mesh.v1alpha1.UpstreamConfig.mesh_gateway_mode:type_name -> hashicorp.consul.mesh.v1alpha1.MeshGatewayMode + 8, // 11: hashicorp.consul.mesh.v1alpha1.PassiveHealthCheck.interval:type_name -> google.protobuf.Duration + 12, // [12:12] is the sub-list for method output_type + 12, // [12:12] is the sub-list for method input_type + 12, // [12:12] is the sub-list for extension type_name + 12, // [12:12] is the sub-list for extension extendee + 0, // [0:12] is the sub-list for field type_name +} + +func init() { file_pbmesh_v1alpha1_upstreams_configuration_proto_init() } +func file_pbmesh_v1alpha1_upstreams_configuration_proto_init() { + if File_pbmesh_v1alpha1_upstreams_configuration_proto != nil { + return + } + file_pbmesh_v1alpha1_connection_proto_init() + file_pbmesh_v1alpha1_routing_proto_init() + if !protoimpl.UnsafeEnabled { + file_pbmesh_v1alpha1_upstreams_configuration_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*UpstreamsConfiguration); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_pbmesh_v1alpha1_upstreams_configuration_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*UpstreamConfigOverrides); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_pbmesh_v1alpha1_upstreams_configuration_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*UpstreamConfig); i { + case 0: + 
return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_pbmesh_v1alpha1_upstreams_configuration_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*UpstreamLimits); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_pbmesh_v1alpha1_upstreams_configuration_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PassiveHealthCheck); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_pbmesh_v1alpha1_upstreams_configuration_proto_rawDesc, + NumEnums: 0, + NumMessages: 5, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_pbmesh_v1alpha1_upstreams_configuration_proto_goTypes, + DependencyIndexes: file_pbmesh_v1alpha1_upstreams_configuration_proto_depIdxs, + MessageInfos: file_pbmesh_v1alpha1_upstreams_configuration_proto_msgTypes, + }.Build() + File_pbmesh_v1alpha1_upstreams_configuration_proto = out.File + file_pbmesh_v1alpha1_upstreams_configuration_proto_rawDesc = nil + file_pbmesh_v1alpha1_upstreams_configuration_proto_goTypes = nil + file_pbmesh_v1alpha1_upstreams_configuration_proto_depIdxs = nil +} diff --git a/proto-public/pbmesh/v1alpha1/upstreams_configuration.proto b/proto-public/pbmesh/v1alpha1/upstreams_configuration.proto new file mode 100644 index 000000000000..9124528e2d90 --- /dev/null +++ b/proto-public/pbmesh/v1alpha1/upstreams_configuration.proto @@ -0,0 +1,103 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +syntax = "proto3"; + +package hashicorp.consul.mesh.v1alpha1; + +import "google/protobuf/duration.proto"; +import "pbcatalog/v1alpha1/protocol.proto"; +import "pbcatalog/v1alpha1/selector.proto"; +import "pbmesh/v1alpha1/connection.proto"; +import "pbmesh/v1alpha1/routing.proto"; +import "pbresource/resource.proto"; + +message UpstreamsConfiguration { + // Selection of workloads these upstreams should apply to. + // These can be prefixes or specific workload names. + hashicorp.consul.catalog.v1alpha1.WorkloadSelector workloads = 1; + + // default_config applies to all upstreams for the workloads selected by this resource. + UpstreamConfig default_config = 2; + + // config_overrides provides per-upstream or per-upstream-port config overrides. + repeated UpstreamConfigOverrides config_overrides = 3; +} + +// UpstreamConfigOverrides allow to override upstream configuration per destination_ref/port/datacenter. +// In that sense, those three fields (destination_ref, destination_port and datacenter) are treated +// sort of like map keys and config is a like a map value for that key. +message UpstreamConfigOverrides { + // destination_ref is the reference to an upstream service that this configuration applies to. + // This has to be pbcatalog.Service type. + hashicorp.consul.resource.Reference destination_ref = 1; + + // destination_port is the port name of the upstream service. This should be the name + // of the service's target port. If not provided, this configuration will apply to all ports of an upstream. + string destination_port = 2; + + // datacenter is the datacenter for where this upstream service lives. 
+ string datacenter = 3; + + // config is the configuration that should apply to this upstream. + UpstreamConfig config = 4; +} + +message UpstreamConfig { + // protocol overrides upstream's port protocol. If no port for an upstream is specified + // or if used in the default configuration, this protocol will be used for all ports + // or for all ports of all upstreams respectively. + hashicorp.consul.catalog.v1alpha1.Protocol protocol = 1; + + // connect_timeout is the timeout used when making a new + // connection to this upstream. Defaults to 5 seconds if not set. + google.protobuf.Duration connect_timeout = 2; + + // limits are the set of limits that are applied to the proxy for a specific upstream. + UpstreamLimits limits = 3; + + // passive_health_check configuration determines how upstream proxy instances will + // be monitored for removal from the load balancing pool. + PassiveHealthCheck passive_health_check = 4; + + // balance_outbound_connections indicates how the proxy should attempt to distribute + // connections across worker threads. + BalanceConnections balance_outbound_connections = 5; + + // MeshGatewayMode is the Mesh Gateway routing mode. + MeshGatewayMode mesh_gateway_mode = 6; +} + +// UpstreamLimits describes the limits that are associated with a specific +// upstream of a service instance. +message UpstreamLimits { + // max_connections is the maximum number of connections the local proxy can + // make to the upstream service. + int32 max_connections = 1; + + // max_pending_requests is the maximum number of requests that will be queued + // waiting for an available connection. This is mostly applicable to HTTP/1.1 + // clusters since all HTTP/2 requests are streamed over a single + // connection. + int32 max_pending_requests = 2; + + // max_concurrent_requests is the maximum number of in-flight requests that will be allowed + // to the upstream cluster at a point in time. This is mostly applicable to HTTP/2 + // clusters since all HTTP/1.1 requests are limited by MaxConnections. + int32 max_concurrent_requests = 3; +} + +message PassiveHealthCheck { + // interval between health check analysis sweeps. Each sweep may remove + // hosts or return hosts to the pool. + google.protobuf.Duration interval = 1; + + // max_failures is the count of consecutive failures that results in a host + // being removed from the pool. + uint32 max_failures = 2; + + // enforcing_consecutive_5xx is the % chance that a host will be actually ejected + // when an outlier status is detected through consecutive 5xx. + // This setting can be used to disable ejection or to ramp it up slowly. Defaults to 100. + uint32 enforcing_consecutive_5xx = 3; +} From a2c6953d0d9ff5b988550e6c0d8f005e80b5f28d Mon Sep 17 00:00:00 2001 From: John Murret Date: Thu, 13 Jul 2023 13:26:35 -0600 Subject: [PATCH 25/43] [NET-4895] ci - api tests and consul container tests error because of dependency bugs with go 1.20.6. Pin go to 1.20.5. (#18124) ### Description The following jobs started failing when go 1.20.6 was released: - `go-test-api-1-19` - `go-test-api-1-20` - `compatibility-integration-tests` - `upgrade-integration-tests` `compatibility-integration-tests` and `upgrade-integration-tests` are failing due to this testcontainers issue: https://github.com/testcontainers/testcontainers-go/issues/1359. This issue calls for testcontainers to release a new version when one of their dependencies is fixed. When that is done, we will unpin the go versions in `compatibility-integration-tests` and `upgrade-integration-tests`.
### Testing & Reproduction steps See these jobs broken in CI and then see them work with this PR. --------- Co-authored-by: Chris Thain <32781396+cthain@users.noreply.github.com> --- .github/workflows/go-tests.yml | 8 ++++++++ .github/workflows/reusable-unit.yml | 10 ++++++++++ .github/workflows/test-integrations.yml | 12 ++++++++++-- 3 files changed, 28 insertions(+), 2 deletions(-) diff --git a/.github/workflows/go-tests.yml b/.github/workflows/go-tests.yml index 9baf90c505ed..be773c2b0e06 100644 --- a/.github/workflows/go-tests.yml +++ b/.github/workflows/go-tests.yml @@ -376,6 +376,7 @@ jobs: runs-on: ${{ needs.setup.outputs.compute-xl }} repository-name: ${{ github.repository }} go-tags: "${{ github.event.repository.name == 'consul-enterprise' && 'consulent consulprem consuldev' || '' }}" + go-version: "1.19.10" permissions: id-token: write # NOTE: this permission is explicitly required for Vault auth. contents: read @@ -394,6 +395,12 @@ jobs: runs-on: ${{ needs.setup.outputs.compute-xl }} repository-name: ${{ github.repository }} go-tags: "${{ github.event.repository.name == 'consul-enterprise' && 'consulent consulprem consuldev' || '' }}" + # pinning this to 1.20.5 because this issue in go-testcontainers occurs + # in 1.20.6 with the error "http: invalid Host header, host port waiting failed" + # https://github.com/testcontainers/testcontainers-go/issues/1359 + # remove setting this when the above issue is fixed so that the reusable + # job will just get the go version from go.mod. + go-version: "1.20.5" permissions: id-token: write # NOTE: this permission is explicitly required for Vault auth. contents: read @@ -412,6 +419,7 @@ jobs: runs-on: ${{ needs.setup.outputs.compute-xl }} repository-name: ${{ github.repository }} go-tags: "${{ github.event.repository.name == 'consul-enterprise' && 'consulent consulprem consuldev' || '' }}" + go-version: "1.19" permissions: id-token: write # NOTE: this permission is explicitly required for Vault auth. 
contents: read diff --git a/.github/workflows/reusable-unit.yml b/.github/workflows/reusable-unit.yml index c066cad3f48d..7442b06c33b5 100644 --- a/.github/workflows/reusable-unit.yml +++ b/.github/workflows/reusable-unit.yml @@ -33,6 +33,10 @@ on: required: false type: string default: "" + go-version: + required: false + type: string + default: "" secrets: elevated-github-token: required: true @@ -59,6 +63,12 @@ jobs: if: ${{ endsWith(inputs.repository-name, '-enterprise') }} run: git config --global url."https://${{ secrets.elevated-github-token }}:@github.com".insteadOf "https://github.com" - uses: actions/setup-go@fac708d6674e30b6ba41289acaab6d4b75aa0753 # v4.0.1 + if: ${{ inputs.go-version != '' }} + with: + go-version: ${{ inputs.go-version }} + cache: true + - uses: actions/setup-go@fac708d6674e30b6ba41289acaab6d4b75aa0753 # v4.0.1 + if: ${{ inputs.go-version == '' }} with: go-version-file: 'go.mod' cache: true diff --git a/.github/workflows/test-integrations.yml b/.github/workflows/test-integrations.yml index 263a2e41e4ae..1a3876855d5e 100644 --- a/.github/workflows/test-integrations.yml +++ b/.github/workflows/test-integrations.yml @@ -372,7 +372,11 @@ jobs: run: git config --global url."https://${{ secrets.ELEVATED_GITHUB_TOKEN }}:@github.com".insteadOf "https://github.com" - uses: actions/setup-go@fac708d6674e30b6ba41289acaab6d4b75aa0753 # v4.0.1 with: - go-version-file: 'go.mod' + # pinning this to 1.20.5 because this issue in go-testcontainers occurs + # in 1.20.6 with the error "http: invalid Host header, host port waiting failed" + # https://github.com/testcontainers/testcontainers-go/issues/1359 + # go-version-file: 'go.mod' + go-version: '1.20.5' - run: go env - name: docker env run: | @@ -487,7 +491,11 @@ jobs: run: git config --global url."https://${{ secrets.ELEVATED_GITHUB_TOKEN }}:@github.com".insteadOf "https://github.com" - uses: actions/setup-go@fac708d6674e30b6ba41289acaab6d4b75aa0753 # v4.0.1 with: - go-version-file: 'go.mod' + # pinning this to 1.20.5 because this issue in go-testcontainers occurs + # in 1.20.6 with the error "http: invalid Host header, host port waiting failed" + # https://github.com/testcontainers/testcontainers-go/issues/1359 + # go-version-file: 'go.mod' + go-version: '1.20.5' - run: go env # Get go binary from workspace From 68863b42f845849500662e499334bf381f4fcb60 Mon Sep 17 00:00:00 2001 From: Jeff Apple <79924108+Jeff-Apple@users.noreply.github.com> Date: Thu, 13 Jul 2023 15:17:32 -0700 Subject: [PATCH 26/43] Add ingress gateway deprecation notices to docs (#18102) ### Description This adds notices, that ingress gateway is deprecated, to several places in the product docs where ingress gateway is the topic. ### Testing & Reproduction steps Tested with a local copy of the website. ### Links Deprecation of ingress gateway was announced in the Release Notes for Consul 1.16 and Consul-K8s 1.2. 
See: [https://developer.hashicorp.com/consul/docs/release-notes/consul/v1_16_x#what-s-deprecated](https://developer.hashicorp.com/consul/docs/release-notes/consul/v1_16_x#what-s-deprecated ) [https://developer.hashicorp.com/consul/docs/release-notes/consul-k8s/v1_2_x#what-s-deprecated](https://developer.hashicorp.com/consul/docs/release-notes/consul-k8s/v1_2_x#what-s-deprecated) ### PR Checklist * [N/A] updated test coverage * [X] external facing docs updated * [X] appropriate backport labels added * [X] not a security concern --------- Co-authored-by: trujillo-adam <47586768+trujillo-adam@users.noreply.github.com> --- .../content/docs/concepts/service-mesh.mdx | 4 +-- .../config-entries/ingress-gateway.mdx | 8 +++++ .../content/docs/connect/gateways/index.mdx | 34 ++++++++++++++++--- .../gateways/ingress-gateway/index.mdx | 9 +++++ .../docs/k8s/connect/ingress-gateways.mdx | 9 ++++- 5 files changed, 56 insertions(+), 8 deletions(-) diff --git a/website/content/docs/concepts/service-mesh.mdx b/website/content/docs/concepts/service-mesh.mdx index 2e793f2441c8..947984484e45 100644 --- a/website/content/docs/concepts/service-mesh.mdx +++ b/website/content/docs/concepts/service-mesh.mdx @@ -53,13 +53,13 @@ The API gateway will route the incoming requests to the respective service. The A service mesh specializes in the network management of services and the communication between services. The mesh is responsible for keeping track of services and their health status, IP address, and traffic routing and ensuring all traffic between services is authenticated and encrypted. -Unlike API gateways, a service mesh will track all registered services' lifecycle and ensure requests are routed to healthy instances of the service. +Unlike some API gateways, a service mesh will track all registered services' lifecycle and ensure requests are routed to healthy instances of the service. API gateways are frequently deployed alongside a load balancer to ensure traffic is directed to healthy and available instances of the service. The mesh reduces the load balancer footprint as routing responsibilities are handled in a decentralized manner. API gateways can be used with a service mesh to bridge external networks (non-mesh) with a service mesh. --> **API gateways and traffic direction:** API gateways are often used to accept north-south traffic. North-south traffic is networking traffic that either enters or exits a data center or a virtual private network (VPC). +-> **API gateways and traffic direction:** API gateways are often used to accept north-south traffic. North-south traffic is networking traffic that either enters or exits a datacenter or a virtual private network (VPC). You can connect API gateways to a service mesh and provide access to it from outside the mesh. A service mesh is primarily used for handling east-west traffic. East-west traffic traditionally remains inside a data center or a VPC. A service mesh can be connected to another service mesh in another data center or VPC to form a federated mesh. diff --git a/website/content/docs/connect/config-entries/ingress-gateway.mdx b/website/content/docs/connect/config-entries/ingress-gateway.mdx index dc36c9fb0b38..63d990f9d864 100644 --- a/website/content/docs/connect/config-entries/ingress-gateway.mdx +++ b/website/content/docs/connect/config-entries/ingress-gateway.mdx @@ -7,6 +7,14 @@ description: >- # Ingress gateway configuration entry reference + + +Ingress gateway is deprecated and will not be enhanced beyond its current capabilities. 
Ingress gateway is fully supported in this version but will be removed in a future release of Consul. + +Consul's API gateway is the recommended alternative to ingress gateway. + + + This topic provides configuration reference information for the ingress gateway configuration entry. An ingress gateway is a type of proxy you register as a service in Consul to enable network connectivity from external services to services inside of the service mesh. Refer to [Ingress gateways overview](/consul/docs/connect/gateways/ingress-gateway) for additional information. ## Configuration model diff --git a/website/content/docs/connect/gateways/index.mdx b/website/content/docs/connect/gateways/index.mdx index b333615c4ed0..0a002523af14 100644 --- a/website/content/docs/connect/gateways/index.mdx +++ b/website/content/docs/connect/gateways/index.mdx @@ -17,8 +17,6 @@ This topic provides an overview of the gateway features shipped with Consul. Gat ## Mesh Gateways --> **1.6.0+:** This feature is available in Consul versions 1.6.0 and newer. - Mesh gateways enable service mesh traffic to be routed between different Consul datacenters and admin partitions. The datacenters or partitions can reside in different clouds or runtime environments where general interconnectivity between all services in all datacenters isn't feasible. @@ -35,9 +33,37 @@ Mesh gateways enable the following scenarios: -> **Mesh gateway tutorial**: Follow the [mesh gateway tutorial](/consul/tutorials/developer-mesh/service-mesh-gateways) to learn concepts associated with mesh gateways. +## API Gateways + +API gateways enable network access, from outside a service mesh, to services running in a Consul service mesh. The +systems accessing the services in the mesh, may be within your organizational network or external to it. This type of +network traffic is commonly called _north-south_ network traffic because it refers to the flow of data into and out of +a specific environment. + +API gateways solve the following primary use cases: + +- **Control access at the point of entry**: Set the protocols of external connection + requests and secure inbound connections with TLS certificates from trusted + providers, such as Verisign and Let's Encrypt. +- **Simplify traffic management**: Load balance requests across services and route + traffic to the appropriate service by matching one or more criteria, such as + hostname, path, header presence or value, and HTTP method. + +Refer to the following documentation for information on how to configure and deploy API gateways: +- [API Gateways on VMs](/consul/docs/connect/gateways/api-gateway/usage) +- [API Gateways for Kubernetes](/consul/docs/api-gateway). + + ## Ingress Gateways --> **1.8.0+:** This feature is available in Consul versions 1.8.0 and newer. + + +Ingress gateway is deprecated and will not be enhanced beyond its current capabilities. Ingress gateway is fully supported +in this version but will be removed in a future release of Consul. + +Consul's API gateway is the recommended alternative to ingress gateway. + + Ingress gateways enable connectivity within your organizational network from services outside the Consul service mesh to services in the mesh. To accept ingress traffic from the public internet, use Consul's @@ -56,8 +82,6 @@ and the [ingress gateway tutorial](/consul/tutorials/developer-mesh/service-mesh ## Terminating Gateways --> **1.8.0+:** This feature is available in Consul versions 1.8.0 and newer. 
- Terminating gateways enable connectivity within your organizational network from services in the Consul service mesh to services outside the mesh. Services outside the mesh do not have sidecar proxies or are not [integrated natively](/consul/docs/connect/native). diff --git a/website/content/docs/connect/gateways/ingress-gateway/index.mdx b/website/content/docs/connect/gateways/ingress-gateway/index.mdx index 6d686c2c737b..3f0b4ea836f9 100644 --- a/website/content/docs/connect/gateways/ingress-gateway/index.mdx +++ b/website/content/docs/connect/gateways/ingress-gateway/index.mdx @@ -11,6 +11,15 @@ An ingress gateway is a type of proxy that enables network connectivity from ext ![Ingress Gateway Architecture](/img/ingress-gateways.png) + + +Ingress gateway is deprecated and will not be enhanced beyond its current capabilities. Ingress gateway is fully supported +in this version but will be removed in a future release of Consul. + +Consul's API gateway is the recommended alternative to ingress gateway. + + + ## Workflow The following stages describe how to add an ingress gateway to your service mesh: diff --git a/website/content/docs/k8s/connect/ingress-gateways.mdx b/website/content/docs/k8s/connect/ingress-gateways.mdx index 8deab0e03750..be1d8e297d2e 100644 --- a/website/content/docs/k8s/connect/ingress-gateways.mdx +++ b/website/content/docs/k8s/connect/ingress-gateways.mdx @@ -7,7 +7,14 @@ description: >- # Configure Ingress Gateways for Consul on Kubernetes --> 1.9.0+: This feature is available in Consul versions 1.9.0 and higher + + +Ingress gateway is deprecated and will not be enhanced beyond its current capabilities. Ingress gateway is fully supported +in this version but will be removed in a future release of Consul. + +Consul's API gateway is the recommended alternative to ingress gateway. + + ~> This topic requires familiarity with [Ingress Gateways](/consul/docs/connect/gateways/ingress-gateway). From 2229206bbe7f863bf2603d45e87a3a19bff7e3f2 Mon Sep 17 00:00:00 2001 From: Ronald Date: Fri, 14 Jul 2023 07:10:42 -0400 Subject: [PATCH 27/43] Add docs for jwt cluster configuration (#18004) ### Description - Add jwt-provider docs for jwks cluster configuration. 
The configuration was added here: https://github.com/hashicorp/consul/pull/17978 --- .../connect/config-entries/jwt-provider.mdx | 255 ++++++++++++++++++ 1 file changed, 255 insertions(+) diff --git a/website/content/docs/connect/config-entries/jwt-provider.mdx b/website/content/docs/connect/config-entries/jwt-provider.mdx index 8867a3e4f972..8716dde8c23f 100644 --- a/website/content/docs/connect/config-entries/jwt-provider.mdx +++ b/website/content/docs/connect/config-entries/jwt-provider.mdx @@ -28,6 +28,18 @@ The following list outlines field hierarchy, language-specific data types, and r - [`RequestTimeoutMs`](#jsonwebkeyset-remote-requesttimeoutms): integer - [`CacheDuration`](#jsonwebkeyset-remote-cacheduration): string | `5m` - [`FetchAsynchronously`](#jsonwebkeyset-remote-fetchasynchronously): boolean | `false` + - [`JWKSCluster`](#jsonwebkeyset-remote-jwkscluster): map + - [`DiscoveryType`](#jsonwebkeyset-remote-jwkscluster-discoverytype): string | `STRICT_DNS` + - [`ConnectTimeout`](#jsonwebkeyset-remote-jwkscluster-connecttimeout): string | `5s` + - [`TLSCertificates`](#jsonwebkeyset-remote-jwkscluster-tlscertificates): map + - [`CaCertificateProviderInstance`](#jsonwebkeyset-remote-jwkscluster-tlscertificates-cacertificateproviderinstance): map + - [`InstanceName`](#jsonwebkeyset-remote-jwkscluster-tlscertificates-cacertificateproviderinstance): string | `default` + - [`CertificateName`](#jsonwebkeyset-remote-jwkscluster-tlscertificates-cacertificateproviderinstance): string + - [`TrustedCA`](#jsonwebkeyset-remote-jwkscluster-tlscertificates-trustedca): map + - [`Filename`](#jsonwebkeyset-remote-jwkscluster-tlscertificates-trustedca): string + - [`EnvironmentVariable`](#jsonwebkeyset-remote-jwkscluster-tlscertificates-trustedca): string + - [`InlineString`](#jsonwebkeyset-remote-jwkscluster-tlscertificates-trustedca): string + - [`InlineBytes`](#jsonwebkeyset-remote-jwkscluster-tlscertificates-trustedca): string - [`RetryPolicy`](#jsonwebkeyset-remote-retrypolicy): map - [`NumRetries`](#jsonwebkeyset-remote-retrypolicy-numretries): integer | `0` - [`RetryPolicyBackoff`](#jsonwebkeyset-remote-retrypolicy-retry-policy-backoff): map @@ -75,6 +87,18 @@ The following list outlines field hierarchy, language-specific data types, and r - [`retryPolicyBackoff`](#spec-jsonwebkeyset-remote-retrypolicy-retry-policy-backoff): map - [`baseInterval`](#spec-jsonwebkeyset-remote-retrypolicy-retry-policy-backoff): string - [`maxInterval`](#spec-jsonwebkeyset-remote-retrypolicy-retry-policy-backoff): string + - [`jwksCluster`](#spec-jsonwebkeyset-remote-jwkscluster): map + - [`discoveryType`](#spec-jsonwebkeyset-remote-jwkscluster-discoverytype): string | `STRICT_DNS` + - [`connectTimeout`](#spec-jsonwebkeyset-remote-jwkscluster-connecttimeout): string | `5s` + - [`tlsCertificates`](#spec-jsonwebkeyset-remote-jwkscluster-tlscertificates): map + - [`caCertificateProviderInstance`](#spec-jsonwebkeyset-remote-jwkscluster-tlscertificates-cacertificateproviderinstance): map + - [`instanceName`](#spec-jsonwebkeyset-remote-jwkscluster-tlscertificates-cacertificateproviderinstance): string | `default` + - [`certificateName`](#spec-jsonwebkeyset-remote-jwkscluster-tlscertificates-cacertificateproviderinstance): string + - [`trustedCA`](#spec-jsonwebkeyset-remote-jwkscluster-tlscertificates-trustedca): map + - [`filename`](#spec-jsonwebkeyset-remote-jwkscluster-tlscertificates-trustedca): string + - [`environmentVariable`](#spec-jsonwebkeyset-remote-jwkscluster-tlscertificates-trustedca): string + - 
[`inlineString`](#spec-jsonwebkeyset-remote-jwkscluster-tlscertificates-trustedca): string + - [`inlineBytes`](#spec-jsonwebkeyset-remote-jwkscluster-tlscertificates-trustedca): string - [`audiences`](#spec-audiences): list of strings - [`locations`](#spec-locations): list of maps - [`header`](#spec-locations-header): map @@ -126,8 +150,29 @@ JSONWebKeySet = { MaxInterval = "10s" } } + JWKSCluster = { + DiscoveryType = "STATIC" + ConnectTimeout = "10s" + # specify only one child: TrustedCA or CaCertificateProviderInstance + TLSCertificates = { + # specify only one child: Filename, EnvironmentVariable, InlineString or InlineBytes + TrustedCA = { + Filename = "" + EnvironmentVariable = "" + InlineString = "" + InlineBytes = "\302\000\302\302\302\302" + } + } + TLSCertificates = { + CaCertificateProviderInstance = { + InstanceName = "" + CertificateName = "" + } + } + } } } + Audiences = [""] Locations = [ { @@ -185,6 +230,25 @@ CacheConfig = { "BaseInterval": "1s", "MaxInterval": "10s" } + }, + "JWKSCluster": { + "DiscoveryType": "STATIC", + "ConnectTimeout": "10s", + // specify only one child: TrustedCA or CaCertificateProviderInstance + "TLSCertificates": { + // specify only one child: Filename, EnvironmentVariable, InlineString or InlineBytes + "TrustedCA": { + "Filename": "", + "EnvironmentVariable": "", + "InlineString": "", + "InlineBytes": "\302\000\302\302\302\302" + }, + "TLSCertificates": { + "CaCertificateProviderInstance": { + "InstanceName": "", + "CertificateName": "" + } + } } } }, @@ -246,6 +310,21 @@ spec: # required retryPolicyBackoff: baseInterval: 1s maxInterval: 10s + jwksCluster: + discoveryType: STATIC + connectTimeout: 10s + # specify only one child: trustedCA or caCertificateProviderInstance + tlsCertificates: + # specify only one child: filename, environmentVariable, inlineString or inlineBytes + trustedCA: + filename: + environmentVariable: + inlineString: + inlineBytes: \302\000\302\302\302\302 + tlsCertificates: + caCertificateProviderInstance: + instanceName: + certificateName: audiences: [] locations: header: @@ -360,6 +439,7 @@ Specifies a remote source for the JSON Web Key Set and configures behavior when - [`CacheDuration`](#jsonwebkeyset-remote-cacheduration) - [`FetchAsynchronously`](#jsonwebkeyset-remote-fetchasynchronously) - [`RetryPolicy`](#jsonwebkeyset-remote-retrypolicy) + - [`JWKSCluster`](#jsonwebkeyset-remote-jwkscluster) ### `JSONWebKeySet{}.Remote{}.URI` @@ -436,6 +516,93 @@ Specifies a jittered exponential backoff strategy. When this field is empty, Env | `BaseInterval`| Specifies the base interval to use for the next back off computation. | String | `1s` | | `MaxInterval` | Specifies the maximum interval between retries. By default, this value is 10 times `BaseInterval`. | String | `10s` | +### `JSONWebKeySet{}.Remote{}.JWKSCluster` + +Defines how Envoy fetches the remote JSON Web Key Set URI. + +#### Values + +- Default: None +- Data type: Map that can contain the following parameters: + + - [`DiscoveryType`](#jsonwebkeyset-remote-jwkscluster-discoverytype) + - [`ConnectTimeout`](#jsonwebkeyset-remote-jwkscluster-connecttimeout) + - [`TLSCertificates`](#jsonwebkeyset-remote-jwkscluster-tlscertificates) + + +### `JSONWebKeySet{}.Remote{}.JWKSCluster{}.DiscoveryType` + +Specifies the service discovery type to use for resolving the cluster. 
+You can specify the following discovery types: +- `STRICT_DNS` +- `STATIC` +- `LOGICAL_DNS` +- `EDS` +- `ORIGINAL_DST` + +#### Values + +- Default: `STRICT_DNS` +- Data type: String + +### `JSONWebKeySet{}.Remote{}.JWKSCluster{}.ConnectTimeout` + +Specifies the duration of time new network connections attempt to connect to hosts in the cluster before they timeout. + +#### Values + +- Default: `5s` +- Data type: String + +### `JSONWebKeySet{}.Remote{}.JWKSCluster{}.TLSCertificates` + +Specifies the data containing certificate authority certificates to use for verifying a presented peer certificate. +Envoy does not verify certificates that peers present if this field is not configured. + +You cannot specify [`TLSCertificates{}.CaCertificateProviderInstance`](#jsonwebkeyset-remote-jwkscluster-tlscertificates-cacertificateproviderinstance) and [`TLSCertificates{}.TrustedCA`](#jsonwebkeyset-remote-jwkscluster-tlscertificates-trustedca) in the same map. + +#### Values + +- Default: None +- Data type: Map that can contain the following parameters: + + - [`CaCertificateProviderInstance`](#jsonwebkeyset-remote-jwkscluster-tlscertificates-cacertificateproviderinstance) + - [`TrustedCA`](#jsonwebkeyset-remote-jwkscluster-tlscertificates-trustedca) + +### `JSONWebKeySet{}.Remote{}.JWKSCluster{}.TLSCertificates{}.CaCertificateProviderInstance` + +Speficies the certificate provider instance for fetching TLS certificates. + +#### Values + +- Default: None +- Data type: Map that can contain the following parameters: + +| Parameter | Description | Data type | Default value | +| :-------- | :------------------------------------------------- | :-------- | :------------ | +| `InstanceName`| Refers to the certificate provider instance name. | String | `default` | +| `CertificateName` | Specifies the certificate instances or types. For example, use `ROOTCA` to specify a root-certificate. | String | None | + +### `JSONWebKeySet{}.Remote{}.JWKSCluster{}.TLSCertificates{}.TrustedCA` + +Specifies TLS certificate data containing certificate authority certificates. Specify exactly one of the following data holders: +- `Filename` +- `EnvironmentVariable` +- `InlineString` +- `InlineBytes` + +#### Values + +- Default: None +- Data type: Map containing one of the following parameters: + +| Parameter | Description | Data type | Default value | +| :-------- | :------------------------------------------------- | :-------- | :------------ | +| `Filename`| The name of the file on the local system to use a data source for trusted CA certificates. | String | None | +| `EnvironmentVariable` | The environment variable on the local system to use a data source for trusted CA certificates. | String | None | +| `InlineString` | A string to inline in the configuration for use as a data source for trusted CA certificates. | String | None | +| `InlineBytes` | A sequence of bytes to inline in the configuration for use as a data source for trusted CA certificates. | String | None | + ### `Audiences` Specifies a set of audiences that the JWT is allowed to access, formatted as a list of `aud` (audience) claims. When this field is specified, all JWTs verified with the provider must address at least one of the audiences in order to be considered valid. @@ -794,6 +961,94 @@ Specifies a jittered exponential backoff strategy. When this field is empty, Env | `baseInterval`| Specifies the base interval to use for the next back off computation. | String | `1s` | | `maxInterval` | Specifies the maximum interval between retries. 
By default, this value is 10 times `BaseInterval`. | String | `10s` | +### `spec.jsonWebKeySet.remote.jwksCluster` + +Defines how Envoy fetches the remote JSON Web Key Set URI. + +#### Values + +- Default: None +- Data type: Map that can contain the following parameters: + + - [`discoveryType`](#spec-jsonwebkeyset-remote-jwkscluster-discoverytype) + - [`connectTimeout`](#spec-jsonwebkeyset-remote-jwkscluster-connecttimeout) + - [`tlsCertificates`](#spec-jsonwebkeyset-remote-jwkscluster-tlscertificates) + +### `spec.jsonWebKeySet.remote.jwksCluster.discoveryType` + +Specifies the service discovery type to use for resolving the cluster. +You can specify the following discovery types: +- `STRICT_DNS` +- `STATIC` +- `LOGICAL_DNS` +- `EDS` +- `ORIGINAL_DST` + +String values must be a valid [Cluster DiscoveryType](https://www.envoyproxy.io/docs/envoy/latest/api-v3/config/cluster/v3/cluster.proto#envoy-v3-api-enum-config-cluster-v3-cluster-discoverytype). + +#### Values + +- Default: `STRICT_DNS` +- Data type: String + +### `spec.jsonWebKeySet.remote.jwksCluster.connectTimeout` + +Specifies the timeout for new network connections to hosts in the cluster. + +#### Values + +- Default: `5s` +- Data type: String + +### `spec.jsonWebKeySet.remote.jwksCluster.tlsCertificates` + +Specifies the data containing certificate authority certificates to use for verifying a presented peer certificate. +Envoy does not verify certificates that peers present if this field is not configured. + +You cannot specify [`spec.tlsCertificates.caCertificateProviderInstance`](#spec-jsonwebkeyset-remote-jwkscluster-tlscertificates-cacertificateproviderinstance) and [`spec.tlsCertificates.trustedCA`](#spec-jsonwebkeyset-remote-jwkscluster-tlscertificates-trustedca) in the same map. + +#### Values + +- Default: None +- Data type: Map that can contain the following parameters: + + - [`caCertificateProviderInstance`](#spec-jsonwebkeyset-remote-jwkscluster-tlscertificates-cacertificateproviderinstance) + - [`trustedCA`](#spec-jsonwebkeyset-remote-jwkscluster-tlscertificates-trustedca) + +### `spec.jsonWebKeySet.remote.jwksCluster.tlsCertificates.caCertificateProviderInstance` + +Speficies the certificate provider instance for fetching TLS certificates. + +#### Values + +- Default: None +- Data type: Map that can contain the following parameters: + +| Parameter | Description | Data type | Default value | +| :-------- | :------------------------------------------------- | :-------- | :------------ | +| `instanceName`| Refers to the certificate provider instance name. | String | `default` | +| `certificateName` | Specifies the certificate instances or types. For example, use `ROOTCA` to specify a root-certificate. | String | None | + +### `spec.jsonWebKeySet.remote.jwksCluster.tlsCertificates.trustedCA` + +Specifies TLS certificate data containing certificate authority certificates. Specify exactly one of the following data holders: +- `Filename` +- `EnvironmentVariable` +- `InlineString` +- `InlineBytes` + +#### Values + +- Default: None +- Data type: Map containing one of the following parameters: + +| Parameter | Description | Data type | Default value | +| :-------- | :------------------------------------------------- | :-------- | :------------ | +| `filename`| The name of the file on the local system to use a data source for trusted CA certificates. | String | None | +| `environmentVariable` | The environment variable on the local system to use a data source for trusted CA certificates. 
| String | None | +| `inlineString` | A string to inline in the configuration for use as a data source for trusted CA certificates. | String | None | +| `inlineBytes` | A sequence of bytes to inline in the configuration for use as a data source for trusted CA certificates. | String | None | + ### `spec.audiences` Specifies a set of audiences that the JWT is allowed to access, formatted as a list of `aud` (audience) claims. When this field is specified, all JWTs verified with the provider must address at least one of the audiences in order to be considered valid. From ad6364af9ed80ba7a41c569011c4505ae5298819 Mon Sep 17 00:00:00 2001 From: cskh Date: Fri, 14 Jul 2023 09:44:21 -0400 Subject: [PATCH 28/43] Docs: fix unmatched bracket for health checks page (#18134) --- website/content/docs/services/usage/checks.mdx | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/website/content/docs/services/usage/checks.mdx b/website/content/docs/services/usage/checks.mdx index afbf53dcc99b..e72b219dd0bd 100644 --- a/website/content/docs/services/usage/checks.mdx +++ b/website/content/docs/services/usage/checks.mdx @@ -170,7 +170,7 @@ To enable script checks, you must first enable the agent to send external reques ``` -Refer to [Health Checks Configuration Reference](/consul/docs/services/configuration/checks-configuration-reference) for information about all health check configurations. +Refer to [Health Checks Configuration Reference](/consul/docs/services/configuration/checks-configuration-reference) for information about all health check configurations. ### Script check exit codes The following exit codes returned by the script check determine the health check status: @@ -185,7 +185,7 @@ Any output of the script is captured and made available in the `Output` field of _HTTP_ checks send an HTTP request to the specified URL and report the service health based on the [HTTP response code](#http-check-response-codes). We recommend using HTTP checks over [script checks](#script-checks) that use cURL or another external process to check an HTTP operation. ### HTTP check configuration -Add an `http` field to the `check` block in your service definition file and specify the HTTP address, including port number, for the check to call. All other fields are optional. Refer to [Health Checks Configuration Reference](/consul/docs/services/configuration/checks-configuration-reference) for information about all health check configurations. +Add an `http` field to the `check` block in your service definition file and specify the HTTP address, including port number, for the check to call. All other fields are optional. Refer to [Health Checks Configuration Reference](/consul/docs/services/configuration/checks-configuration-reference) for information about all health check configurations. In the following example, an HTTP check named `HTTP API on port 5000` sends a `POST` request to the `health` endpoint every 10 seconds: @@ -245,7 +245,7 @@ Responses larger than 4KB are truncated. The HTTP response determines the status TCP checks establish connections to the specified IPs or hosts. If the check successfully establishes a connection, the service status is reported as `success`. If the IP or host does not accept the connection, the service status is reported as `critical`. We recommend TCP checks over [script checks](#script-checks) that use netcat or another external process to check a socket operation. 
### TCP check configuration -Add a `tcp` field to the `check` block in your service definition file and specify the address, including port number, for the check to call. All other fields are optional. Refer to [Health Checks Configuration Reference](/consul/docs/services/configuration/checks-configuration-reference) for information about all health check configurations. +Add a `tcp` field to the `check` block in your service definition file and specify the address, including port number, for the check to call. All other fields are optional. Refer to [Health Checks Configuration Reference](/consul/docs/services/configuration/checks-configuration-reference) for information about all health check configurations. In the following example, a TCP check named `SSH TCP on port 22` attempts to connect to `localhost:22` every 10 seconds: @@ -320,7 +320,7 @@ By default, UDP checks timeout at 10 seconds, but you can specify a custom timeo OSService checks if an OS service is running on the host. OSService checks support Windows services on Windows hosts or SystemD services on Unix hosts. The check logs the service as `healthy` if it is running. If the service is not running, the status is logged as `critical`. All other results are logged with `warning`. A `warning` status indicates that the check is not reliable because an issue is preventing it from determining the health of the service. ### OSService check configurations -Add an `os_service` field to the `check` block in your service definition file and specify the name of the service to check. All other fields are optional. Refer to [Health Checks Configuration Reference](/consul/docs/services/configuration/checks-configuration-reference] for information about all health check configurations. +Add an `os_service` field to the `check` block in your service definition file and specify the name of the service to check. All other fields are optional. Refer to [Health Checks Configuration Reference](/consul/docs/services/configuration/checks-configuration-reference) for information about all health check configurations. In the following example, an OSService check named `svcname-001 Windows Service Health` verifies that the `myco-svctype-svcname-001` service is running every 10 seconds: @@ -363,7 +363,7 @@ TTL checks also persist their last known status to disk so that the Consul agent You can manually mark a service as unhealthy using the [`consul maint` CLI command](/consul/commands/maint) or [`agent/maintenance` HTTP API endpoint](/consul/api-docs/agent#enable-maintenance-mode), rather than waiting for a TTL health check if the `ttl` duration is high. ### TTL check configuration -Add a `ttl` field to the `check` block in your service definition file and specify how long to wait for an update from the external process. All other fields are optional. Refer to [Health Checks Configuration Reference](/consul/docs/services/configuration/checks-configuration-reference] for information about all health check configurations. +Add a `ttl` field to the `check` block in your service definition file and specify how long to wait for an update from the external process. All other fields are optional. Refer to [Health Checks Configuration Reference](/consul/docs/services/configuration/checks-configuration-reference) for information about all health check configurations. 
In the following example, a TTL check named `Web App Status` logs the application as `critical` if a status update is not received every 30 seconds: @@ -450,7 +450,7 @@ check = { gRPC checks send a request to the specified endpoint. These checks are intended for applications that support the standard [gRPC health checking protocol](https://github.com/grpc/grpc/blob/master/doc/health-checking.md). ### gRPC check configuration -Add a `grpc` field to the `check` block in your service definition file and specify the endpoint, including port number, for sending requests. All other fields are optional. Refer to [Health Checks Configuration Reference](/consul/docs/services/configuration/checks-configuration-reference] for information about all health check configurations. +Add a `grpc` field to the `check` block in your service definition file and specify the endpoint, including port number, for sending requests. All other fields are optional. Refer to [Health Checks Configuration Reference](/consul/docs/services/configuration/checks-configuration-reference) for information about all health check configurations. In the following example, a gRPC check named `Service health status` probes the entire application by sending requests to `127.0.0.1:12345` every 10 seconds: @@ -564,7 +564,7 @@ For aliased services on the same agent, the check monitors the local state witho For the blocking query, the alias check presents the ACL token set on the actual service or the token configured in the check definition. If neither are available, the alias check falls back to the default ACL token set for the agent. Refer to [`acl.tokens.default`](/consul/docs/agent/config/config-files#acl_tokens_default) for additional information about the default ACL token. ### Alias checks configuration -Add an `alias_service` field to the `check` block in your service definition file and specify the name of the service or node to alias. All other fields are optional. Refer to [Health Checks Configuration Reference](/consul/docs/services/configuration/checks-configuration-reference] for information about all health check configurations. +Add an `alias_service` field to the `check` block in your service definition file and specify the name of the service or node to alias. All other fields are optional. Refer to [Health Checks Configuration Reference](/consul/docs/services/configuration/checks-configuration-reference) for information about all health check configurations. In the following example, an alias check with the ID `web-alias` reports the health state of the `web` service: From 5208ea90e41c8ee91b4b47d42ef9381b6c55f253 Mon Sep 17 00:00:00 2001 From: Poonam Jadhav Date: Fri, 14 Jul 2023 14:09:02 -0400 Subject: [PATCH 29/43] NET-4657/add resource service client (#18053) ### Description Dan had already started on this [task](https://github.com/hashicorp/consul/pull/17849) which is needed to start building the HTTP APIs. This just needed some cleanup to get it ready for review. 
Overview: - Rename `internalResourceServiceClient` to `insecureResourceServiceClient` for name consistency - Configure a `secureResourceServiceClient` with auth enabled ### PR Checklist * [ ] ~updated test coverage~ * [ ] ~external facing docs updated~ * [x] appropriate backport labels added * [ ] ~not a security concern~ --- agent/acl_test.go | 4 +++ agent/agent.go | 4 +++ agent/consul/client.go | 15 ++++++++ agent/consul/server.go | 78 +++++++++++++++++++++++++++++++++--------- 4 files changed, 85 insertions(+), 16 deletions(-) diff --git a/agent/acl_test.go b/agent/acl_test.go index 40662231ac36..5e5969dd6472 100644 --- a/agent/acl_test.go +++ b/agent/acl_test.go @@ -22,6 +22,7 @@ import ( "github.com/hashicorp/consul/agent/structs" "github.com/hashicorp/consul/api" "github.com/hashicorp/consul/lib" + "github.com/hashicorp/consul/proto-public/pbresource" "github.com/hashicorp/consul/sdk/testutil" "github.com/hashicorp/consul/types" @@ -163,6 +164,9 @@ func (a *TestACLAgent) Stats() map[string]map[string]string { func (a *TestACLAgent) ReloadConfig(_ consul.ReloadableConfig) error { return fmt.Errorf("Unimplemented") } +func (a *TestACLAgent) ResourceServiceClient() pbresource.ResourceServiceClient { + return nil +} func TestACL_Version8EnabledByDefault(t *testing.T) { t.Parallel() diff --git a/agent/agent.go b/agent/agent.go index 881b94209d84..ef6559235205 100644 --- a/agent/agent.go +++ b/agent/agent.go @@ -71,6 +71,7 @@ import ( "github.com/hashicorp/consul/lib/mutex" "github.com/hashicorp/consul/lib/routine" "github.com/hashicorp/consul/logging" + "github.com/hashicorp/consul/proto-public/pbresource" "github.com/hashicorp/consul/proto/private/pboperator" "github.com/hashicorp/consul/proto/private/pbpeering" "github.com/hashicorp/consul/tlsutil" @@ -198,6 +199,9 @@ type delegate interface { RPC(ctx context.Context, method string, args interface{}, reply interface{}) error + // ResourceServiceClient is a client for the gRPC Resource Service. + ResourceServiceClient() pbresource.ResourceServiceClient + SnapshotRPC(args *structs.SnapshotRequest, in io.Reader, out io.Writer, replyFn structs.SnapshotReplyFn) error Shutdown() error Stats() map[string]map[string]string diff --git a/agent/consul/client.go b/agent/consul/client.go index e4a3f83324a1..256e0e58e379 100644 --- a/agent/consul/client.go +++ b/agent/consul/client.go @@ -25,6 +25,7 @@ import ( "github.com/hashicorp/consul/agent/structs" "github.com/hashicorp/consul/lib" "github.com/hashicorp/consul/logging" + "github.com/hashicorp/consul/proto-public/pbresource" "github.com/hashicorp/consul/tlsutil" "github.com/hashicorp/consul/types" ) @@ -93,6 +94,9 @@ type Client struct { EnterpriseClient tlsConfigurator *tlsutil.Configurator + + // resourceServiceClient is a client for the gRPC Resource Service. + resourceServiceClient pbresource.ResourceServiceClient } // NewClient creates and returns a Client @@ -151,6 +155,13 @@ func NewClient(config *Config, deps Deps) (*Client, error) { } c.router = deps.Router + conn, err := deps.GRPCConnPool.ClientConn(deps.ConnPool.Datacenter) + if err != nil { + c.Shutdown() + return nil, fmt.Errorf("Failed to get gRPC client connection: %w", err) + } + c.resourceServiceClient = pbresource.NewResourceServiceClient(conn) + // Start LAN event handlers after the router is complete since the event // handlers depend on the router and the router depends on Serf. 
go c.lanEventHandler() @@ -451,3 +462,7 @@ func (c *Client) AgentEnterpriseMeta() *acl.EnterpriseMeta { func (c *Client) agentSegmentName() string { return c.config.Segment } + +func (c *Client) ResourceServiceClient() pbresource.ResourceServiceClient { + return c.resourceServiceClient +} diff --git a/agent/consul/server.go b/agent/consul/server.go index 6bb424c67535..2cfe9cb0aae8 100644 --- a/agent/consul/server.go +++ b/agent/consul/server.go @@ -442,10 +442,20 @@ type Server struct { // typeRegistry contains Consul's registered resource types. typeRegistry resource.Registry - // internalResourceServiceClient is a client that can be used to communicate - // with the Resource Service in-process (i.e. not via the network) without auth. - // It should only be used for purely-internal workloads, such as controllers. - internalResourceServiceClient pbresource.ResourceServiceClient + // resourceServiceServer implements the Resource Service. + resourceServiceServer *resourcegrpc.Server + + // insecureResourceServiceClient is a client that can be used to communicate + // with the Resource Service in-process (i.e. not via the network) *without* + // auth. It should only be used for purely-internal workloads, such as + // controllers. + insecureResourceServiceClient pbresource.ResourceServiceClient + + // secureResourceServiceClient is a client that can be used to communicate + // with the Resource Service in-process (i.e. not via the network) *with* auth. + // It can be used to make requests to the Resource Service on behalf of the user + // (e.g. from the HTTP API). + secureResourceServiceClient pbresource.ResourceServiceClient // controllerManager schedules the execution of controllers. controllerManager *controller.Manager @@ -803,11 +813,16 @@ func NewServer(config *Config, flat Deps, externalGRPCServer *grpc.Server, incom s.grpcHandler = newGRPCHandlerFromConfig(flat, config, s) s.grpcLeaderForwarder = flat.LeaderForwarder - if err := s.setupInternalResourceService(logger); err != nil { + if err := s.setupSecureResourceServiceClient(); err != nil { + return nil, err + } + + if err := s.setupInsecureResourceServiceClient(logger); err != nil { return nil, err } + s.controllerManager = controller.NewManager( - s.internalResourceServiceClient, + s.insecureResourceServiceClient, logger.Named(logging.ControllerRuntime), ) s.registerResources(flat) @@ -929,6 +944,7 @@ func newGRPCHandlerFromConfig(deps Deps, config *Config, s *Server) connHandler s.peerStreamServer.Register(srv) s.externalACLServer.Register(srv) s.externalConnectCAServer.Register(srv) + s.resourceServiceServer.Register(srv) } return agentgrpc.NewHandler(deps.Logger, config.RPCAddr, register, nil, s.incomingRPCLimiter) @@ -1334,23 +1350,50 @@ func (s *Server) setupExternalGRPC(config *Config, logger hclog.Logger) { }) s.peerStreamServer.Register(s.externalGRPCServer) - resourcegrpc.NewServer(resourcegrpc.Config{ + s.resourceServiceServer = resourcegrpc.NewServer(resourcegrpc.Config{ Registry: s.typeRegistry, Backend: s.raftStorageBackend, ACLResolver: s.ACLResolver, Logger: logger.Named("grpc-api.resource"), - }).Register(s.externalGRPCServer) + }) + s.resourceServiceServer.Register(s.externalGRPCServer) } -func (s *Server) setupInternalResourceService(logger hclog.Logger) error { - server := grpc.NewServer() - - resourcegrpc.NewServer(resourcegrpc.Config{ +func (s *Server) setupInsecureResourceServiceClient(logger hclog.Logger) error { + server := resourcegrpc.NewServer(resourcegrpc.Config{ Registry: s.typeRegistry, Backend: 
s.raftStorageBackend, ACLResolver: resolver.DANGER_NO_AUTH{}, Logger: logger.Named("grpc-api.resource"), - }).Register(server) + }) + + conn, err := s.runInProcessGRPCServer(server.Register) + if err != nil { + return err + } + s.insecureResourceServiceClient = pbresource.NewResourceServiceClient(conn) + + return nil +} + +func (s *Server) setupSecureResourceServiceClient() error { + conn, err := s.runInProcessGRPCServer(s.resourceServiceServer.Register) + if err != nil { + return err + } + s.secureResourceServiceClient = pbresource.NewResourceServiceClient(conn) + + return nil +} + +// runInProcessGRPCServer runs a gRPC server that can only be accessed in the +// same process, rather than over the network, using a pipe listener. +func (s *Server) runInProcessGRPCServer(registerFn ...func(*grpc.Server)) (*grpc.ClientConn, error) { + server := grpc.NewServer() + + for _, fn := range registerFn { + fn(server) + } pipe := agentgrpc.NewPipeListener() go server.Serve(pipe) @@ -1367,15 +1410,14 @@ func (s *Server) setupInternalResourceService(logger hclog.Logger) error { ) if err != nil { server.Stop() - return err + return nil, err } go func() { <-s.shutdownCh conn.Close() }() - s.internalResourceServiceClient = pbresource.NewResourceServiceClient(conn) - return nil + return conn, nil } // Shutdown is used to shutdown the server @@ -2095,6 +2137,10 @@ func (s *Server) hcpServerStatus(deps Deps) hcp.StatusCallback { } } +func (s *Server) ResourceServiceClient() pbresource.ResourceServiceClient { + return s.secureResourceServiceClient +} + func fileExists(name string) (bool, error) { _, err := os.Stat(name) if err == nil { From 747a4c73c1eea8da2181b2cbb1ffa5721efb1325 Mon Sep 17 00:00:00 2001 From: "Chris S. Kim" Date: Fri, 14 Jul 2023 15:58:33 -0400 Subject: [PATCH 30/43] Fix bug with Vault CA provider (#18112) Updating RootPKIPath but not IntermediatePKIPath would not update leaf signing certs with the new root. Unsure if this happens in practice but manual testing showed it is a bug that would break mesh and agent connections once the old root is pruned. 
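
For illustration, the operator-facing action that exercises this path is a CA config update that swaps only the root PKI mount. A minimal sketch using the Go API client follows, assuming the standard `api` package's `CASetConfig` helper; the address, token, and mount paths are illustrative and not taken from this change:

```go
package main

import (
	"log"

	"github.com/hashicorp/consul/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// Point the Vault CA provider at a new root PKI mount while keeping the
	// existing intermediate mount. Before this fix, the leaf signing cert was
	// not renewed against the new root in this situation.
	_, err = client.Connect().CASetConfig(&api.CAConfig{
		Provider: "vault",
		Config: map[string]interface{}{
			"Address":             "http://127.0.0.1:8200", // illustrative
			"Token":               "<vault-token>",         // illustrative
			"RootPKIPath":         "pki-root-2/",           // changed
			"IntermediatePKIPath": "pki-intermediate/",     // unchanged
		},
	}, nil)
	if err != nil {
		log.Fatal(err)
	}
}
```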
--- .changelog/18112.txt | 3 + agent/connect/ca/mock_Provider.go | 48 +++++------ agent/connect/ca/provider.go | 9 +- agent/connect/ca/provider_aws.go | 10 +-- agent/connect/ca/provider_aws_test.go | 18 ++-- agent/connect/ca/provider_consul.go | 18 ++-- agent/connect/ca/provider_consul_test.go | 16 ++-- agent/connect/ca/provider_vault.go | 31 +++---- agent/connect/ca/provider_vault_test.go | 25 +++--- agent/consul/leader_connect_ca.go | 70 +++++++++++---- agent/consul/leader_connect_ca_test.go | 105 +++++++++++++++++++---- agent/consul/leader_connect_test.go | 10 ++- 12 files changed, 227 insertions(+), 136 deletions(-) create mode 100644 .changelog/18112.txt diff --git a/.changelog/18112.txt b/.changelog/18112.txt new file mode 100644 index 000000000000..ddd37786f552 --- /dev/null +++ b/.changelog/18112.txt @@ -0,0 +1,3 @@ +```release-note:bug +ca: Fixes a Vault CA provider bug where updating RootPKIPath but not IntermediatePKIPath would not renew leaf signing certificates +``` diff --git a/agent/connect/ca/mock_Provider.go b/agent/connect/ca/mock_Provider.go index 0c9725f5ee08..0a745ad9bb3f 100644 --- a/agent/connect/ca/mock_Provider.go +++ b/agent/connect/ca/mock_Provider.go @@ -89,14 +89,13 @@ func (_m *MockProvider) CrossSignCA(_a0 *x509.Certificate) (string, error) { return r0, r1 } -// GenerateIntermediateCSR provides a mock function with given fields: -func (_m *MockProvider) GenerateIntermediateCSR() (string, string, error) { +// GenerateCAChain provides a mock function with given fields: +func (_m *MockProvider) GenerateCAChain() (string, error) { ret := _m.Called() var r0 string - var r1 string - var r2 error - if rf, ok := ret.Get(0).(func() (string, string, error)); ok { + var r1 error + if rf, ok := ret.Get(0).(func() (string, error)); ok { return rf() } if rf, ok := ret.Get(0).(func() string); ok { @@ -105,43 +104,44 @@ func (_m *MockProvider) GenerateIntermediateCSR() (string, string, error) { r0 = ret.Get(0).(string) } - if rf, ok := ret.Get(1).(func() string); ok { + if rf, ok := ret.Get(1).(func() error); ok { r1 = rf() } else { - r1 = ret.Get(1).(string) - } - - if rf, ok := ret.Get(2).(func() error); ok { - r2 = rf() - } else { - r2 = ret.Error(2) + r1 = ret.Error(1) } - return r0, r1, r2 + return r0, r1 } -// GenerateCAChain provides a mock function with given fields: -func (_m *MockProvider) GenerateCAChain() (CAChainResult, error) { +// GenerateIntermediateCSR provides a mock function with given fields: +func (_m *MockProvider) GenerateIntermediateCSR() (string, string, error) { ret := _m.Called() - var r0 CAChainResult - var r1 error - if rf, ok := ret.Get(0).(func() (CAChainResult, error)); ok { + var r0 string + var r1 string + var r2 error + if rf, ok := ret.Get(0).(func() (string, string, error)); ok { return rf() } - if rf, ok := ret.Get(0).(func() CAChainResult); ok { + if rf, ok := ret.Get(0).(func() string); ok { r0 = rf() } else { - r0 = ret.Get(0).(CAChainResult) + r0 = ret.Get(0).(string) } - if rf, ok := ret.Get(1).(func() error); ok { + if rf, ok := ret.Get(1).(func() string); ok { r1 = rf() } else { - r1 = ret.Error(1) + r1 = ret.Get(1).(string) } - return r0, r1 + if rf, ok := ret.Get(2).(func() error); ok { + r2 = rf() + } else { + r2 = ret.Error(2) + } + + return r0, r1, r2 } // SetIntermediate provides a mock function with given fields: intermediatePEM, rootPEM, opaque diff --git a/agent/connect/ca/provider.go b/agent/connect/ca/provider.go index 25d0deae3595..2ef34228bc48 100644 --- a/agent/connect/ca/provider.go +++ 
b/agent/connect/ca/provider.go @@ -135,9 +135,12 @@ type PrimaryProvider interface { // provider. // // Depending on the provider and its configuration, GenerateCAChain may return - // a single root certificate or a chain of certs. The provider should return an - // existing CA chain if one exists or generate a new one and return it. - GenerateCAChain() (CAChainResult, error) + // a single root certificate or a chain of certs. + // The first certificate must be the primary CA used to sign intermediates for + // secondary datacenters, and the last certificate must be the trusted CA. + // The provider should return an existing CA chain if one exists or generate a + // new one and return it. + GenerateCAChain() (string, error) // SignIntermediate will validate the CSR to ensure the trust domain in the // URI SAN matches the local one and that basic constraints for a CA diff --git a/agent/connect/ca/provider_aws.go b/agent/connect/ca/provider_aws.go index b7808edf9480..d45f3295a8e7 100644 --- a/agent/connect/ca/provider_aws.go +++ b/agent/connect/ca/provider_aws.go @@ -140,19 +140,19 @@ func (a *AWSProvider) State() (map[string]string, error) { } // GenerateCAChain implements Provider -func (a *AWSProvider) GenerateCAChain() (CAChainResult, error) { +func (a *AWSProvider) GenerateCAChain() (string, error) { if !a.isPrimary { - return CAChainResult{}, fmt.Errorf("provider is not the root certificate authority") + return "", fmt.Errorf("provider is not the root certificate authority") } if err := a.ensureCA(); err != nil { - return CAChainResult{}, err + return "", err } if a.rootPEM == "" { - return CAChainResult{}, fmt.Errorf("AWS CA provider not fully Initialized") + return "", fmt.Errorf("AWS CA provider not fully Initialized") } - return CAChainResult{PEM: a.rootPEM}, nil + return a.rootPEM, nil } // ensureCA loads the CA resource to check it exists if configured by User or in diff --git a/agent/connect/ca/provider_aws_test.go b/agent/connect/ca/provider_aws_test.go index 8c6a1f679f3e..cba2897fa26a 100644 --- a/agent/connect/ca/provider_aws_test.go +++ b/agent/connect/ca/provider_aws_test.go @@ -49,9 +49,8 @@ func TestAWSBootstrapAndSignPrimary(t *testing.T) { provider := testAWSProvider(t, testProviderConfigPrimary(t, cfg)) defer provider.Cleanup(true, nil) - root, err := provider.GenerateCAChain() + rootPEM, err := provider.GenerateCAChain() require.NoError(t, err) - rootPEM := root.PEM // Ensure they use the right key type rootCert, err := connect.ParseCert(rootPEM) @@ -76,9 +75,8 @@ func TestAWSBootstrapAndSignPrimary(t *testing.T) { provider := testAWSProvider(t, testProviderConfigPrimary(t, nil)) defer provider.Cleanup(true, nil) - root, err := provider.GenerateCAChain() + rootPEM, err := provider.GenerateCAChain() require.NoError(t, err) - rootPEM := root.PEM // Ensure they use the right key type rootCert, err := connect.ParseCert(rootPEM) @@ -111,9 +109,8 @@ func TestAWSBootstrapAndSignSecondary(t *testing.T) { p1 := testAWSProvider(t, testProviderConfigPrimary(t, nil)) defer p1.Cleanup(true, nil) - root, err := p1.GenerateCAChain() + rootPEM, err := p1.GenerateCAChain() require.NoError(t, err) - rootPEM := root.PEM p2 := testAWSProvider(t, testProviderConfigSecondary(t, nil)) defer p2.Cleanup(true, nil) @@ -140,9 +137,8 @@ func TestAWSBootstrapAndSignSecondary(t *testing.T) { cfg1 := testProviderConfigPrimary(t, nil) cfg1.State = p1State p1 = testAWSProvider(t, cfg1) - root, err := p1.GenerateCAChain() + newRootPEM, err := p1.GenerateCAChain() require.NoError(t, err) - newRootPEM 
:= root.PEM cfg2 := testProviderConfigPrimary(t, nil) cfg2.State = p2State @@ -174,9 +170,8 @@ func TestAWSBootstrapAndSignSecondary(t *testing.T) { "ExistingARN": p1State[AWSStateCAARNKey], }) p1 = testAWSProvider(t, cfg1) - root, err := p1.GenerateCAChain() + newRootPEM, err := p1.GenerateCAChain() require.NoError(t, err) - newRootPEM := root.PEM cfg2 := testProviderConfigPrimary(t, map[string]interface{}{ "ExistingARN": p2State[AWSStateCAARNKey], @@ -213,9 +208,8 @@ func TestAWSBootstrapAndSignSecondary(t *testing.T) { p2 = testAWSProvider(t, cfg2) require.NoError(t, p2.SetIntermediate(newIntPEM, newRootPEM, "")) - root, err = p1.GenerateCAChain() + newRootPEM, err = p1.GenerateCAChain() require.NoError(t, err) - newRootPEM = root.PEM newIntPEM, err = p2.ActiveLeafSigningCert() require.NoError(t, err) diff --git a/agent/connect/ca/provider_consul.go b/agent/connect/ca/provider_consul.go index b35800c6da4c..01c4987e07d8 100644 --- a/agent/connect/ca/provider_consul.go +++ b/agent/connect/ca/provider_consul.go @@ -155,17 +155,17 @@ func (c *ConsulProvider) State() (map[string]string, error) { } // GenerateCAChain initializes a new root certificate and private key if needed. -func (c *ConsulProvider) GenerateCAChain() (CAChainResult, error) { +func (c *ConsulProvider) GenerateCAChain() (string, error) { providerState, err := c.getState() if err != nil { - return CAChainResult{}, err + return "", err } if !c.isPrimary { - return CAChainResult{}, fmt.Errorf("provider is not the root certificate authority") + return "", fmt.Errorf("provider is not the root certificate authority") } if providerState.RootCert != "" { - return CAChainResult{PEM: providerState.RootCert}, nil + return providerState.RootCert, nil } // Generate a private key if needed @@ -173,7 +173,7 @@ func (c *ConsulProvider) GenerateCAChain() (CAChainResult, error) { if c.config.PrivateKey == "" { _, pk, err := connect.GeneratePrivateKeyWithConfig(c.config.PrivateKeyType, c.config.PrivateKeyBits) if err != nil { - return CAChainResult{}, err + return "", err } newState.PrivateKey = pk } else { @@ -184,12 +184,12 @@ func (c *ConsulProvider) GenerateCAChain() (CAChainResult, error) { if c.config.RootCert == "" { nextSerial, err := c.incrementAndGetNextSerialNumber() if err != nil { - return CAChainResult{}, fmt.Errorf("error computing next serial number: %v", err) + return "", fmt.Errorf("error computing next serial number: %v", err) } ca, err := c.generateCA(newState.PrivateKey, nextSerial, c.config.RootCertTTL) if err != nil { - return CAChainResult{}, fmt.Errorf("error generating CA: %v", err) + return "", fmt.Errorf("error generating CA: %v", err) } newState.RootCert = ca } else { @@ -202,10 +202,10 @@ func (c *ConsulProvider) GenerateCAChain() (CAChainResult, error) { ProviderState: &newState, } if _, err := c.Delegate.ApplyCARequest(args); err != nil { - return CAChainResult{}, err + return "", err } - return CAChainResult{PEM: newState.RootCert}, nil + return newState.RootCert, nil } // GenerateIntermediateCSR creates a private key and generates a CSR diff --git a/agent/connect/ca/provider_consul_test.go b/agent/connect/ca/provider_consul_test.go index fd521a5bbe40..0c6959c7f5d4 100644 --- a/agent/connect/ca/provider_consul_test.go +++ b/agent/connect/ca/provider_consul_test.go @@ -93,10 +93,10 @@ func TestConsulCAProvider_Bootstrap(t *testing.T) { // Intermediate should be the same cert. 
inter, err := provider.ActiveLeafSigningCert() require.NoError(t, err) - require.Equal(t, root.PEM, inter) + require.Equal(t, root, inter) // Should be a valid cert - parsed, err := connect.ParseCert(root.PEM) + parsed, err := connect.ParseCert(root) require.NoError(t, err) require.Equal(t, parsed.URIs[0].String(), fmt.Sprintf("spiffe://%s.consul", conf.ClusterID)) requireNotEncoded(t, parsed.SubjectKeyId) @@ -128,10 +128,10 @@ func TestConsulCAProvider_Bootstrap_WithCert(t *testing.T) { root, err := provider.GenerateCAChain() require.NoError(t, err) - require.Equal(t, root.PEM, rootCA.RootCert) + require.Equal(t, root, rootCA.RootCert) // Should be a valid cert - parsed, err := connect.ParseCert(root.PEM) + parsed, err := connect.ParseCert(root) require.NoError(t, err) // test that the default root cert ttl was not applied to the provided cert @@ -298,7 +298,7 @@ func testCrossSignProviders(t *testing.T, provider1, provider2 Provider) { root, err := provider2.GenerateCAChain() require.NoError(t, err) - newRoot, err := connect.ParseCert(root.PEM) + newRoot, err := connect.ParseCert(root) require.NoError(t, err) oldSubject := newRoot.Subject.CommonName requireNotEncoded(t, newRoot.SubjectKeyId) @@ -321,7 +321,7 @@ func testCrossSignProviders(t *testing.T, provider1, provider2 Provider) { p1Root, err := provider1.GenerateCAChain() require.NoError(t, err) - oldRoot, err := connect.ParseCert(p1Root.PEM) + oldRoot, err := connect.ParseCert(p1Root) require.NoError(t, err) requireNotEncoded(t, oldRoot.SubjectKeyId) requireNotEncoded(t, oldRoot.AuthorityKeyId) @@ -385,7 +385,7 @@ func testCrossSignProvidersShouldFail(t *testing.T, provider1, provider2 Provide root, err := provider2.GenerateCAChain() require.NoError(t, err) - newRoot, err := connect.ParseCert(root.PEM) + newRoot, err := connect.ParseCert(root) require.NoError(t, err) requireNotEncoded(t, newRoot.SubjectKeyId) requireNotEncoded(t, newRoot.AuthorityKeyId) @@ -454,7 +454,7 @@ func testSignIntermediateCrossDC(t *testing.T, provider1, provider2 Provider) { require.NoError(t, err) root, err := provider1.GenerateCAChain() require.NoError(t, err) - rootPEM := root.PEM + rootPEM := root // Give the new intermediate to provider2 to use. require.NoError(t, provider2.SetIntermediate(intermediatePEM, rootPEM, opaque)) diff --git a/agent/connect/ca/provider_vault.go b/agent/connect/ca/provider_vault.go index 89350d87df3e..59b983dbecc4 100644 --- a/agent/connect/ca/provider_vault.go +++ b/agent/connect/ca/provider_vault.go @@ -280,9 +280,9 @@ func (v *VaultProvider) State() (map[string]string, error) { } // GenerateCAChain mounts and initializes a new root PKI backend if needed. -func (v *VaultProvider) GenerateCAChain() (CAChainResult, error) { +func (v *VaultProvider) GenerateCAChain() (string, error) { if !v.isPrimary { - return CAChainResult{}, fmt.Errorf("provider is not the root certificate authority") + return "", fmt.Errorf("provider is not the root certificate authority") } // Set up the root PKI backend if necessary. 
@@ -302,7 +302,7 @@ func (v *VaultProvider) GenerateCAChain() (CAChainResult, error) { }, }) if err != nil { - return CAChainResult{}, fmt.Errorf("failed to mount root CA backend: %w", err) + return "", fmt.Errorf("failed to mount root CA backend: %w", err) } // We want to initialize afterwards @@ -310,7 +310,7 @@ func (v *VaultProvider) GenerateCAChain() (CAChainResult, error) { case ErrBackendNotInitialized: uid, err := connect.CompactUID() if err != nil { - return CAChainResult{}, err + return "", err } resp, err := v.writeNamespaced(v.config.RootPKINamespace, v.config.RootPKIPath+"root/generate/internal", map[string]interface{}{ "common_name": connect.CACN("vault", uid, v.clusterID, v.isPrimary), @@ -319,23 +319,23 @@ func (v *VaultProvider) GenerateCAChain() (CAChainResult, error) { "key_bits": v.config.PrivateKeyBits, }) if err != nil { - return CAChainResult{}, fmt.Errorf("failed to initialize root CA: %w", err) + return "", fmt.Errorf("failed to initialize root CA: %w", err) } var ok bool rootPEM, ok = resp.Data["certificate"].(string) if !ok { - return CAChainResult{}, fmt.Errorf("unexpected response from Vault: %v", resp.Data["certificate"]) + return "", fmt.Errorf("unexpected response from Vault: %v", resp.Data["certificate"]) } default: if err != nil { - return CAChainResult{}, fmt.Errorf("unexpected error while setting root PKI backend: %w", err) + return "", fmt.Errorf("unexpected error while setting root PKI backend: %w", err) } } rootChain, err := v.getCAChain(v.config.RootPKINamespace, v.config.RootPKIPath) if err != nil { - return CAChainResult{}, err + return "", err } // Workaround for a bug in the Vault PKI API. @@ -344,18 +344,7 @@ func (v *VaultProvider) GenerateCAChain() (CAChainResult, error) { rootChain = rootPEM } - intermediate, err := v.ActiveLeafSigningCert() - if err != nil { - return CAChainResult{}, fmt.Errorf("error fetching active intermediate: %w", err) - } - if intermediate == "" { - intermediate, err = v.GenerateLeafSigningCert() - if err != nil { - return CAChainResult{}, fmt.Errorf("error generating intermediate: %w", err) - } - } - - return CAChainResult{PEM: rootChain, IntermediatePEM: intermediate}, nil + return rootChain, nil } // GenerateIntermediateCSR creates a private key and generates a CSR @@ -582,7 +571,7 @@ func (v *VaultProvider) getCAChain(namespace, path string) (string, error) { return root, nil } -// GenerateIntermediate mounts the configured intermediate PKI backend if +// GenerateLeafSigningCert mounts the configured intermediate PKI backend if // necessary, then generates and signs a new CA CSR using the root PKI backend // and updates the intermediate backend to use that new certificate. 
func (v *VaultProvider) GenerateLeafSigningCert() (string, error) { diff --git a/agent/connect/ca/provider_vault_test.go b/agent/connect/ca/provider_vault_test.go index 87dc1a04fe7a..008afa6081aa 100644 --- a/agent/connect/ca/provider_vault_test.go +++ b/agent/connect/ca/provider_vault_test.go @@ -420,7 +420,7 @@ func TestVaultCAProvider_Bootstrap(t *testing.T) { }, certFunc: func(provider *VaultProvider) (string, error) { root, err := provider.GenerateCAChain() - return root.PEM, err + return root, err }, backendPath: "pki-root/", rootCaCreation: true, @@ -497,9 +497,8 @@ func TestVaultCAProvider_SignLeaf(t *testing.T) { Service: "foo", } - root, err := provider.GenerateCAChain() + rootPEM, err := provider.GenerateCAChain() require.NoError(t, err) - rootPEM := root.PEM assertCorrectKeyType(t, tc.KeyType, rootPEM) intPEM, err := provider.ActiveLeafSigningCert() @@ -600,9 +599,9 @@ func TestVaultCAProvider_CrossSignCA(t *testing.T) { }) testutil.RunStep(t, "init", func(t *testing.T) { - root, err := provider1.GenerateCAChain() + rootPEM, err := provider1.GenerateCAChain() require.NoError(t, err) - assertCorrectKeyType(t, tc.SigningKeyType, root.PEM) + assertCorrectKeyType(t, tc.SigningKeyType, rootPEM) intPEM, err := provider1.ActiveLeafSigningCert() require.NoError(t, err) @@ -628,9 +627,9 @@ func TestVaultCAProvider_CrossSignCA(t *testing.T) { }) testutil.RunStep(t, "swap", func(t *testing.T) { - root, err := provider2.GenerateCAChain() + rootPEM, err := provider2.GenerateCAChain() require.NoError(t, err) - assertCorrectKeyType(t, tc.CSRKeyType, root.PEM) + assertCorrectKeyType(t, tc.CSRKeyType, rootPEM) intPEM, err := provider2.ActiveLeafSigningCert() require.NoError(t, err) @@ -1147,14 +1146,14 @@ func TestVaultCAProvider_GenerateIntermediate(t *testing.T) { // This test was created to ensure that our calls to Vault // returns a new Intermediate certificate and further calls // to ActiveLeafSigningCert return the same new cert. - new, err := provider.GenerateLeafSigningCert() + newLeaf, err := provider.GenerateLeafSigningCert() require.NoError(t, err) newActive, err := provider.ActiveLeafSigningCert() require.NoError(t, err) - require.Equal(t, new, newActive) - require.NotEqual(t, orig, new) + require.Equal(t, newLeaf, newActive) + require.NotEqual(t, orig, newLeaf) } func TestVaultCAProvider_AutoTidyExpiredIssuers(t *testing.T) { @@ -1240,9 +1239,8 @@ func TestVaultCAProvider_GenerateIntermediate_inSecondary(t *testing.T) { // Sign the CSR with primaryProvider. intermediatePEM, err := primaryProvider.SignIntermediate(csr) require.NoError(t, err) - root, err := primaryProvider.GenerateCAChain() + rootPEM, err := primaryProvider.GenerateCAChain() require.NoError(t, err) - rootPEM := root.PEM // Give the new intermediate to provider to use. require.NoError(t, provider.SetIntermediate(intermediatePEM, rootPEM, issuerID)) @@ -1261,9 +1259,8 @@ func TestVaultCAProvider_GenerateIntermediate_inSecondary(t *testing.T) { // Sign the CSR with primaryProvider. intermediatePEM, err := primaryProvider.SignIntermediate(csr) require.NoError(t, err) - root, err := primaryProvider.GenerateCAChain() + rootPEM, err := primaryProvider.GenerateCAChain() require.NoError(t, err) - rootPEM := root.PEM // Give the new intermediate to provider to use. 
require.NoError(t, provider.SetIntermediate(intermediatePEM, rootPEM, issuerID)) diff --git a/agent/consul/leader_connect_ca.go b/agent/consul/leader_connect_ca.go index c8c63c8874aa..717c9ff0b254 100644 --- a/agent/consul/leader_connect_ca.go +++ b/agent/consul/leader_connect_ca.go @@ -258,8 +258,8 @@ func (c *CAManager) initializeCAConfig() (*structs.CAConfiguration, error) { } // newCARoot returns a filled-in structs.CARoot from a raw PEM value. -func newCARoot(rootResult ca.CAChainResult, provider, clusterID string) (*structs.CARoot, error) { - primaryCert, err := connect.ParseCert(rootResult.PEM) +func newCARoot(caPem, provider, clusterID string) (*structs.CARoot, error) { + primaryCert, err := connect.ParseCert(caPem) if err != nil { return nil, err } @@ -275,17 +275,12 @@ func newCARoot(rootResult ca.CAChainResult, provider, clusterID string) (*struct ExternalTrustDomain: clusterID, NotBefore: primaryCert.NotBefore, NotAfter: primaryCert.NotAfter, - RootCert: lib.EnsureTrailingNewline(rootResult.PEM), + RootCert: lib.EnsureTrailingNewline(caPem), PrivateKeyType: keyType, PrivateKeyBits: keyBits, Active: true, } - if rootResult.IntermediatePEM == "" { - return caRoot, nil - } - if err := setLeafSigningCert(caRoot, rootResult.IntermediatePEM); err != nil { - return nil, fmt.Errorf("error setting leaf signing cert: %w", err) - } + return caRoot, nil } @@ -518,6 +513,19 @@ func (c *CAManager) primaryInitialize(provider ca.Provider, conf *structs.CAConf return err } + // provider may use intermediates for leaf signing in which case + // we need to generate a leaf signing CA. + if usesIntermediate, ok := provider.(ca.PrimaryUsesIntermediate); ok { + leafPem, err := usesIntermediate.GenerateLeafSigningCert() + if err != nil { + return fmt.Errorf("error generating new leaf signing cert: %w", err) + } + + if err := setLeafSigningCert(rootCA, leafPem); err != nil { + return fmt.Errorf("error setting leaf signing cert: %w", err) + } + } + var rootUpdateRequired bool if len(rootCA.IntermediateCerts) > 0 { rootUpdateRequired = true @@ -764,7 +772,6 @@ func (c *CAManager) UpdateConfiguration(args *structs.CARequest) (reterr error) return err } - // Exit early if it's a no-op change state := c.delegate.State() _, config, err := state.CAConfig(nil) if err != nil { @@ -780,6 +787,8 @@ func (c *CAManager) UpdateConfiguration(args *structs.CARequest) (reterr error) // Don't allow users to change the ClusterID. args.Config.ClusterID = config.ClusterID + + // Exit early if it's a no-op change if args.Config.Provider == config.Provider && reflect.DeepEqual(args.Config.Config, config.Config) { return nil } @@ -866,26 +875,53 @@ func (c *CAManager) primaryUpdateRootCA(newProvider ca.Provider, args *structs.C } args.Config.State = pState - providerRoot, err := newProvider.GenerateCAChain() + caPEM, err := newProvider.GenerateCAChain() if err != nil { return fmt.Errorf("error generating CA root certificate: %v", err) } - newRootPEM := providerRoot.PEM - newActiveRoot, err := newCARoot(providerRoot, args.Config.Provider, args.Config.ClusterID) + newActiveRoot, err := newCARoot(caPEM, args.Config.Provider, args.Config.ClusterID) if err != nil { return err } + // Fetch the existing root CA to compare with the current one. state := c.delegate.State() - // Compare the new provider's root CA ID to the current one. If they - // match, just update the existing provider with the new config. - // If they don't match, begin the root rotation process. 
_, root, err := state.CARootActive(nil) if err != nil { return err } + // provider may use intermediates for leaf signing in which case + // we may need to generate a leaf signing CA if the root has changed. + if usesIntermediate, ok := newProvider.(ca.PrimaryUsesIntermediate); ok { + var leafPemFunc func() (string, error) + if root != nil && root.ID == newActiveRoot.ID { + // If Root ID is the same, we can reuse the existing leaf signing cert + leafPemFunc = newProvider.ActiveLeafSigningCert + } else { + // If Root ID is different, we need to generate a new leaf signing cert + // else the trust chain will break when the old root expires. + leafPemFunc = usesIntermediate.GenerateLeafSigningCert + } + leafPem, err := leafPemFunc() + if err != nil { + return fmt.Errorf("error fetching leaf signing cert: %w", err) + } + // newProvider.ActiveLeafSigningCert may return a blank leafPem so we + // fall back to generating a new one just in case. + if leafPem == "" { + leafPem, err = usesIntermediate.GenerateLeafSigningCert() + if err != nil { + return fmt.Errorf("error generating new leaf signing cert: %w", err) + } + } + + if err := setLeafSigningCert(newActiveRoot, leafPem); err != nil { + return fmt.Errorf("error setting leaf signing cert: %w", err) + } + } + // If the root didn't change, just update the config and return. if root != nil && root.ID == newActiveRoot.ID { args.Op = structs.CAOpSetConfig @@ -919,7 +955,7 @@ func (c *CAManager) primaryUpdateRootCA(newProvider ca.Provider, args *structs.C // 3. Take the active root for the new provider and append the intermediate from step 2 // to its list of intermediates. // TODO: this cert is already parsed once in newCARoot, could we remove the second parse? - newRoot, err := connect.ParseCert(newRootPEM) + newRoot, err := connect.ParseCert(caPEM) if err != nil { return err } diff --git a/agent/consul/leader_connect_ca_test.go b/agent/consul/leader_connect_ca_test.go index a8ca93101e44..e1c2cf8506c2 100644 --- a/agent/consul/leader_connect_ca_test.go +++ b/agent/consul/leader_connect_ca_test.go @@ -259,8 +259,8 @@ type mockCAProvider struct { func (m *mockCAProvider) Configure(cfg ca.ProviderConfig) error { return nil } func (m *mockCAProvider) State() (map[string]string, error) { return nil, nil } -func (m *mockCAProvider) GenerateCAChain() (ca.CAChainResult, error) { - return ca.CAChainResult{PEM: m.rootPEM}, nil +func (m *mockCAProvider) GenerateCAChain() (string, error) { + return m.rootPEM, nil } func (m *mockCAProvider) GenerateIntermediateCSR() (string, string, error) { m.callbackCh <- "provider/GenerateIntermediateCSR" @@ -624,7 +624,7 @@ func TestCAManager_UpdateConfiguration_Vault_Primary(t *testing.T) { require.Equal(t, connect.HexString(cert.SubjectKeyId), origRoot.SigningKeyID) t.Run("update config without changing root", func(t *testing.T) { - err = s1.caManager.UpdateConfiguration(&structs.CARequest{ + require.NoError(t, s1.caManager.UpdateConfiguration(&structs.CARequest{ Config: &structs.CAConfiguration{ Provider: "vault", Config: map[string]interface{}{ @@ -635,50 +635,117 @@ func TestCAManager_UpdateConfiguration_Vault_Primary(t *testing.T) { "CSRMaxPerSecond": 100, }, }, - }) - require.NoError(t, err) - _, sameRoot, err := s1.fsm.State().CARootActive(nil) + })) + + _, newRoot, err := s1.fsm.State().CARootActive(nil) require.NoError(t, err) - require.Len(t, sameRoot.IntermediateCerts, 1) - sameRoot.CreateIndex = s1.caManager.providerRoot.CreateIndex - sameRoot.ModifyIndex = s1.caManager.providerRoot.ModifyIndex + 
require.Len(t, newRoot.IntermediateCerts, 1) + newRoot.CreateIndex = s1.caManager.providerRoot.CreateIndex + newRoot.ModifyIndex = s1.caManager.providerRoot.ModifyIndex - cert, err := connect.ParseCert(s1.caManager.getLeafSigningCertFromRoot(sameRoot)) + orig, err := connect.ParseCert(s1.caManager.getLeafSigningCertFromRoot(newRoot)) require.NoError(t, err) - require.Equal(t, connect.HexString(cert.SubjectKeyId), sameRoot.SigningKeyID) + require.Equal(t, connect.HexString(orig.SubjectKeyId), newRoot.SigningKeyID) - require.Equal(t, origRoot, sameRoot) - require.Equal(t, sameRoot, s1.caManager.providerRoot) + require.Equal(t, origRoot, newRoot) + require.Equal(t, newRoot, s1.caManager.providerRoot) }) - t.Run("update config and change root", func(t *testing.T) { + t.Run("update config and change root only", func(t *testing.T) { + // Read the active leaf CA + provider, _ := s1.caManager.getCAProvider() + + before, err := provider.ActiveLeafSigningCert() + require.NoError(t, err) + vaultToken2 := ca.CreateVaultTokenWithAttrs(t, vault.Client(), &ca.VaultTokenAttributes{ RootPath: "pki-root-2", - IntermediatePath: "pki-intermediate-2", + IntermediatePath: "pki-intermediate", ConsulManaged: true, + WithSudo: true, }) - err = s1.caManager.UpdateConfiguration(&structs.CARequest{ + require.NoError(t, s1.caManager.UpdateConfiguration(&structs.CARequest{ Config: &structs.CAConfiguration{ Provider: "vault", Config: map[string]interface{}{ "Address": vault.Addr, "Token": vaultToken2, "RootPKIPath": "pki-root-2/", - "IntermediatePKIPath": "pki-intermediate-2/", + "IntermediatePKIPath": "pki-intermediate/", + }, + }, + })) + + // fetch the new root from the state store to check that + // raft apply has occurred. + _, newRoot, err := s1.fsm.State().CARootActive(nil) + require.NoError(t, err) + require.Len(t, newRoot.IntermediateCerts, 2, + "expected one cross-sign cert and one local leaf sign cert") + + // Refresh provider + provider, _ = s1.caManager.getCAProvider() + + // Leaf signing cert should have been updated + after, err := provider.ActiveLeafSigningCert() + require.NoError(t, err) + + require.NotEqual(t, before, after, + "expected leaf signing cert to be changed after RootPKIPath was changed") + + cert, err = connect.ParseCert(after) + require.NoError(t, err) + + require.Equal(t, connect.HexString(cert.SubjectKeyId), newRoot.SigningKeyID) + }) + + t.Run("update config, change root and intermediate", func(t *testing.T) { + // Read the active leaf CA + provider, _ := s1.caManager.getCAProvider() + + before, err := provider.ActiveLeafSigningCert() + require.NoError(t, err) + + vaultToken3 := ca.CreateVaultTokenWithAttrs(t, vault.Client(), &ca.VaultTokenAttributes{ + RootPath: "pki-root-3", + IntermediatePath: "pki-intermediate-3", + ConsulManaged: true, + }) + + err = s1.caManager.UpdateConfiguration(&structs.CARequest{ + Config: &structs.CAConfiguration{ + Provider: "vault", + Config: map[string]interface{}{ + "Address": vault.Addr, + "Token": vaultToken3, + "RootPKIPath": "pki-root-3/", + "IntermediatePKIPath": "pki-intermediate-3/", }, }, }) require.NoError(t, err) + // fetch the new root from the state store to check that + // raft apply has occurred. 
_, newRoot, err := s1.fsm.State().CARootActive(nil) require.NoError(t, err) require.Len(t, newRoot.IntermediateCerts, 2, "expected one cross-sign cert and one local leaf sign cert") - require.NotEqual(t, origRoot.ID, newRoot.ID) - cert, err = connect.ParseCert(s1.caManager.getLeafSigningCertFromRoot(newRoot)) + // Refresh provider + provider, _ = s1.caManager.getCAProvider() + + // Leaf signing cert should have been updated + after, err := provider.ActiveLeafSigningCert() require.NoError(t, err) + + require.NotEqual(t, before, after, + "expected leaf signing cert to be changed after RootPKIPath and IntermediatePKIPath were changed") + + cert, err = connect.ParseCert(after) + require.NoError(t, err) + require.Equal(t, connect.HexString(cert.SubjectKeyId), newRoot.SigningKeyID) }) } diff --git a/agent/consul/leader_connect_test.go b/agent/consul/leader_connect_test.go index 105bf317bdfd..539226b297d3 100644 --- a/agent/consul/leader_connect_test.go +++ b/agent/consul/leader_connect_test.go @@ -1362,10 +1362,12 @@ func TestNewCARoot(t *testing.T) { } run := func(t *testing.T, tc testCase) { - root, err := newCARoot(ca.CAChainResult{ - PEM: tc.pem, - IntermediatePEM: tc.intermediatePem, - }, "provider-name", "cluster-id") + root, err := newCARoot( + tc.pem, + "provider-name", "cluster-id") + if tc.intermediatePem != "" { + setLeafSigningCert(root, tc.intermediatePem) + } if tc.expectedErr != "" { testutil.RequireErrorContains(t, err, tc.expectedErr) return From 5af73901a2887e11824d8b4dc872c89a5ce8662e Mon Sep 17 00:00:00 2001 From: John Murret Date: Fri, 14 Jul 2023 14:53:27 -0600 Subject: [PATCH 31/43] [NET-4897] net/http host header is now verified and request.host that contains socked now error (#18129) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ### Description This is related to https://github.com/hashicorp/consul/pull/18124 where we pinned the go versions in CI to 1.20.5 and 1.19.10. go 1.20.6 and 1.19.11 now validate request host headers for validity, including the hostname cannot be prefixed with slashes. For local communications (npipe://, unix://), the hostname is not used, but we need valid and meaningful hostname. Prior versions go Go would clean the host header, and strip slashes in the process, but go1.20.6 and go1.19.11 no longer do, and reject the host header. Around the community we are seeing that others are intercepting the req.host and if it starts with a slash or ends with .sock, they changing the host to localhost or another dummy value. [client: define a "dummy" hostname to use for local connections by thaJeztah · Pull Request #45942 · moby/moby](https://github.com/moby/moby/pull/45942) ### Testing & Reproduction steps Check CI tests. 
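
For illustration, the workaround pattern for socket-style addresses is to keep a valid dummy host on the request and dial the socket in the transport. A minimal, self-contained sketch follows; the socket path and endpoint are illustrative and not taken from this change:

```go
package main

import (
	"context"
	"fmt"
	"net"
	"net/http"
)

func main() {
	socketPath := "/var/run/consul.sock" // illustrative unix socket path

	client := &http.Client{
		Transport: &http.Transport{
			// Ignore the request's host/port and dial the unix socket directly.
			DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) {
				return (&net.Dialer{}).DialContext(ctx, "unix", socketPath)
			},
		},
	}

	// Use a dummy but valid host: go 1.20.6 / 1.19.11 reject hosts that start
	// with a slash, so a socket path can no longer be carried in req.Host.
	resp, err := client.Get("http://localhost/v1/agent/self")
	if err != nil {
		fmt.Println("request failed:", err)
		return
	}
	defer resp.Body.Close()
	fmt.Println("status:", resp.Status)
}
```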
### Links * [ ] updated test coverage * [ ] external facing docs updated * [ ] appropriate backport labels added * [ ] not a security concern --- .github/workflows/go-tests.yml | 10 +++------- api/api.go | 11 +++++++++++ 2 files changed, 14 insertions(+), 7 deletions(-) diff --git a/.github/workflows/go-tests.yml b/.github/workflows/go-tests.yml index be773c2b0e06..744a398b5aa4 100644 --- a/.github/workflows/go-tests.yml +++ b/.github/workflows/go-tests.yml @@ -376,7 +376,7 @@ jobs: runs-on: ${{ needs.setup.outputs.compute-xl }} repository-name: ${{ github.repository }} go-tags: "${{ github.event.repository.name == 'consul-enterprise' && 'consulent consulprem consuldev' || '' }}" - go-version: "1.19.10" + go-version: "1.19" permissions: id-token: write # NOTE: this permission is explicitly required for Vault auth. contents: read @@ -395,12 +395,7 @@ jobs: runs-on: ${{ needs.setup.outputs.compute-xl }} repository-name: ${{ github.repository }} go-tags: "${{ github.event.repository.name == 'consul-enterprise' && 'consulent consulprem consuldev' || '' }}" - # pinning this to 1.20.5 because this issue in go-testcontainers occurs - # in 1.20.6 with the error "http: invalid Host header, host port waiting failed" - # https://github.com/testcontainers/testcontainers-go/issues/1359 - # remove setting this when the above issue is fixed so that the reusable - # job will just get the go version from go.mod. - go-version: "1.20.5" + go-version: "1.20" permissions: id-token: write # NOTE: this permission is explicitly required for Vault auth. contents: read @@ -438,6 +433,7 @@ jobs: runs-on: ${{ needs.setup.outputs.compute-xl }} repository-name: ${{ github.repository }} go-tags: "${{ github.event.repository.name == 'consul-enterprise' && 'consulent consulprem consuldev' || '' }}" + go-version: "1.20" permissions: id-token: write # NOTE: this permission is explicitly required for Vault auth. contents: read diff --git a/api/api.go b/api/api.go index 1fe0c71b61e5..18bb3479c9be 100644 --- a/api/api.go +++ b/api/api.go @@ -1000,6 +1000,17 @@ func (r *request) toHTTP() (*http.Request, error) { return nil, err } + // validate that socket communications that do not use the host, detect + // slashes in the host name and replace it with local host. + // this is required since go started validating req.host in 1.20.6 and 1.19.11. + // prior to that they would strip out the slashes for you. They removed that + // behavior and added more strict validation as part of a CVE. + // https://github.com/golang/go/issues/60374 + // the hope is that + if strings.HasPrefix(r.url.Host, "/") { + r.url.Host = "localhost" + } + req.URL.Host = r.url.Host req.URL.Scheme = r.url.Scheme req.Host = r.url.Host From 691bc9673a24a53dfa62cfcc77cd99234f2183a1 Mon Sep 17 00:00:00 2001 From: John Murret Date: Fri, 14 Jul 2023 14:53:44 -0600 Subject: [PATCH 32/43] add a conditional around setting LANFilter.AllSegments to make sure it is valid (#18139) ### Description This is to correct a code problem because this assumes all segments, but when you get to Enterprise, you can be in partition that is not the default partition, in which case specifying all segments does not validate and fails. This is to correct the setting of this filter with `AllSegments` to `true` to only occur when in the the `default` partition. 
### Testing & Reproduction steps ### Links ### PR Checklist * [ ] updated test coverage * [ ] external facing docs updated * [ ] appropriate backport labels added * [ ] not a security concern --- agent/ui_endpoint.go | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/agent/ui_endpoint.go b/agent/ui_endpoint.go index 8f5184969693..5924e23f6f79 100644 --- a/agent/ui_endpoint.go +++ b/agent/ui_endpoint.go @@ -190,7 +190,10 @@ func AgentMembersMapAddrVer(s *HTTPHandlers, req *http.Request) (map[string]stri filter := consul.LANMemberFilter{ Partition: entMeta.PartitionOrDefault(), } - filter.AllSegments = true + if acl.IsDefaultPartition(filter.Partition) { + filter.AllSegments = true + } + lanMembers, err := s.agent.delegate.LANMembers(filter) if err != nil { return nil, err From 05b665e856a5b731422b54557f2f8ff83c977471 Mon Sep 17 00:00:00 2001 From: Nick Irvine <115657443+nfi-hashicorp@users.noreply.github.com> Date: Fri, 14 Jul 2023 14:13:41 -0700 Subject: [PATCH 33/43] chore: bump upgrade integrations tests to 1.15, 116 [NET-4743] (#18130) --- .github/workflows/test-integrations.yml | 4 ++-- test/integration/consul-container/libs/utils/version.go | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/test-integrations.yml b/.github/workflows/test-integrations.yml index 1a3876855d5e..37a53a85cb7d 100644 --- a/.github/workflows/test-integrations.yml +++ b/.github/workflows/test-integrations.yml @@ -22,7 +22,7 @@ env: GOTESTSUM_VERSION: "1.10.1" CONSUL_BINARY_UPLOAD_NAME: consul-bin # strip the hashicorp/ off the front of github.repository for consul - CONSUL_LATEST_IMAGE_NAME: ${{ endsWith(github.repository, '-enterprise') && github.repository || 'consul' }} + CONSUL_LATEST_IMAGE_NAME: ${{ endsWith(github.repository, '-enterprise') && github.repository || 'hashicorp/consul' }} GOPRIVATE: github.com/hashicorp # Required for enterprise deps jobs: @@ -479,7 +479,7 @@ jobs: strategy: fail-fast: false matrix: - consul-version: [ "1.14", "1.15"] + consul-version: [ "1.15", "1.16"] env: CONSUL_LATEST_VERSION: ${{ matrix.consul-version }} ENVOY_VERSION: "1.24.6" diff --git a/test/integration/consul-container/libs/utils/version.go b/test/integration/consul-container/libs/utils/version.go index 394cae35b116..91c4da5e3126 100644 --- a/test/integration/consul-container/libs/utils/version.go +++ b/test/integration/consul-container/libs/utils/version.go @@ -24,7 +24,7 @@ var ( ) const ( - DefaultImageNameOSS = "consul" + DefaultImageNameOSS = "hashicorp/consul" DefaultImageNameENT = "hashicorp/consul-enterprise" ImageVersionSuffixENT = "-ent" ) From e7194787a7ec5ba4ec1b809d3635d8aab49985ea Mon Sep 17 00:00:00 2001 From: wangxinyi7 <121973291+wangxinyi7@users.noreply.github.com> Date: Fri, 14 Jul 2023 18:00:17 -0700 Subject: [PATCH 34/43] re org resource type registry (#18133) --- agent/consul/client_test.go | 3 +++ agent/consul/options.go | 2 ++ agent/consul/server.go | 28 ++++++++++------------------ agent/consul/type_registry.go | 25 +++++++++++++++++++++++++ agent/rpc/peering/service_test.go | 3 +++ agent/setup.go | 2 ++ docs/resources/guide.md | 18 ++++++++++-------- 7 files changed, 55 insertions(+), 26 deletions(-) create mode 100644 agent/consul/type_registry.go diff --git a/agent/consul/client_test.go b/agent/consul/client_test.go index 4b8f5c433d8e..174820c0673a 100644 --- a/agent/consul/client_test.go +++ b/agent/consul/client_test.go @@ -14,6 +14,8 @@ import ( "testing" "time" + "github.com/hashicorp/consul/internal/resource" + 
"github.com/hashicorp/go-hclog" "github.com/hashicorp/serf/serf" "github.com/stretchr/testify/require" @@ -576,6 +578,7 @@ func newDefaultDeps(t *testing.T, c *Config) Deps { GetNetRPCInterceptorFunc: middleware.GetNetRPCInterceptor, EnterpriseDeps: newDefaultDepsEnterprise(t, logger, c), XDSStreamLimiter: limiter.NewSessionLimiter(), + Registry: resource.NewRegistry(), } } diff --git a/agent/consul/options.go b/agent/consul/options.go index 26cb2471a89b..4b1d088249cc 100644 --- a/agent/consul/options.go +++ b/agent/consul/options.go @@ -16,6 +16,7 @@ import ( "github.com/hashicorp/consul/agent/router" "github.com/hashicorp/consul/agent/rpc/middleware" "github.com/hashicorp/consul/agent/token" + "github.com/hashicorp/consul/internal/resource" "github.com/hashicorp/consul/tlsutil" ) @@ -29,6 +30,7 @@ type Deps struct { GRPCConnPool GRPCClientConner LeaderForwarder LeaderForwarder XDSStreamLimiter *limiter.SessionLimiter + Registry resource.Registry // GetNetRPCInterceptorFunc, if not nil, sets the net/rpc rpc.ServerServiceCallInterceptor on // the server side to record metrics around the RPC requests. If nil, no interceptor is added to // the rpc server. diff --git a/agent/consul/server.go b/agent/consul/server.go index 2cfe9cb0aae8..6e5ea29da082 100644 --- a/agent/consul/server.go +++ b/agent/consul/server.go @@ -19,6 +19,8 @@ import ( "sync/atomic" "time" + "github.com/hashicorp/consul/internal/resource" + "github.com/armon/go-metrics" "github.com/hashicorp/go-connlimit" "github.com/hashicorp/go-hclog" @@ -72,8 +74,6 @@ import ( "github.com/hashicorp/consul/agent/token" "github.com/hashicorp/consul/internal/catalog" "github.com/hashicorp/consul/internal/controller" - "github.com/hashicorp/consul/internal/mesh" - "github.com/hashicorp/consul/internal/resource" "github.com/hashicorp/consul/internal/resource/demo" "github.com/hashicorp/consul/internal/resource/reaper" raftstorage "github.com/hashicorp/consul/internal/storage/raft" @@ -439,9 +439,6 @@ type Server struct { // run by the Server routineManager *routine.Manager - // typeRegistry contains Consul's registered resource types. - typeRegistry resource.Registry - // resourceServiceServer implements the Resource Service. resourceServiceServer *resourcegrpc.Server @@ -536,7 +533,6 @@ func NewServer(config *Config, flat Deps, externalGRPCServer *grpc.Server, incom publisher: flat.EventPublisher, incomingRPCLimiter: incomingRPCLimiter, routineManager: routine.NewManager(logger.Named(logging.ConsulServer)), - typeRegistry: resource.NewRegistry(), } incomingRPCLimiter.Register(s) @@ -804,7 +800,7 @@ func NewServer(config *Config, flat Deps, externalGRPCServer *grpc.Server, incom go s.reportingManager.Run(&lib.StopChannelContext{StopCh: s.shutdownCh}) // Initialize external gRPC server - s.setupExternalGRPC(config, logger) + s.setupExternalGRPC(config, flat.Registry, logger) // Initialize internal gRPC server. 
// @@ -817,7 +813,7 @@ func NewServer(config *Config, flat Deps, externalGRPCServer *grpc.Server, incom return nil, err } - if err := s.setupInsecureResourceServiceClient(logger); err != nil { + if err := s.setupInsecureResourceServiceClient(flat.Registry, logger); err != nil { return nil, err } @@ -825,7 +821,7 @@ func NewServer(config *Config, flat Deps, externalGRPCServer *grpc.Server, incom s.insecureResourceServiceClient, logger.Named(logging.ControllerRuntime), ) - s.registerResources(flat) + s.registerControllers(flat) go s.controllerManager.Run(&lib.StopChannelContext{StopCh: shutdownCh}) go s.trackLeaderChanges() @@ -876,18 +872,14 @@ func NewServer(config *Config, flat Deps, externalGRPCServer *grpc.Server, incom return s, nil } -func (s *Server) registerResources(deps Deps) { +func (s *Server) registerControllers(deps Deps) { if stringslice.Contains(deps.Experiments, catalogResourceExperimentName) { - catalog.RegisterTypes(s.typeRegistry) catalog.RegisterControllers(s.controllerManager, catalog.DefaultControllerDependencies()) - - mesh.RegisterTypes(s.typeRegistry) } reaper.RegisterControllers(s.controllerManager) if s.config.DevMode { - demo.RegisterTypes(s.typeRegistry) demo.RegisterControllers(s.controllerManager) } } @@ -1285,7 +1277,7 @@ func (s *Server) setupRPC() error { } // Initialize and register services on external gRPC server. -func (s *Server) setupExternalGRPC(config *Config, logger hclog.Logger) { +func (s *Server) setupExternalGRPC(config *Config, typeRegistry resource.Registry, logger hclog.Logger) { s.externalACLServer = aclgrpc.NewServer(aclgrpc.Config{ ACLsEnabled: s.config.ACLsEnabled, ForwardRPC: func(info structs.RPCInfo, fn func(*grpc.ClientConn) error) (bool, error) { @@ -1351,7 +1343,7 @@ func (s *Server) setupExternalGRPC(config *Config, logger hclog.Logger) { s.peerStreamServer.Register(s.externalGRPCServer) s.resourceServiceServer = resourcegrpc.NewServer(resourcegrpc.Config{ - Registry: s.typeRegistry, + Registry: typeRegistry, Backend: s.raftStorageBackend, ACLResolver: s.ACLResolver, Logger: logger.Named("grpc-api.resource"), @@ -1359,9 +1351,9 @@ func (s *Server) setupExternalGRPC(config *Config, logger hclog.Logger) { s.resourceServiceServer.Register(s.externalGRPCServer) } -func (s *Server) setupInsecureResourceServiceClient(logger hclog.Logger) error { +func (s *Server) setupInsecureResourceServiceClient(typeRegistry resource.Registry, logger hclog.Logger) error { server := resourcegrpc.NewServer(resourcegrpc.Config{ - Registry: s.typeRegistry, + Registry: typeRegistry, Backend: s.raftStorageBackend, ACLResolver: resolver.DANGER_NO_AUTH{}, Logger: logger.Named("grpc-api.resource"), diff --git a/agent/consul/type_registry.go b/agent/consul/type_registry.go new file mode 100644 index 000000000000..1e5ba55a0ce9 --- /dev/null +++ b/agent/consul/type_registry.go @@ -0,0 +1,25 @@ +package consul + +import ( + "github.com/hashicorp/consul/internal/catalog" + "github.com/hashicorp/consul/internal/mesh" + "github.com/hashicorp/consul/internal/resource" + "github.com/hashicorp/consul/internal/resource/demo" +) + +// NewTypeRegistry returns a registry populated with all supported resource +// types. +// +// Note: the registry includes resource types that may not be suitable for +// production use (e.g. experimental or development resource types) because +// it is used in the CLI, where feature flags and other runtime configuration +// may not be available. 
+func NewTypeRegistry() resource.Registry { + registry := resource.NewRegistry() + + demo.RegisterTypes(registry) + mesh.RegisterTypes(registry) + catalog.RegisterTypes(registry) + + return registry +} diff --git a/agent/rpc/peering/service_test.go b/agent/rpc/peering/service_test.go index 9ae1f6597700..0af92f531629 100644 --- a/agent/rpc/peering/service_test.go +++ b/agent/rpc/peering/service_test.go @@ -15,6 +15,8 @@ import ( "testing" "time" + "github.com/hashicorp/consul/internal/resource" + "github.com/google/tcpproxy" "github.com/hashicorp/go-hclog" "github.com/hashicorp/go-uuid" @@ -1950,6 +1952,7 @@ func newDefaultDeps(t *testing.T, c *consul.Config) consul.Deps { NewRequestRecorderFunc: middleware.NewRequestRecorder, GetNetRPCInterceptorFunc: middleware.GetNetRPCInterceptor, XDSStreamLimiter: limiter.NewSessionLimiter(), + Registry: resource.NewRegistry(), } } diff --git a/agent/setup.go b/agent/setup.go index 4d5d0feed7a1..7599668e33ed 100644 --- a/agent/setup.go +++ b/agent/setup.go @@ -260,6 +260,8 @@ func NewBaseDeps(configLoader ConfigLoader, logOut io.Writer, providedLogger hcl d.XDSStreamLimiter = limiter.NewSessionLimiter() + d.Registry = consul.NewTypeRegistry() + return d, nil } diff --git a/docs/resources/guide.md b/docs/resources/guide.md index b3f389c006a6..c19566577b72 100644 --- a/docs/resources/guide.md +++ b/docs/resources/guide.md @@ -61,8 +61,10 @@ func RegisterTypes(r resource.Registry) { } ``` -Update the `registerResources` method in [`server.go`] to call your package's -type registration method: +Update the `NewTypeRegistry` method in [`type_registry.go`] to call your +package's type registration method: + +[`type_registry.go`]: ../../agent/consul/type_registry.go ```Go import ( @@ -71,15 +73,13 @@ import ( // … ) -func (s *Server) registerResources() { +func NewTypeRegistry() resource.Registry { // … - foo.RegisterTypes(s.typeRegistry) + foo.RegisterTypes(registry) // … } ``` -[`server.go`]: ../../agent/consul/server.go - That should be all you need to start using your new resource type. Test it out by starting an agent in dev mode: @@ -277,7 +277,9 @@ func (barReconciler) Reconcile(ctx context.Context, rt controller.Runtime, req c Next, register your controller with the controller manager. Another common pattern is to have your package expose a method for registering controllers, -which is also called from `registerResources` in [`server.go`]. +which is called from `registerControllers` in [`server.go`]. + +[`server.go`]: ../../agent/consul/server.go ```Go package foo @@ -290,7 +292,7 @@ func RegisterControllers(mgr *controller.Manager) { ```Go package consul -func (s *Server) registerResources() { +func (s *Server) registerControllers() { // … foo.RegisterControllers(s.controllerManager) // … From 593051848969fe7a69aa5e0815b0fc0b61a33268 Mon Sep 17 00:00:00 2001 From: Poonam Jadhav Date: Mon, 17 Jul 2023 09:44:49 -0400 Subject: [PATCH 35/43] fix: update delegateMock used in ENT (#18149) ### Description The mock is used in `http_ent_test` file which caused lint failures. For OSS->ENT parity adding the same change here. 
### Links Identified in OSS->ENT [merge PR](https://github.com/hashicorp/consul-enterprise/pull/6328) ### PR Checklist * [ ] ~updated test coverage~ * [ ] ~external facing docs updated~ * [x] appropriate backport labels added * [ ] ~not a security concern~ --- agent/delegate_mock_test.go | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/agent/delegate_mock_test.go b/agent/delegate_mock_test.go index 9f91a6a0d919..7f0593473e74 100644 --- a/agent/delegate_mock_test.go +++ b/agent/delegate_mock_test.go @@ -15,6 +15,7 @@ import ( "github.com/hashicorp/consul/agent/consul" "github.com/hashicorp/consul/agent/structs" "github.com/hashicorp/consul/lib" + "github.com/hashicorp/consul/proto-public/pbresource" ) type delegateMock struct { @@ -76,3 +77,7 @@ func (m *delegateMock) Stats() map[string]map[string]string { func (m *delegateMock) ReloadConfig(config consul.ReloadableConfig) error { return m.Called(config).Error(0) } + +func (m *delegateMock) ResourceServiceClient() pbresource.ResourceServiceClient { + return nil +} From bcc6a9d7523f0a411ed73b9d21e062aaa1f20daa Mon Sep 17 00:00:00 2001 From: Ronald Date: Mon, 17 Jul 2023 11:32:49 -0400 Subject: [PATCH 36/43] Use JWT-auth filter in metadata mode & Delegate validation to RBAC filter (#18062) ### Description - Currently the jwt-auth filter doesn't take into account the service identity when validating jwt-auth, it only takes into account the path and jwt provider during validation. This causes issues when multiple source intentions restrict access to an endpoint with different JWT providers. - To fix these issues, rather than use the JWT auth filter for validation, we use it in metadata mode and allow it to forward the successful validated JWT token payload to the RBAC filter which will make the decisions. This PR ensures requests with and without JWT tokens successfully go through the jwt-authn filter. The filter however only forwards the data for successful/valid tokens. On the RBAC filter level, we check the payload for claims and token issuer + existing rbac rules. ### Testing & Reproduction steps - This test covers a multi level jwt requirements (requirements at top level and permissions level). It also assumes you have envoy running, you have a redis and a sidecar proxy service registered, and have a way to generate jwks with jwt. I mostly use: https://www.scottbrady91.com/tools/jwt for this. - first write your proxy defaults ``` Kind = "proxy-defaults" name = "global" config { protocol = "http" } ``` - Create two providers ``` Kind = "jwt-provider" Name = "auth0" Issuer = "https://ronald.local" JSONWebKeySet = { Local = { JWKS = "eyJrZXlzIjog....." } } ``` ``` Kind = "jwt-provider" Name = "okta" Issuer = "https://ronald.local" JSONWebKeySet = { Local = { JWKS = "eyJrZXlzIjogW3...." } } ``` - add a service intention ``` Kind = "service-intentions" Name = "redis" JWT = { Providers = [ { Name = "okta" }, ] } Sources = [ { Name = "*" Permissions = [{ Action = "allow" HTTP = { PathPrefix = "/workspace" } JWT = { Providers = [ { Name = "okta" VerifyClaims = [ { Path = ["aud"] Value = "my_client_app" }, { Path = ["sub"] Value = "5be86359073c434bad2da3932222dabe" } ] }, ] } }, { Action = "allow" HTTP = { PathPrefix = "/" } JWT = { Providers = [ { Name = "auth0" }, ] } }] } ] ``` - generate 3 jwt tokens: 1 from auth0 jwks, 1 from okta jwks with different claims than `/workspace` expects and 1 with correct claims - connect to your envoy (change service and address as needed) to view logs and potential errors. 
You can add: `-- --log-level debug` to see what data is being forwarded ``` consul connect envoy -sidecar-for redis1 -grpc-addr 127.0.0.1:8502 ``` - Make the following requests: ``` curl -s -H "Authorization: Bearer $Auth0_TOKEN" --insecure --cert leaf.cert --key leaf.key --cacert connect-ca.pem https://localhost:20000/workspace -v RBAC filter denied curl -s -H "Authorization: Bearer $Okta_TOKEN_with_wrong_claims" --insecure --cert leaf.cert --key leaf.key --cacert connect-ca.pem https://localhost:20000/workspace -v RBAC filter denied curl -s -H "Authorization: Bearer $Okta_TOKEN_with_correct_claims" --insecure --cert leaf.cert --key leaf.key --cacert connect-ca.pem https://localhost:20000/workspace -v Successful request ``` ### TODO * [x] Update test coverage * [ ] update integration tests (follow-up PR) * [x] appropriate backport labels added --- agent/xds/jwt_authn.go | 210 ++++++----- agent/xds/jwt_authn_test.go | 235 ++----------- agent/xds/listeners.go | 9 +- agent/xds/rbac.go | 328 ++++++++++++------ agent/xds/rbac_test.go | 56 ++- .../jwt_authn/intention-with-path.golden | 13 +- .../testdata/jwt_authn/local-provider.golden | 7 +- ...ltiple-providers-and-one-permission.golden | 52 ++- .../xds/testdata/jwt_authn/no-provider.golden | 1 + .../testdata/jwt_authn/remote-provider.golden | 7 +- .../top-level-provider-with-permission.golden | 30 +- ...jwt-with-one-permission--httpfilter.golden | 58 ++-- ...evel-jwt-no-permissions--httpfilter.golden | 16 +- ...th-multiple-permissions--httpfilter.golden | 136 ++++++-- ...jwt-with-one-permission--httpfilter.golden | 62 +++- .../test/jwtauth/jwt_auth_test.go | 4 +- 16 files changed, 634 insertions(+), 590 deletions(-) create mode 100644 agent/xds/testdata/jwt_authn/no-provider.golden diff --git a/agent/xds/jwt_authn.go b/agent/xds/jwt_authn.go index ba1c17bbc209..17b34e5cd6ce 100644 --- a/agent/xds/jwt_authn.go +++ b/agent/xds/jwt_authn.go @@ -13,6 +13,7 @@ import ( envoy_http_v3 "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/http_connection_manager/v3" "github.com/hashicorp/consul/agent/structs" "google.golang.org/protobuf/types/known/durationpb" + "google.golang.org/protobuf/types/known/emptypb" "google.golang.org/protobuf/types/known/wrapperspb" ) @@ -22,129 +23,149 @@ const ( jwksClusterPrefix = "jwks_cluster" ) -// This is an intermediate JWTProvider form used to associate -// unique payload keys to providers -type jwtAuthnProvider struct { - ComputedName string - Provider *structs.IntentionJWTProvider -} - -func makeJWTAuthFilter(pCE map[string]*structs.JWTProviderConfigEntry, intentions structs.SimplifiedIntentions) (*envoy_http_v3.HttpFilter, error) { +// makeJWTAuthFilter builds jwt filter for envoy. It limits its use to referenced provider rather than every provider. +// +// Eg. If you have three providers: okta, auth0 and fusionAuth and only okta is referenced in your intentions, then this +// will create a jwt-auth filter containing just okta in the list of providers. 
+func makeJWTAuthFilter(providerMap map[string]*structs.JWTProviderConfigEntry, intentions structs.SimplifiedIntentions) (*envoy_http_v3.HttpFilter, error) { providers := map[string]*envoy_http_jwt_authn_v3.JwtProvider{} - var rules []*envoy_http_jwt_authn_v3.RequirementRule + var jwtRequirements []*envoy_http_jwt_authn_v3.JwtRequirement for _, intention := range intentions { if intention.JWT == nil && !hasJWTconfig(intention.Permissions) { continue } - for _, jwtReq := range collectJWTAuthnProviders(intention) { - if _, ok := providers[jwtReq.ComputedName]; ok { + for _, p := range collectJWTProviders(intention) { + providerName := p.Name + if _, ok := providers[providerName]; ok { continue } - jwtProvider, ok := pCE[jwtReq.Provider.Name] - + providerCE, ok := providerMap[providerName] if !ok { - return nil, fmt.Errorf("provider specified in intention does not exist. Provider name: %s", jwtReq.Provider.Name) + return nil, fmt.Errorf("provider specified in intention does not exist. Provider name: %s", providerName) } - // If intention permissions use HTTP-match criteria with - // VerifyClaims, then generate a clone of the jwt provider with a - // unique key for payload_in_metadata. The RBAC filter relies on - // the key to check the correct claims for the matched request. - envoyCfg, err := buildJWTProviderConfig(jwtProvider, jwtReq.ComputedName) + + envoyCfg, err := buildJWTProviderConfig(providerCE) if err != nil { return nil, err } - providers[jwtReq.ComputedName] = envoyCfg - } - - for k, perm := range intention.Permissions { - if perm.JWT == nil { - continue - } - for _, prov := range perm.JWT.Providers { - rule := buildRouteRule(prov, perm, "/", k) - rules = append(rules, rule) - } - } - - if intention.JWT != nil { - for _, provider := range intention.JWT.Providers { - // The top-level provider applies to all requests. - rule := buildRouteRule(provider, nil, "/", 0) - rules = append(rules, rule) - } + providers[providerName] = envoyCfg + reqs := providerToJWTRequirement(providerCE) + jwtRequirements = append(jwtRequirements, reqs) } } - if len(intentions) == 0 && len(providers) == 0 { - //do not add jwt_authn filter when intentions don't have JWT + if len(jwtRequirements) == 0 { + //do not add jwt_authn filter when intentions don't have JWTs return nil, nil } cfg := &envoy_http_jwt_authn_v3.JwtAuthentication{ Providers: providers, - Rules: rules, + Rules: []*envoy_http_jwt_authn_v3.RequirementRule{ + { + Match: &envoy_route_v3.RouteMatch{ + PathSpecifier: &envoy_route_v3.RouteMatch_Prefix{Prefix: "/"}, + }, + RequirementType: makeJWTRequirementRule(andJWTRequirements(jwtRequirements)), + }, + }, } return makeEnvoyHTTPFilter(jwtEnvoyFilter, cfg) } -func collectJWTAuthnProviders(i *structs.Intention) []*jwtAuthnProvider { - var reqs []*jwtAuthnProvider +func makeJWTRequirementRule(r *envoy_http_jwt_authn_v3.JwtRequirement) *envoy_http_jwt_authn_v3.RequirementRule_Requires { + return &envoy_http_jwt_authn_v3.RequirementRule_Requires{ + Requires: r, + } +} - if i.JWT != nil { - for _, prov := range i.JWT.Providers { - reqs = append(reqs, &jwtAuthnProvider{Provider: prov, ComputedName: makeComputedProviderName(prov.Name, nil, 0)}) +// andJWTRequirements combines list of jwt requirements into a single jwt requirement. 
+func andJWTRequirements(reqs []*envoy_http_jwt_authn_v3.JwtRequirement) *envoy_http_jwt_authn_v3.JwtRequirement { + switch len(reqs) { + case 0: + return nil + case 1: + return reqs[0] + default: + return &envoy_http_jwt_authn_v3.JwtRequirement{ + RequiresType: &envoy_http_jwt_authn_v3.JwtRequirement_RequiresAll{ + RequiresAll: &envoy_http_jwt_authn_v3.JwtRequirementAndList{ + Requirements: reqs, + }, + }, } } +} - reqs = append(reqs, getPermissionsProviders(i.Permissions)...) +// providerToJWTRequirement builds the envoy jwtRequirement. +// +// Note: since the rbac filter is in charge of making decisions of allow/denied, this +// requirement uses `allow_missing_or_failed` to ensure it is always satisfied. +func providerToJWTRequirement(provider *structs.JWTProviderConfigEntry) *envoy_http_jwt_authn_v3.JwtRequirement { + return &envoy_http_jwt_authn_v3.JwtRequirement{ + RequiresType: &envoy_http_jwt_authn_v3.JwtRequirement_RequiresAny{ + RequiresAny: &envoy_http_jwt_authn_v3.JwtRequirementOrList{ + Requirements: []*envoy_http_jwt_authn_v3.JwtRequirement{ + { + RequiresType: &envoy_http_jwt_authn_v3.JwtRequirement_ProviderName{ + ProviderName: provider.Name, + }, + }, + // We use allowMissingOrFailed to allow rbac filter to do the validation + { + RequiresType: &envoy_http_jwt_authn_v3.JwtRequirement_AllowMissingOrFailed{ + AllowMissingOrFailed: &emptypb.Empty{}, + }, + }, + }, + }, + }, + } +} + +// collectJWTProviders returns a list of all top level and permission level referenced providers. +func collectJWTProviders(i *structs.Intention) []*structs.IntentionJWTProvider { + // get permission level providers + reqs := getPermissionsProviders(i.Permissions) + + if i.JWT != nil { + // get top level providers + reqs = append(reqs, i.JWT.Providers...) + } return reqs } -func getPermissionsProviders(p []*structs.IntentionPermission) []*jwtAuthnProvider { - var reqs []*jwtAuthnProvider - for k, perm := range p { - if perm.JWT == nil { +func getPermissionsProviders(perms []*structs.IntentionPermission) []*structs.IntentionJWTProvider { + var reqs []*structs.IntentionJWTProvider + for _, p := range perms { + if p.JWT == nil { continue } - for _, prov := range perm.JWT.Providers { - reqs = append(reqs, &jwtAuthnProvider{Provider: prov, ComputedName: makeComputedProviderName(prov.Name, perm, k)}) - } + + reqs = append(reqs, p.JWT.Providers...) } return reqs } -// makeComputedProviderName is used to create names for unique provider per permission -// This is to stop jwt claims cross validation across permissions/providers. -// -// eg. If Permission x is the 3rd permission and has a provider of original name okta -// this function will return okta_3 as the computed provider name -func makeComputedProviderName(name string, perm *structs.IntentionPermission, idx int) string { - if perm == nil { - return name - } - return fmt.Sprintf("%s_%d", name, idx) -} - -// buildPayloadInMetadataKey is used to create a unique payload key per provider/permissions. -// This is to ensure claims are validated/forwarded specifically under the right permission/path -// and ensure we don't accidentally validate claims from different permissions/providers. +// buildPayloadInMetadataKey is used to create a unique payload key per provider. +// This is to ensure claims are validated/forwarded specifically under the right provider. +// The forwarded payload is used with other data (eg. service identity) by the RBAC filter +// to validate access to resource. // -// eg. 
With a provider named okta, the second permission in permission list will have a provider of: -// okta_2 and a payload key of: jwt_payload_okta_2. Whereas an okta provider with no specific permission -// will have a payload key of: jwt_payload_okta -func buildPayloadInMetadataKey(providerName string, perm *structs.IntentionPermission, idx int) string { - return fmt.Sprintf("%s_%s", jwtMetadataKeyPrefix, makeComputedProviderName(providerName, perm, idx)) +// eg. With a provider named okta will have a payload key of: jwt_payload_okta +func buildPayloadInMetadataKey(providerName string) string { + return jwtMetadataKeyPrefix + "_" + providerName } -func buildJWTProviderConfig(p *structs.JWTProviderConfigEntry, metadataKeySuffix string) (*envoy_http_jwt_authn_v3.JwtProvider, error) { +func buildJWTProviderConfig(p *structs.JWTProviderConfigEntry) (*envoy_http_jwt_authn_v3.JwtProvider, error) { envoyCfg := envoy_http_jwt_authn_v3.JwtProvider{ Issuer: p.Issuer, Audiences: p.Audiences, - PayloadInMetadata: buildPayloadInMetadataKey(metadataKeySuffix, nil, 0), + PayloadInMetadata: buildPayloadInMetadataKey(p.Name), } if p.Forwarding != nil { @@ -262,43 +283,6 @@ func buildJWTRetryPolicy(r *structs.JWKSRetryPolicy) *envoy_core_v3.RetryPolicy return &pol } -func buildRouteRule(provider *structs.IntentionJWTProvider, perm *structs.IntentionPermission, defaultPrefix string, permIdx int) *envoy_http_jwt_authn_v3.RequirementRule { - rule := &envoy_http_jwt_authn_v3.RequirementRule{ - Match: &envoy_route_v3.RouteMatch{ - PathSpecifier: &envoy_route_v3.RouteMatch_Prefix{Prefix: defaultPrefix}, - }, - RequirementType: &envoy_http_jwt_authn_v3.RequirementRule_Requires{ - Requires: &envoy_http_jwt_authn_v3.JwtRequirement{ - RequiresType: &envoy_http_jwt_authn_v3.JwtRequirement_ProviderName{ - ProviderName: makeComputedProviderName(provider.Name, perm, permIdx), - }, - }, - }, - } - - if perm != nil && perm.HTTP != nil { - if perm.HTTP.PathPrefix != "" { - rule.Match.PathSpecifier = &envoy_route_v3.RouteMatch_Prefix{ - Prefix: perm.HTTP.PathPrefix, - } - } - - if perm.HTTP.PathExact != "" { - rule.Match.PathSpecifier = &envoy_route_v3.RouteMatch_Path{ - Path: perm.HTTP.PathExact, - } - } - - if perm.HTTP.PathRegex != "" { - rule.Match.PathSpecifier = &envoy_route_v3.RouteMatch_SafeRegex{ - SafeRegex: makeEnvoyRegexMatch(perm.HTTP.PathRegex), - } - } - } - - return rule -} - func hasJWTconfig(p []*structs.IntentionPermission) bool { for _, perm := range p { if perm.JWT != nil { diff --git a/agent/xds/jwt_authn_test.go b/agent/xds/jwt_authn_test.go index b2a7d7ce54df..ab8665b1dc3a 100644 --- a/agent/xds/jwt_authn_test.go +++ b/agent/xds/jwt_authn_test.go @@ -9,7 +9,6 @@ import ( "testing" envoy_core_v3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" - envoy_route_v3 "github.com/envoyproxy/go-control-plane/envoy/config/route/v3" envoy_http_jwt_authn_v3 "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/jwt_authn/v3" "github.com/hashicorp/consul/agent/structs" "github.com/stretchr/testify/require" @@ -173,6 +172,10 @@ func TestMakeJWTAUTHFilters(t *testing.T) { intentions structs.SimplifiedIntentions provider map[string]*structs.JWTProviderConfigEntry }{ + "no-provider": { + intentions: simplified(makeTestIntention(t, ixnOpts{src: "web", action: structs.IntentionActionAllow})), + provider: nil, + }, "remote-provider": { intentions: simplified(makeTestIntention(t, ixnOpts{src: "web", action: structs.IntentionActionAllow, jwt: oktaIntention})), provider: remoteCE, @@ -206,123 
+209,45 @@ func TestMakeJWTAUTHFilters(t *testing.T) { } } -func TestMakeComputedProviderName(t *testing.T) { - tests := map[string]struct { - name string - perm *structs.IntentionPermission - idx int - expected string - }{ - "no-permissions": { - name: "okta", - idx: 0, - expected: "okta", - }, - "exact-path-permission": { - name: "auth0", - perm: &structs.IntentionPermission{ - HTTP: &structs.IntentionHTTPPermission{ - PathExact: "admin", - }, - }, - idx: 5, - expected: "auth0_5", - }, - } - - for name, tt := range tests { - tt := tt - t.Run(name, func(t *testing.T) { - reqs := makeComputedProviderName(tt.name, tt.perm, tt.idx) - require.Equal(t, reqs, tt.expected) - }) - } -} - -func TestBuildPayloadInMetadataKey(t *testing.T) { - tests := map[string]struct { - name string - perm *structs.IntentionPermission - permIdx int - expected string - }{ - "no-permissions": { - name: "okta", - expected: "jwt_payload_okta", - }, - "path-prefix-permission": { - name: "auth0", - perm: &structs.IntentionPermission{ - HTTP: &structs.IntentionHTTPPermission{ - PathPrefix: "admin", - }, - }, - permIdx: 4, - expected: "jwt_payload_auth0_4", - }, - } - - for name, tt := range tests { - tt := tt - t.Run(name, func(t *testing.T) { - reqs := buildPayloadInMetadataKey(tt.name, tt.perm, tt.permIdx) - require.Equal(t, reqs, tt.expected) - }) - } -} - -func TestCollectJWTAuthnProviders(t *testing.T) { +func TestCollectJWTProviders(t *testing.T) { tests := map[string]struct { intention *structs.Intention - expected []*jwtAuthnProvider + expected []*structs.IntentionJWTProvider }{ "empty-top-level-jwt-and-empty-permissions": { intention: makeTestIntention(t, ixnOpts{src: "web"}), - expected: []*jwtAuthnProvider{}, + expected: []*structs.IntentionJWTProvider{}, }, "top-level-jwt-and-empty-permissions": { intention: makeTestIntention(t, ixnOpts{src: "web", jwt: oktaIntention}), - expected: []*jwtAuthnProvider{{Provider: &oktaProvider, ComputedName: oktaProvider.Name}}, + expected: []*structs.IntentionJWTProvider{&oktaProvider}, }, "multi-top-level-jwt-and-empty-permissions": { intention: makeTestIntention(t, ixnOpts{src: "web", jwt: multiProviderIntentions}), - expected: []*jwtAuthnProvider{ - {Provider: &oktaProvider, ComputedName: oktaProvider.Name}, - {Provider: &auth0Provider, ComputedName: auth0Provider.Name}, - }, + expected: []*structs.IntentionJWTProvider{&oktaProvider, &auth0Provider}, }, "top-level-jwt-and-one-jwt-permission": { intention: makeTestIntention(t, ixnOpts{src: "web", jwt: auth0Intention, perms: pWithOktaProvider}), - expected: []*jwtAuthnProvider{ - {Provider: &auth0Provider, ComputedName: auth0Provider.Name}, - {Provider: &oktaProvider, ComputedName: "okta_0"}, - }, + expected: []*structs.IntentionJWTProvider{&auth0Provider, &oktaProvider}, }, "top-level-jwt-and-multi-jwt-permissions": { intention: makeTestIntention(t, ixnOpts{src: "web", jwt: fakeIntention, perms: pWithMultiProviders}), - expected: []*jwtAuthnProvider{ - {Provider: &fakeProvider, ComputedName: fakeProvider.Name}, - {Provider: &oktaProvider, ComputedName: "okta_0"}, - {Provider: &auth0Provider, ComputedName: "auth0_0"}, - }, + expected: []*structs.IntentionJWTProvider{&fakeProvider, &oktaProvider, &auth0Provider}, }, "empty-top-level-jwt-and-one-jwt-permission": { intention: makeTestIntention(t, ixnOpts{src: "web", perms: pWithOktaProvider}), - expected: []*jwtAuthnProvider{{Provider: &oktaProvider, ComputedName: "okta_0"}}, + expected: []*structs.IntentionJWTProvider{&oktaProvider}, }, 
"empty-top-level-jwt-and-multi-jwt-permission": { intention: makeTestIntention(t, ixnOpts{src: "web", perms: pWithMultiProviders}), - expected: []*jwtAuthnProvider{ - {Provider: &oktaProvider, ComputedName: "okta_0"}, - {Provider: &auth0Provider, ComputedName: "auth0_0"}, - }, + expected: []*structs.IntentionJWTProvider{&oktaProvider, &auth0Provider}, }, } for name, tt := range tests { tt := tt t.Run(name, func(t *testing.T) { - reqs := collectJWTAuthnProviders(tt.intention) + reqs := collectJWTProviders(tt.intention) require.ElementsMatch(t, reqs, tt.expected) }) } @@ -331,43 +256,35 @@ func TestCollectJWTAuthnProviders(t *testing.T) { func TestGetPermissionsProviders(t *testing.T) { tests := map[string]struct { perms []*structs.IntentionPermission - expected []*jwtAuthnProvider + expected []*structs.IntentionJWTProvider }{ "empty-permissions": { perms: []*structs.IntentionPermission{}, - expected: []*jwtAuthnProvider{}, + expected: []*structs.IntentionJWTProvider{}, }, "nil-permissions": { perms: nil, - expected: []*jwtAuthnProvider{}, + expected: []*structs.IntentionJWTProvider{}, }, "permissions-with-no-jwt": { perms: []*structs.IntentionPermission{pWithNoJWT}, - expected: []*jwtAuthnProvider{}, + expected: []*structs.IntentionJWTProvider{}, }, "permissions-with-one-jwt": { - perms: []*structs.IntentionPermission{pWithOktaProvider, pWithNoJWT}, - expected: []*jwtAuthnProvider{ - {Provider: &oktaProvider, ComputedName: "okta_0"}, - }, + perms: []*structs.IntentionPermission{pWithOktaProvider, pWithNoJWT}, + expected: []*structs.IntentionJWTProvider{&oktaProvider}, }, "permissions-with-multiple-jwt": { - perms: []*structs.IntentionPermission{pWithMultiProviders, pWithNoJWT}, - expected: []*jwtAuthnProvider{ - {Provider: &auth0Provider, ComputedName: "auth0_0"}, - {Provider: &oktaProvider, ComputedName: "okta_0"}, - }, + perms: []*structs.IntentionPermission{pWithMultiProviders, pWithNoJWT}, + expected: []*structs.IntentionJWTProvider{&auth0Provider, &oktaProvider}, }, } for name, tt := range tests { tt := tt t.Run(name, func(t *testing.T) { - t.Run("getPermissionsProviders", func(t *testing.T) { - p := getPermissionsProviders(tt.perms) - - require.ElementsMatch(t, p, tt.expected) - }) + p := getPermissionsProviders(tt.perms) + require.ElementsMatch(t, p, tt.expected) }) } } @@ -415,7 +332,7 @@ func TestBuildJWTProviderConfig(t *testing.T) { Issuer: fullCE.Issuer, Audiences: fullCE.Audiences, ForwardPayloadHeader: "user-token", - PayloadInMetadata: buildPayloadInMetadataKey(ceRemoteJWKS.Name, nil, 0), + PayloadInMetadata: buildPayloadInMetadataKey(ceRemoteJWKS.Name), PadForwardPayloadHeader: false, Forward: true, JwksSourceSpecifier: &envoy_http_jwt_authn_v3.JwtProvider_LocalJwks{ @@ -433,7 +350,7 @@ func TestBuildJWTProviderConfig(t *testing.T) { expected: &envoy_http_jwt_authn_v3.JwtProvider{ Issuer: fullCE.Issuer, Audiences: fullCE.Audiences, - PayloadInMetadata: buildPayloadInMetadataKey(ceRemoteJWKS.Name, nil, 0), + PayloadInMetadata: buildPayloadInMetadataKey(ceRemoteJWKS.Name), JwksSourceSpecifier: &envoy_http_jwt_authn_v3.JwtProvider_RemoteJwks{ RemoteJwks: &envoy_http_jwt_authn_v3.RemoteJwks{ HttpUri: &envoy_core_v3.HttpUri{ @@ -453,7 +370,7 @@ func TestBuildJWTProviderConfig(t *testing.T) { for name, tt := range tests { tt := tt t.Run(name, func(t *testing.T) { - res, err := buildJWTProviderConfig(tt.ce, tt.ce.GetName()) + res, err := buildJWTProviderConfig(tt.ce) if tt.expectedError != "" { require.Error(t, err) @@ -620,104 +537,6 @@ func TestBuildJWTRetryPolicy(t *testing.T) { 
} } -func TestBuildRouteRule(t *testing.T) { - var ( - pWithExactPath = &structs.IntentionPermission{ - Action: structs.IntentionActionAllow, - HTTP: &structs.IntentionHTTPPermission{ - PathExact: "/exact-match", - }, - } - pWithRegex = &structs.IntentionPermission{ - Action: structs.IntentionActionAllow, - HTTP: &structs.IntentionHTTPPermission{ - PathRegex: "p([a-z]+)ch", - }, - } - ) - tests := map[string]struct { - provider *structs.IntentionJWTProvider - perm *structs.IntentionPermission - route string - expected *envoy_http_jwt_authn_v3.RequirementRule - }{ - "permission-nil": { - provider: &oktaProvider, - perm: nil, - route: "/my-route", - expected: &envoy_http_jwt_authn_v3.RequirementRule{ - Match: &envoy_route_v3.RouteMatch{PathSpecifier: &envoy_route_v3.RouteMatch_Prefix{Prefix: "/my-route"}}, - RequirementType: &envoy_http_jwt_authn_v3.RequirementRule_Requires{ - Requires: &envoy_http_jwt_authn_v3.JwtRequirement{ - RequiresType: &envoy_http_jwt_authn_v3.JwtRequirement_ProviderName{ - ProviderName: oktaProvider.Name, - }, - }, - }, - }, - }, - "permission-with-path-prefix": { - provider: &oktaProvider, - perm: pWithOktaProvider, - route: "/my-route", - expected: &envoy_http_jwt_authn_v3.RequirementRule{ - Match: &envoy_route_v3.RouteMatch{PathSpecifier: &envoy_route_v3.RouteMatch_Prefix{ - Prefix: pWithMultiProviders.HTTP.PathPrefix, - }}, - RequirementType: &envoy_http_jwt_authn_v3.RequirementRule_Requires{ - Requires: &envoy_http_jwt_authn_v3.JwtRequirement{ - RequiresType: &envoy_http_jwt_authn_v3.JwtRequirement_ProviderName{ - ProviderName: makeComputedProviderName(oktaProvider.Name, pWithMultiProviders, 0), - }, - }, - }, - }, - }, - "permission-with-exact-path": { - provider: &oktaProvider, - perm: pWithExactPath, - route: "/", - expected: &envoy_http_jwt_authn_v3.RequirementRule{ - Match: &envoy_route_v3.RouteMatch{PathSpecifier: &envoy_route_v3.RouteMatch_Path{ - Path: pWithExactPath.HTTP.PathExact, - }}, - RequirementType: &envoy_http_jwt_authn_v3.RequirementRule_Requires{ - Requires: &envoy_http_jwt_authn_v3.JwtRequirement{ - RequiresType: &envoy_http_jwt_authn_v3.JwtRequirement_ProviderName{ - ProviderName: makeComputedProviderName(oktaProvider.Name, pWithExactPath, 0), - }, - }, - }, - }, - }, - "permission-with-regex": { - provider: &oktaProvider, - perm: pWithRegex, - route: "/", - expected: &envoy_http_jwt_authn_v3.RequirementRule{ - Match: &envoy_route_v3.RouteMatch{PathSpecifier: &envoy_route_v3.RouteMatch_SafeRegex{ - SafeRegex: makeEnvoyRegexMatch(pWithRegex.HTTP.PathRegex), - }}, - RequirementType: &envoy_http_jwt_authn_v3.RequirementRule_Requires{ - Requires: &envoy_http_jwt_authn_v3.JwtRequirement{ - RequiresType: &envoy_http_jwt_authn_v3.JwtRequirement_ProviderName{ - ProviderName: makeComputedProviderName(oktaProvider.Name, pWithRegex, 0), - }, - }, - }, - }, - }, - } - - for name, tt := range tests { - tt := tt - t.Run(name, func(t *testing.T) { - res := buildRouteRule(tt.provider, tt.perm, tt.route, 0) - require.Equal(t, res, tt.expected) - }) - } -} - func TestHasJWTconfig(t *testing.T) { tests := map[string]struct { perms []*structs.IntentionPermission diff --git a/agent/xds/listeners.go b/agent/xds/listeners.go index 6e67cd1c564e..71e5b285e146 100644 --- a/agent/xds/listeners.go +++ b/agent/xds/listeners.go @@ -1291,6 +1291,7 @@ func (s *ResourceGenerator) makeInboundListener(cfgSnap *proxycfg.ConfigSnapshot partition: cfgSnap.ProxyID.PartitionOrDefault(), }, cfgSnap.ConnectProxy.InboundPeerTrustBundles, + cfgSnap.JWTProviders, ) if err != nil { 
return nil, err @@ -1364,9 +1365,9 @@ func (s *ResourceGenerator) makeInboundListener(cfgSnap *proxycfg.ConfigSnapshot logger: s.Logger, } if useHTTPFilter { - jwtFilter, jwtFilterErr := makeJWTAuthFilter(cfgSnap.JWTProviders, cfgSnap.ConnectProxy.Intentions) - if jwtFilterErr != nil { - return nil, jwtFilterErr + jwtFilter, err := makeJWTAuthFilter(cfgSnap.JWTProviders, cfgSnap.ConnectProxy.Intentions) + if err != nil { + return nil, err } rbacFilter, err := makeRBACHTTPFilter( cfgSnap.ConnectProxy.Intentions, @@ -1377,6 +1378,7 @@ func (s *ResourceGenerator) makeInboundListener(cfgSnap *proxycfg.ConfigSnapshot partition: cfgSnap.ProxyID.PartitionOrDefault(), }, cfgSnap.ConnectProxy.InboundPeerTrustBundles, + cfgSnap.JWTProviders, ) if err != nil { return nil, err @@ -1844,6 +1846,7 @@ func (s *ResourceGenerator) makeFilterChainTerminatingGateway(cfgSnap *proxycfg. partition: cfgSnap.ProxyID.PartitionOrDefault(), }, nil, // TODO(peering): verify intentions w peers don't apply to terminatingGateway + cfgSnap.JWTProviders, ) if err != nil { return nil, err diff --git a/agent/xds/rbac.go b/agent/xds/rbac.go index 4cb77ad7f028..f38525abb78d 100644 --- a/agent/xds/rbac.go +++ b/agent/xds/rbac.go @@ -28,7 +28,10 @@ func makeRBACNetworkFilter( localInfo rbacLocalInfo, peerTrustBundles []*pbpeering.PeeringTrustBundle, ) (*envoy_listener_v3.Filter, error) { - rules := makeRBACRules(intentions, intentionDefaultAllow, localInfo, false, peerTrustBundles) + rules, err := makeRBACRules(intentions, intentionDefaultAllow, localInfo, false, peerTrustBundles, nil) + if err != nil { + return nil, err + } cfg := &envoy_network_rbac_v3.RBAC{ StatPrefix: "connect_authz", @@ -42,8 +45,12 @@ func makeRBACHTTPFilter( intentionDefaultAllow bool, localInfo rbacLocalInfo, peerTrustBundles []*pbpeering.PeeringTrustBundle, + providerMap map[string]*structs.JWTProviderConfigEntry, ) (*envoy_http_v3.HttpFilter, error) { - rules := makeRBACRules(intentions, intentionDefaultAllow, localInfo, true, peerTrustBundles) + rules, err := makeRBACRules(intentions, intentionDefaultAllow, localInfo, true, peerTrustBundles, providerMap) + if err != nil { + return nil, err + } cfg := &envoy_http_rbac_v3.RBAC{ Rules: rules, @@ -56,7 +63,8 @@ func intentionListToIntermediateRBACForm( localInfo rbacLocalInfo, isHTTP bool, trustBundlesByPeer map[string]*pbpeering.PeeringTrustBundle, -) []*rbacIntention { + providerMap map[string]*structs.JWTProviderConfigEntry, +) ([]*rbacIntention, error) { sort.Sort(structs.IntentionPrecedenceSorter(intentions)) // Omit any lower-precedence intentions that share the same source. 
@@ -73,10 +81,13 @@ func intentionListToIntermediateRBACForm( continue } - rixn := intentionToIntermediateRBACForm(ixn, localInfo, isHTTP, trustBundle) + rixn, err := intentionToIntermediateRBACForm(ixn, localInfo, isHTTP, trustBundle, providerMap) + if err != nil { + return nil, err + } rbacIxns = append(rbacIxns, rixn) } - return rbacIxns + return rbacIxns, nil } func removeSourcePrecedence(rbacIxns []*rbacIntention, intentionDefaultAction intentionAction, localInfo rbacLocalInfo) []*rbacIntention { @@ -216,7 +227,8 @@ func intentionToIntermediateRBACForm( localInfo rbacLocalInfo, isHTTP bool, bundle *pbpeering.PeeringTrustBundle, -) *rbacIntention { + providerMap map[string]*structs.JWTProviderConfigEntry, +) (*rbacIntention, error) { rixn := &rbacIntention{ Source: rbacService{ ServiceName: ixn.SourceServiceName(), @@ -233,36 +245,41 @@ func intentionToIntermediateRBACForm( } if isHTTP && ixn.JWT != nil { - var c []*JWTInfo + var jwts []*JWTInfo for _, prov := range ixn.JWT.Providers { - if len(prov.VerifyClaims) > 0 { - c = append(c, makeJWTInfos(prov, nil, 0)) + jwtProvider, ok := providerMap[prov.Name] + + if !ok { + return nil, fmt.Errorf("provider specified in intention does not exist. Provider name: %s", prov.Name) } + jwts = append(jwts, newJWTInfo(prov, jwtProvider)) } - if len(c) > 0 { - rixn.jwtInfos = c - } + + rixn.jwtInfos = jwts } if len(ixn.Permissions) > 0 { if isHTTP { rixn.Action = intentionActionLayer7 rixn.Permissions = make([]*rbacPermission, 0, len(ixn.Permissions)) - for k, perm := range ixn.Permissions { + for _, perm := range ixn.Permissions { rbacPerm := rbacPermission{ Definition: perm, Action: intentionActionFromString(perm.Action), Perm: convertPermission(perm), } + if perm.JWT != nil { - var c []*JWTInfo + var jwts []*JWTInfo for _, prov := range perm.JWT.Providers { - if len(prov.VerifyClaims) > 0 { - c = append(c, makeJWTInfos(prov, perm, k)) + jwtProvider, ok := providerMap[prov.Name] + if !ok { + return nil, fmt.Errorf("provider specified in intention does not exist. 
Provider name: %s", prov.Name) } + jwts = append(jwts, newJWTInfo(prov, jwtProvider)) } - if len(c) > 0 { - rbacPerm.jwtInfos = c + if len(jwts) > 0 { + rbacPerm.jwtInfos = jwts } } rixn.Permissions = append(rixn.Permissions, &rbacPerm) @@ -275,18 +292,24 @@ func intentionToIntermediateRBACForm( rixn.Action = intentionActionFromString(ixn.Action) } - return rixn + return rixn, nil } -func makeJWTInfos(p *structs.IntentionJWTProvider, perm *structs.IntentionPermission, permKey int) *JWTInfo { - return &JWTInfo{Claims: p.VerifyClaims, MetadataPayloadKey: buildPayloadInMetadataKey(p.Name, perm, permKey)} +func newJWTInfo(p *structs.IntentionJWTProvider, ce *structs.JWTProviderConfigEntry) *JWTInfo { + return &JWTInfo{ + Provider: p, + Issuer: ce.Issuer, + } } type intentionAction int type JWTInfo struct { - Claims []*structs.IntentionJWTClaimVerification - MetadataPayloadKey string + // Provider issuer + // this information is coming from the config entry + Issuer string + // Provider is the intention provider + Provider *structs.IntentionJWTProvider } const ( @@ -341,26 +364,32 @@ type rbacIntention struct { } func (r *rbacIntention) FlattenPrincipal(localInfo rbacLocalInfo) *envoy_rbac_v3.Principal { + var principal *envoy_rbac_v3.Principal if !localInfo.expectXFCC { - return r.flattenPrincipalFromCert() - + principal = r.flattenPrincipalFromCert() } else if r.Source.Peer == "" { // NOTE: ixnSourceMatches should enforce that all of Source and NotSources // are peered or not-peered, so we only need to look at the Source element. - return r.flattenPrincipalFromCert() // intention is not relevant to peering - } + principal = r.flattenPrincipalFromCert() // intention is not relevant to peering + } else { + // If this intention is an L7 peered one, then it is exclusively resolvable + // using XFCC, rather than the TLS SAN field. + fromXFCC := r.flattenPrincipalFromXFCC() + + // Use of the XFCC one is gated on coming directly from our own gateways. + gwIDPattern := makeSpiffeMeshGatewayPattern(localInfo.trustDomain, localInfo.partition) - // If this intention is an L7 peered one, then it is exclusively resolvable - // using XFCC, rather than the TLS SAN field. - fromXFCC := r.flattenPrincipalFromXFCC() + principal = andPrincipals([]*envoy_rbac_v3.Principal{ + authenticatedPatternPrincipal(gwIDPattern), + fromXFCC, + }) + } - // Use of the XFCC one is gated on coming directly from our own gateways. - gwIDPattern := makeSpiffeMeshGatewayPattern(localInfo.trustDomain, localInfo.partition) + if len(r.jwtInfos) == 0 { + return principal + } - return andPrincipals([]*envoy_rbac_v3.Principal{ - authenticatedPatternPrincipal(gwIDPattern), - fromXFCC, - }) + return addJWTPrincipal(principal, r.jwtInfos) } func (r *rbacIntention) flattenPrincipalFromCert() *envoy_rbac_v3.Principal { @@ -417,17 +446,47 @@ type rbacPermission struct { ComputedPermission *envoy_rbac_v3.Permission } +// Flatten ensure the permission rules, not-rules, and jwt validation rules are merged into a single computed permission. +// +// Details on JWTInfo section: +// For each JWTInfo (AKA provider required), this builds 1 single permission that validates that the jwt has +// the right issuer (`iss`) field and validates the claims (if any). +// +// After generating a single permission per info, it combines all the info permissions into a single OrPermission. +// This orPermission is then attached to initial computed permission for jwt payload and claims validation. 
func (p *rbacPermission) Flatten() *envoy_rbac_v3.Permission { - if len(p.NotPerms) == 0 { - return p.Perm + computedPermission := p.Perm + if len(p.NotPerms) == 0 && len(p.jwtInfos) == 0 { + return computedPermission } - parts := make([]*envoy_rbac_v3.Permission, 0, len(p.NotPerms)+1) - parts = append(parts, p.Perm) - for _, notPerm := range p.NotPerms { - parts = append(parts, notPermission(notPerm)) + if len(p.NotPerms) != 0 { + parts := make([]*envoy_rbac_v3.Permission, 0, len(p.NotPerms)+1) + parts = append(parts, p.Perm) + for _, notPerm := range p.NotPerms { + parts = append(parts, notPermission(notPerm)) + } + computedPermission = andPermissions(parts) } - return andPermissions(parts) + + if len(p.jwtInfos) == 0 { + return computedPermission + } + + var jwtPerms []*envoy_rbac_v3.Permission + for _, info := range p.jwtInfos { + payloadKey := buildPayloadInMetadataKey(info.Provider.Name) + claimsPermission := jwtInfosToPermission(info.Provider.VerifyClaims, payloadKey) + issuerPermission := segmentToPermission(pathToSegments([]string{"iss"}, payloadKey), info.Issuer) + + perm := andPermissions([]*envoy_rbac_v3.Permission{ + issuerPermission, claimsPermission, + }) + jwtPerms = append(jwtPerms, perm) + } + + jwtPerm := orPermissions(jwtPerms) + return andPermissions([]*envoy_rbac_v3.Permission{computedPermission, jwtPerm}) } // simplifyNotSourceSlice will collapse NotSources elements together if any element is @@ -526,7 +585,8 @@ func makeRBACRules( localInfo rbacLocalInfo, isHTTP bool, peerTrustBundles []*pbpeering.PeeringTrustBundle, -) *envoy_rbac_v3.RBAC { + providerMap map[string]*structs.JWTProviderConfigEntry, +) (*envoy_rbac_v3.RBAC, error) { // TODO(banks,rb): Implement revocation list checking? // TODO(peering): mkeeler asked that these maps come from proxycfg instead of @@ -546,7 +606,10 @@ func makeRBACRules( } // First build up just the basic principal matches. 
- rbacIxns := intentionListToIntermediateRBACForm(intentions, localInfo, isHTTP, trustBundlesByPeer) + rbacIxns, err := intentionListToIntermediateRBACForm(intentions, localInfo, isHTTP, trustBundlesByPeer, providerMap) + if err != nil { + return nil, err + } // Normalize: if we are in default-deny then all intentions must be allows and vice versa intentionDefaultAction := intentionActionFromBool(intentionDefaultAllow) @@ -574,10 +637,6 @@ func makeRBACRules( var principalsL4 []*envoy_rbac_v3.Principal for i, rbacIxn := range rbacIxns { - var infos []*JWTInfo - if isHTTP { - infos = collectJWTInfos(rbacIxn) - } if rbacIxn.Action == intentionActionLayer7 { if len(rbacIxn.Permissions) == 0 { panic("invalid state: L7 intention has no permissions") @@ -587,10 +646,6 @@ func makeRBACRules( } rbacPrincipals := optimizePrincipals([]*envoy_rbac_v3.Principal{rbacIxn.ComputedPrincipal}) - if len(infos) > 0 { - claimsPrincipal := jwtInfosToPrincipals(infos) - rbacPrincipals = combineBasePrincipalWithJWTPrincipals(rbacPrincipals, claimsPrincipal) - } // For L7: we should generate one Policy per Principal and list all of the Permissions policy := &envoy_rbac_v3.Policy{ Principals: rbacPrincipals, @@ -603,11 +658,6 @@ func makeRBACRules( } else { // For L4: we should generate one big Policy listing all Principals principalsL4 = append(principalsL4, rbacIxn.ComputedPrincipal) - // Append JWT principals to list of principals - if len(infos) > 0 { - claimsPrincipal := jwtInfosToPrincipals(infos) - principalsL4 = combineBasePrincipalWithJWTPrincipals(principalsL4, claimsPrincipal) - } } } if len(principalsL4) > 0 { @@ -620,59 +670,74 @@ func makeRBACRules( if len(rbac.Policies) == 0 { rbac.Policies = nil } - return rbac + return rbac, nil } -// combineBasePrincipalWithJWTPrincipals ensure each RBAC/Network principal is associated with -// the JWT principal -func combineBasePrincipalWithJWTPrincipals(p []*envoy_rbac_v3.Principal, cp *envoy_rbac_v3.Principal) []*envoy_rbac_v3.Principal { - res := make([]*envoy_rbac_v3.Principal, 0) - - for _, principal := range p { - if principal != nil && cp != nil { - p := andPrincipals([]*envoy_rbac_v3.Principal{principal, cp}) - res = append(res, p) +// addJWTPrincipal ensure the passed RBAC/Network principal is associated with +// a JWT principal when JWTs validation is required. +// +// For each jwtInfo, this builds a first principal that validates that the jwt has the right issuer (`iss`). +// It collects all the claims principal and combines them into a single principal using jwtClaimsToPrincipals. +// It then combines the issuer principal and the claims principal into a single principal. +// +// After generating a single principal per info, it combines all the info principals into a single jwt OrPrincipal. +// This orPrincipal is then attached to the RBAC/NETWORK principal for jwt payload validation. 
+func addJWTPrincipal(principal *envoy_rbac_v3.Principal, infos []*JWTInfo) *envoy_rbac_v3.Principal { + if len(infos) == 0 { + return principal + } + jwtPrincipals := make([]*envoy_rbac_v3.Principal, 0, len(infos)) + for _, info := range infos { + payloadKey := buildPayloadInMetadataKey(info.Provider.Name) + + // build jwt provider issuer principal + segments := pathToSegments([]string{"iss"}, payloadKey) + p := segmentToPrincipal(segments, info.Issuer) + + // add jwt provider claims principal if any + if cp := jwtClaimsToPrincipals(info.Provider.VerifyClaims, payloadKey); cp != nil { + p = andPrincipals([]*envoy_rbac_v3.Principal{p, cp}) } + jwtPrincipals = append(jwtPrincipals, p) } - return res -} -// collectJWTInfos extracts all the collected JWTInfos top level infos -// and permission level infos and returns them as a single array -func collectJWTInfos(rbacIxn *rbacIntention) []*JWTInfo { - infos := make([]*JWTInfo, 0, len(rbacIxn.jwtInfos)) + // make jwt principals into 1 single principal + jwtFinalPrincipal := orPrincipals(jwtPrincipals) - if len(rbacIxn.jwtInfos) > 0 { - infos = append(infos, rbacIxn.jwtInfos...) - } - for _, perm := range rbacIxn.Permissions { - infos = append(infos, perm.jwtInfos...) + if principal == nil { + return jwtFinalPrincipal } - return infos + return andPrincipals([]*envoy_rbac_v3.Principal{principal, jwtFinalPrincipal}) } -func jwtInfosToPrincipals(c []*JWTInfo) *envoy_rbac_v3.Principal { +func jwtClaimsToPrincipals(claims []*structs.IntentionJWTClaimVerification, payloadkey string) *envoy_rbac_v3.Principal { ps := make([]*envoy_rbac_v3.Principal, 0) - for _, jwtInfo := range c { - if jwtInfo != nil { - for _, claim := range jwtInfo.Claims { - ps = append(ps, jwtClaimToPrincipal(claim, jwtInfo.MetadataPayloadKey)) - } - } + for _, claim := range claims { + ps = append(ps, jwtClaimToPrincipal(claim, payloadkey)) + } + switch len(ps) { + case 0: + return nil + case 1: + return ps[0] + default: + return andPrincipals(ps) } - return orPrincipals(ps) } -// jwtClaimToPrincipal takes in a payloadkey which is the metadata key. This key is generated by using provider name, -// permission index with a jwt_payload prefix. See buildPayloadInMetadataKey in agent/xds/jwt_authn.go +// jwtClaimToPrincipal takes in a payloadkey which is the metadata key. This key is generated by using provider name +// and a jwt_payload prefix. See buildPayloadInMetadataKey in agent/xds/jwt_authn.go // // This uniquely generated payloadKey is the first segment in the path to validate the JWT claims. The subsequent segments // come from the Path included in the IntentionJWTClaimVerification param. 
func jwtClaimToPrincipal(c *structs.IntentionJWTClaimVerification, payloadKey string) *envoy_rbac_v3.Principal { segments := pathToSegments(c.Path, payloadKey) + return segmentToPrincipal(segments, c.Value) +} +func segmentToPrincipal(segments []*envoy_matcher_v3.MetadataMatcher_PathSegment, v string) *envoy_rbac_v3.Principal { return &envoy_rbac_v3.Principal{ Identifier: &envoy_rbac_v3.Principal_Metadata{ Metadata: &envoy_matcher_v3.MetadataMatcher{ @@ -682,7 +747,41 @@ func jwtClaimToPrincipal(c *structs.IntentionJWTClaimVerification, payloadKey st MatchPattern: &envoy_matcher_v3.ValueMatcher_StringMatch{ StringMatch: &envoy_matcher_v3.StringMatcher{ MatchPattern: &envoy_matcher_v3.StringMatcher_Exact{ - Exact: c.Value, + Exact: v, + }, + }, + }, + }, + }, + }, + } +} + +func jwtInfosToPermission(claims []*structs.IntentionJWTClaimVerification, payloadkey string) *envoy_rbac_v3.Permission { + ps := make([]*envoy_rbac_v3.Permission, 0, len(claims)) + + for _, claim := range claims { + ps = append(ps, jwtClaimToPermission(claim, payloadkey)) + } + return andPermissions(ps) +} + +func jwtClaimToPermission(c *structs.IntentionJWTClaimVerification, payloadKey string) *envoy_rbac_v3.Permission { + segments := pathToSegments(c.Path, payloadKey) + return segmentToPermission(segments, c.Value) +} + +func segmentToPermission(segments []*envoy_matcher_v3.MetadataMatcher_PathSegment, v string) *envoy_rbac_v3.Permission { + return &envoy_rbac_v3.Permission{ + Rule: &envoy_rbac_v3.Permission_Metadata{ + Metadata: &envoy_matcher_v3.MetadataMatcher{ + Filter: jwtEnvoyFilter, + Path: segments, + Value: &envoy_matcher_v3.ValueMatcher{ + MatchPattern: &envoy_matcher_v3.ValueMatcher_StringMatch{ + StringMatch: &envoy_matcher_v3.StringMatcher{ + MatchPattern: &envoy_matcher_v3.StringMatcher_Exact{ + Exact: v, }, }, }, @@ -837,22 +936,32 @@ func countWild(src rbacService) int { } func andPrincipals(ids []*envoy_rbac_v3.Principal) *envoy_rbac_v3.Principal { - return &envoy_rbac_v3.Principal{ - Identifier: &envoy_rbac_v3.Principal_AndIds{ - AndIds: &envoy_rbac_v3.Principal_Set{ - Ids: ids, + switch len(ids) { + case 1: + return ids[0] + default: + return &envoy_rbac_v3.Principal{ + Identifier: &envoy_rbac_v3.Principal_AndIds{ + AndIds: &envoy_rbac_v3.Principal_Set{ + Ids: ids, + }, }, - }, + } } } func orPrincipals(ids []*envoy_rbac_v3.Principal) *envoy_rbac_v3.Principal { - return &envoy_rbac_v3.Principal{ - Identifier: &envoy_rbac_v3.Principal_OrIds{ - OrIds: &envoy_rbac_v3.Principal_Set{ - Ids: ids, + switch len(ids) { + case 1: + return ids[0] + default: + return &envoy_rbac_v3.Principal{ + Identifier: &envoy_rbac_v3.Principal_OrIds{ + OrIds: &envoy_rbac_v3.Principal_Set{ + Ids: ids, + }, }, - }, + } } } @@ -1206,3 +1315,20 @@ func andPermissions(perms []*envoy_rbac_v3.Permission) *envoy_rbac_v3.Permission } } } + +func orPermissions(perms []*envoy_rbac_v3.Permission) *envoy_rbac_v3.Permission { + switch len(perms) { + case 0: + return anyPermission() + case 1: + return perms[0] + default: + return &envoy_rbac_v3.Permission{ + Rule: &envoy_rbac_v3.Permission_OrRules{ + OrRules: &envoy_rbac_v3.Permission_Set{ + Rules: perms, + }, + }, + } + } +} diff --git a/agent/xds/rbac_test.go b/agent/xds/rbac_test.go index 76f4467bffa6..6bc36028707a 100644 --- a/agent/xds/rbac_test.go +++ b/agent/xds/rbac_test.go @@ -451,10 +451,11 @@ func TestRemoveIntentionPrecedence(t *testing.T) { for name, tt := range tests { t.Run(name, func(t *testing.T) { - rbacIxns := intentionListToIntermediateRBACForm(tt.intentions, 
testLocalInfo, tt.http, testPeerTrustBundle) + rbacIxns, err := intentionListToIntermediateRBACForm(tt.intentions, testLocalInfo, tt.http, testPeerTrustBundle, nil) intentionDefaultAction := intentionActionFromBool(tt.intentionDefaultAllow) rbacIxns = removeIntentionPrecedence(rbacIxns, intentionDefaultAction, testLocalInfo) + require.NoError(t, err) require.Equal(t, tt.expect, rbacIxns) }) } @@ -529,6 +530,10 @@ func TestMakeRBACNetworkAndHTTPFilters(t *testing.T) { {Path: []string{"perms", "role"}, Value: "admin"}, }, } + testJWTProviderConfigEntry = map[string]*structs.JWTProviderConfigEntry{ + "okta": {Name: "okta", Issuer: "mytest.okta-issuer"}, + "auth0": {Name: "auth0", Issuer: "mytest.auth0-issuer"}, + } jwtRequirement = &structs.IntentionJWTRequirement{ Providers: []*structs.IntentionJWTProvider{ &oktaWithClaims, @@ -922,7 +927,7 @@ func TestMakeRBACNetworkAndHTTPFilters(t *testing.T) { }) }) t.Run("http filter", func(t *testing.T) { - filter, err := makeRBACHTTPFilter(tt.intentions, tt.intentionDefaultAllow, testLocalInfo, testPeerTrustBundle) + filter, err := makeRBACHTTPFilter(tt.intentions, tt.intentionDefaultAllow, testLocalInfo, testPeerTrustBundle, testJWTProviderConfigEntry) require.NoError(t, err) t.Run("current", func(t *testing.T) { @@ -1202,7 +1207,7 @@ func TestPathToSegments(t *testing.T) { } } -func TestJwtClaimToPrincipal(t *testing.T) { +func TestJWTClaimsToPrincipals(t *testing.T) { var ( firstClaim = structs.IntentionJWTClaimVerification{ Path: []string{"perms"}, @@ -1234,7 +1239,7 @@ func TestJwtClaimToPrincipal(t *testing.T) { Identifier: &envoy_rbac_v3.Principal_Metadata{ Metadata: &envoy_matcher_v3.MetadataMatcher{ Filter: jwtEnvoyFilter, - Path: pathToSegments(secondClaim.Path, "second-key"), + Path: pathToSegments(secondClaim.Path, payloadKey), Value: &envoy_matcher_v3.ValueMatcher{ MatchPattern: &envoy_matcher_v3.ValueMatcher_StringMatch{ StringMatch: &envoy_matcher_v3.StringMatcher{ @@ -1249,38 +1254,21 @@ func TestJwtClaimToPrincipal(t *testing.T) { } ) tests := map[string]struct { - jwtInfos []*JWTInfo - expected *envoy_rbac_v3.Principal + claims []*structs.IntentionJWTClaimVerification + metadataPayloadKey string + expected *envoy_rbac_v3.Principal }{ - "single-jwt-info": { - jwtInfos: []*JWTInfo{ - { - Claims: []*structs.IntentionJWTClaimVerification{&firstClaim}, - MetadataPayloadKey: payloadKey, - }, - }, - expected: &envoy_rbac_v3.Principal{ - Identifier: &envoy_rbac_v3.Principal_OrIds{ - OrIds: &envoy_rbac_v3.Principal_Set{ - Ids: []*envoy_rbac_v3.Principal{&firstPrincipal}, - }, - }, - }, + "single-claim": { + claims: []*structs.IntentionJWTClaimVerification{&firstClaim}, + metadataPayloadKey: payloadKey, + expected: &firstPrincipal, }, - "multiple-jwt-info": { - jwtInfos: []*JWTInfo{ - { - Claims: []*structs.IntentionJWTClaimVerification{&firstClaim}, - MetadataPayloadKey: payloadKey, - }, - { - Claims: []*structs.IntentionJWTClaimVerification{&secondClaim}, - MetadataPayloadKey: "second-key", - }, - }, + "multiple-claims": { + claims: []*structs.IntentionJWTClaimVerification{&firstClaim, &secondClaim}, + metadataPayloadKey: payloadKey, expected: &envoy_rbac_v3.Principal{ - Identifier: &envoy_rbac_v3.Principal_OrIds{ - OrIds: &envoy_rbac_v3.Principal_Set{ + Identifier: &envoy_rbac_v3.Principal_AndIds{ + AndIds: &envoy_rbac_v3.Principal_Set{ Ids: []*envoy_rbac_v3.Principal{&firstPrincipal, &secondPrincipal}, }, }, @@ -1291,7 +1279,7 @@ func TestJwtClaimToPrincipal(t *testing.T) { for name, tt := range tests { tt := tt t.Run(name, func(t 
*testing.T) { - principal := jwtInfosToPrincipals(tt.jwtInfos) + principal := jwtClaimsToPrincipals(tt.claims, tt.metadataPayloadKey) require.Equal(t, principal, tt.expected) }) } diff --git a/agent/xds/testdata/jwt_authn/intention-with-path.golden b/agent/xds/testdata/jwt_authn/intention-with-path.golden index 6e925758ca36..3a66e2dcf362 100644 --- a/agent/xds/testdata/jwt_authn/intention-with-path.golden +++ b/agent/xds/testdata/jwt_authn/intention-with-path.golden @@ -3,9 +3,9 @@ "typedConfig": { "@type": "type.googleapis.com/envoy.extensions.filters.http.jwt_authn.v3.JwtAuthentication", "providers": { - "okta_0": { + "okta": { "issuer": "test-issuer", - "payloadInMetadata": "jwt_payload_okta_0", + "payloadInMetadata": "jwt_payload_okta", "remoteJwks": { "httpUri": { "uri": "https://example-okta.com/.well-known/jwks.json", @@ -21,10 +21,15 @@ "rules": [ { "match": { - "prefix": "some-special-path" + "prefix": "/" }, "requires": { - "providerName": "okta_0" + "requiresAny": { + "requirements": [ + {"providerName": "okta"}, + {"allowMissingOrFailed": {}} + ] + } } } ] diff --git a/agent/xds/testdata/jwt_authn/local-provider.golden b/agent/xds/testdata/jwt_authn/local-provider.golden index 9efda0042bfd..528c0556a94b 100644 --- a/agent/xds/testdata/jwt_authn/local-provider.golden +++ b/agent/xds/testdata/jwt_authn/local-provider.golden @@ -17,7 +17,12 @@ "prefix": "/" }, "requires": { - "providerName": "okta" + "requiresAny": { + "requirements": [ + {"providerName": "okta"}, + {"allowMissingOrFailed": {}} + ] + } } } ] diff --git a/agent/xds/testdata/jwt_authn/multiple-providers-and-one-permission.golden b/agent/xds/testdata/jwt_authn/multiple-providers-and-one-permission.golden index ca9a99265ee8..7c970bde424d 100644 --- a/agent/xds/testdata/jwt_authn/multiple-providers-and-one-permission.golden +++ b/agent/xds/testdata/jwt_authn/multiple-providers-and-one-permission.golden @@ -17,20 +17,6 @@ } } }, - "okta_0": { - "issuer": "test-issuer", - "payloadInMetadata": "jwt_payload_okta_0", - "remoteJwks": { - "httpUri": { - "uri": "https://example-okta.com/.well-known/jwks.json", - "cluster": "jwks_cluster_okta", - "timeout": "1s" - }, - "asyncFetch": { - "fastListener": true - } - } - }, "auth0": { "issuer": "another-issuer", "payloadInMetadata": "jwt_payload_auth0", @@ -47,28 +33,32 @@ } }, "rules": [ - { - "match": { - "prefix": "some-special-path" - }, - "requires": { - "providerName": "okta_0" - } - }, { "match": { "prefix": "/" }, "requires": { - "providerName": "okta" - } - }, - { - "match": { - "prefix": "/" - }, - "requires": { - "providerName": "auth0" + "requiresAll": { + "requirements": [ + { + "requiresAny": { + "requirements": [ + {"providerName": "okta"}, + {"allowMissingOrFailed": {}} + ] + } + }, + { + "requiresAny": { + "requirements": [ + {"providerName": "auth0"}, + {"allowMissingOrFailed": {}} + ] + } + } + ] + } + } } ] diff --git a/agent/xds/testdata/jwt_authn/no-provider.golden b/agent/xds/testdata/jwt_authn/no-provider.golden new file mode 100644 index 000000000000..9e26dfeeb6e6 --- /dev/null +++ b/agent/xds/testdata/jwt_authn/no-provider.golden @@ -0,0 +1 @@ +{} \ No newline at end of file diff --git a/agent/xds/testdata/jwt_authn/remote-provider.golden b/agent/xds/testdata/jwt_authn/remote-provider.golden index 6116a58cec00..3a66e2dcf362 100644 --- a/agent/xds/testdata/jwt_authn/remote-provider.golden +++ b/agent/xds/testdata/jwt_authn/remote-provider.golden @@ -24,7 +24,12 @@ "prefix": "/" }, "requires": { - "providerName": "okta" + "requiresAny": { + "requirements": [ 
+ {"providerName": "okta"}, + {"allowMissingOrFailed": {}} + ] + } } } ] diff --git a/agent/xds/testdata/jwt_authn/top-level-provider-with-permission.golden b/agent/xds/testdata/jwt_authn/top-level-provider-with-permission.golden index 6eed6793df52..1b0ded7fcb4d 100644 --- a/agent/xds/testdata/jwt_authn/top-level-provider-with-permission.golden +++ b/agent/xds/testdata/jwt_authn/top-level-provider-with-permission.golden @@ -16,37 +16,21 @@ "fastListener": true } } - }, - "okta_0": { - "issuer": "test-issuer", - "payloadInMetadata": "jwt_payload_okta_0", - "remoteJwks": { - "httpUri": { - "uri": "https://example-okta.com/.well-known/jwks.json", - "cluster": "jwks_cluster_okta", - "timeout": "1s" - }, - "asyncFetch": { - "fastListener": true - } - } } }, "rules": [ - { - "match": { - "prefix": "some-special-path" - }, - "requires": { - "providerName": "okta_0" - } - }, { "match": { "prefix": "/" }, "requires": { - "providerName": "okta" + "requiresAny": { + "requirements": [ + {"providerName": "okta"}, + {"allowMissingOrFailed": {}} + ] + } + } } ] diff --git a/agent/xds/testdata/rbac/empty-top-level-jwt-with-one-permission--httpfilter.golden b/agent/xds/testdata/rbac/empty-top-level-jwt-with-one-permission--httpfilter.golden index cd5c35bab27e..f5eb4bdbcb6e 100644 --- a/agent/xds/testdata/rbac/empty-top-level-jwt-with-one-permission--httpfilter.golden +++ b/agent/xds/testdata/rbac/empty-top-level-jwt-with-one-permission--httpfilter.golden @@ -7,35 +7,37 @@ "consul-intentions-layer7-0": { "permissions": [ { - "urlPath": { - "path": { - "prefix": "some-path" - } - } - } - ], - "principals": [ - { - "andIds": { - "ids": [ + "andRules": { + "rules": [ { - "authenticated": { - "principalName": { - "safeRegex": { - "googleRe2": {}, - "regex": "^spiffe://test.consul/ns/default/dc/[^/]+/svc/web$" - } + "urlPath": { + "path": { + "prefix": "some-path" } } }, { - "orIds": { - "ids": [ - { + "andRules": { + "rules": [ + { + "metadata": { + "filter": "envoy.filters.http.jwt_authn", + "path": [ + {"key": "jwt_payload_okta"}, + {"key": "iss"} + ], + "value": { + "stringMatch": { + "exact": "mytest.okta-issuer" + } + } + } + }, + { "metadata": { - "filter":"envoy.filters.http.jwt_authn", + "filter": "envoy.filters.http.jwt_authn", "path": [ - {"key": "jwt_payload_okta_0"}, + {"key": "jwt_payload_okta"}, {"key": "roles"} ], "value": { @@ -51,6 +53,18 @@ ] } } + ], + "principals": [ + { + "authenticated": { + "principalName": { + "safeRegex": { + "googleRe2": {}, + "regex": "^spiffe://test.consul/ns/default/dc/[^/]+/svc/web$" + } + } + } + } ] } } diff --git a/agent/xds/testdata/rbac/top-level-jwt-no-permissions--httpfilter.golden b/agent/xds/testdata/rbac/top-level-jwt-no-permissions--httpfilter.golden index 35b3792e6658..efa9293f3c16 100644 --- a/agent/xds/testdata/rbac/top-level-jwt-no-permissions--httpfilter.golden +++ b/agent/xds/testdata/rbac/top-level-jwt-no-permissions--httpfilter.golden @@ -27,8 +27,22 @@ } }, { - "orIds": { + "andIds": { "ids": [ + { + "metadata": { + "filter":"envoy.filters.http.jwt_authn", + "path": [ + {"key": "jwt_payload_okta"}, + {"key": "iss"} + ], + "value": { + "stringMatch": { + "exact": "mytest.okta-issuer" + } + } + } + }, { "metadata": { "filter":"envoy.filters.http.jwt_authn", diff --git a/agent/xds/testdata/rbac/top-level-jwt-with-multiple-permissions--httpfilter.golden b/agent/xds/testdata/rbac/top-level-jwt-with-multiple-permissions--httpfilter.golden index 409a3a4bd6b4..6ce0662e3b09 100644 --- 
a/agent/xds/testdata/rbac/top-level-jwt-with-multiple-permissions--httpfilter.golden +++ b/agent/xds/testdata/rbac/top-level-jwt-with-multiple-permissions--httpfilter.golden @@ -6,31 +6,113 @@ "policies": { "consul-intentions-layer7-0": { "permissions": [ - { - "urlPath": { - "path": { - "exact": "/v1/secret" - } - } - }, { "andRules": { "rules": [ { "urlPath": { "path": { - "exact": "/v1/admin" + "exact": "/v1/secret" } } }, { - "notRule": { - "urlPath": { - "path": { - "exact": "/v1/secret" + "andRules": { + "rules": [ + { + "metadata": { + "filter": "envoy.filters.http.jwt_authn", + "path": [ + {"key": "jwt_payload_auth0"}, + {"key": "iss"} + ], + "value": { + "stringMatch": { + "exact": "mytest.auth0-issuer" + } + } + } + }, + { + "metadata": { + "filter": "envoy.filters.http.jwt_authn", + "path": [ + {"key": "jwt_payload_auth0"}, + {"key": "perms"}, + {"key": "role"} + ], + "value": { + "stringMatch": { + "exact": "admin" + } + } + } } - } + ] + } + } + ] + } + }, + { + "andRules": { + "rules": [ + { + "andRules": { + "rules": [ + { + "urlPath": { + "path": { + "exact": "/v1/admin" + } + } + }, + { + "notRule": { + "urlPath": { + "path": { + "exact": "/v1/secret" + } + } + } + } + ] } + }, + { + "andRules": { + "rules": [ + { + "metadata": { + "filter": "envoy.filters.http.jwt_authn", + "path": [ + {"key": "jwt_payload_auth0"}, + {"key": "iss"} + ], + "value": { + "stringMatch": { + "exact": "mytest.auth0-issuer" + } + } + } + }, + { + "metadata": { + "filter": "envoy.filters.http.jwt_authn", + "path": [ + {"key": "jwt_payload_auth0"}, + {"key": "perms"}, + {"key": "role"} + ], + "value": { + "stringMatch": { + "exact": "admin" + } + } + } + } + ] + } } ] } @@ -53,33 +135,18 @@ } }, { - "orIds": { + "andIds": { "ids": [ { "metadata": { "filter":"envoy.filters.http.jwt_authn", "path": [ {"key": "jwt_payload_okta"}, - {"key": "roles"} - ], - "value": { - "stringMatch": { - "exact": "testing" - } - } - } - }, - { - "metadata": { - "filter":"envoy.filters.http.jwt_authn", - "path": [ - {"key": "jwt_payload_auth0_0"}, - {"key": "perms"}, - {"key": "role"} + {"key": "iss"} ], "value": { "stringMatch": { - "exact": "admin" + "exact": "mytest.okta-issuer" } } } @@ -88,13 +155,12 @@ "metadata": { "filter":"envoy.filters.http.jwt_authn", "path": [ - {"key": "jwt_payload_auth0_1"}, - {"key": "perms"}, - {"key": "role"} + {"key": "jwt_payload_okta"}, + {"key": "roles"} ], "value": { "stringMatch": { - "exact": "admin" + "exact": "testing" } } } diff --git a/agent/xds/testdata/rbac/top-level-jwt-with-one-permission--httpfilter.golden b/agent/xds/testdata/rbac/top-level-jwt-with-one-permission--httpfilter.golden index edf027f3e0cb..36ba23c29314 100644 --- a/agent/xds/testdata/rbac/top-level-jwt-with-one-permission--httpfilter.golden +++ b/agent/xds/testdata/rbac/top-level-jwt-with-one-permission--httpfilter.golden @@ -7,10 +7,51 @@ "consul-intentions-layer7-0": { "permissions": [ { - "urlPath": { - "path": { - "exact": "/v1/secret" - } + "andRules": { + "rules": [ + { + "urlPath": { + "path": { + "exact": "/v1/secret" + } + } + }, + { + "andRules": { + "rules": [ + { + "metadata": { + "filter": "envoy.filters.http.jwt_authn", + "path": [ + {"key": "jwt_payload_auth0"}, + {"key": "iss"} + ], + "value": { + "stringMatch": { + "exact": "mytest.auth0-issuer" + } + } + } + }, + { + "metadata": { + "filter": "envoy.filters.http.jwt_authn", + "path": [ + {"key": "jwt_payload_auth0"}, + {"key": "perms"}, + {"key": "role"} + ], + "value": { + "stringMatch": { + "exact": "admin" + } + } + } + } + ] + } + } + 
] } }, { @@ -53,18 +94,18 @@ } }, { - "orIds": { + "andIds": { "ids": [ { "metadata": { "filter":"envoy.filters.http.jwt_authn", "path": [ {"key": "jwt_payload_okta"}, - {"key": "roles"} + {"key": "iss"} ], "value": { "stringMatch": { - "exact": "testing" + "exact": "mytest.okta-issuer" } } } @@ -73,13 +114,12 @@ "metadata": { "filter":"envoy.filters.http.jwt_authn", "path": [ - {"key": "jwt_payload_auth0_0"}, - {"key": "perms"}, - {"key": "role"} + {"key": "jwt_payload_okta"}, + {"key": "roles"} ], "value": { "stringMatch": { - "exact": "admin" + "exact": "testing" } } } diff --git a/test/integration/consul-container/test/jwtauth/jwt_auth_test.go b/test/integration/consul-container/test/jwtauth/jwt_auth_test.go index 37c846d0a6d4..498bdcedf181 100644 --- a/test/integration/consul-container/test/jwtauth/jwt_auth_test.go +++ b/test/integration/consul-container/test/jwtauth/jwt_auth_test.go @@ -76,8 +76,8 @@ func TestJWTAuthConnectService(t *testing.T) { configureIntentions(t, cluster) baseURL := fmt.Sprintf("http://localhost:%d", clientPort) - // fails without jwt headers - doRequest(t, baseURL, http.StatusUnauthorized, "") + // TODO(roncodingenthusiast): update test to reflect jwt-auth filter in metadata mode + doRequest(t, baseURL, http.StatusOK, "") // succeeds with jwt doRequest(t, baseURL, http.StatusOK, jwt) } From f7c5ba5f90b9c060cad1f6c80dc4b3eef7fd81d7 Mon Sep 17 00:00:00 2001 From: Ashesh Vidyut <134911583+absolutelightning@users.noreply.github.com> Date: Mon, 17 Jul 2023 21:40:07 +0530 Subject: [PATCH 37/43] Support Consul Connect Envoy Command on Windows (#17694) ### Description Add support for consul connect envoy command on windows. This PR fixes the comments of PR - https://github.com/hashicorp/consul/pull/15114 ### Testing * Built consul.exe from this branch on windows and hosted here - [AWS S3](https://asheshvidyut-bucket.s3.ap-southeast-2.amazonaws.com/consul.zip) * Updated the [tutorial](https://developer.hashicorp.com/consul/tutorials/developer-mesh/consul-windows-workloads) and changed the `consul_url.default` value to [AWS S3](https://asheshvidyut-bucket.s3.ap-southeast-2.amazonaws.com/consul.zip) * Followed the steps in the tutorial and verified that everything is working as described. 
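### Example usage
A minimal usage sketch for a Windows host (the service name, its sidecar registration, and the admin bind port below are illustrative assumptions, not taken from this PR):

```powershell
# Assumes consul.exe and envoy.exe are on PATH, a local Consul agent is running,
# and a service named "web" has already been registered with a sidecar proxy.
consul connect envoy -sidecar-for web -admin-bind localhost:19000
```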
### PR Checklist * [x] updated test coverage * [ ] external facing docs updated * [x] appropriate backport labels added * [x] not a security concern --------- Co-authored-by: Franco Bruno Lavayen Co-authored-by: Jose Ignacio Lorenzo <74208929+joselo85@users.noreply.github.com> Co-authored-by: Jose Ignacio Lorenzo Co-authored-by: Dhia Ayachi --- .changelog/17694.txt | 3 + command/connect/envoy/envoy.go | 2 +- command/connect/envoy/exec_supported.go | 55 +++++++++++ command/connect/envoy/exec_unix.go | 51 ---------- command/connect/envoy/exec_unsupported.go | 4 +- command/connect/envoy/exec_windows.go | 110 ++++++++++++++++++++++ go.mod | 2 + go.sum | 4 + 8 files changed, 177 insertions(+), 54 deletions(-) create mode 100644 .changelog/17694.txt create mode 100644 command/connect/envoy/exec_supported.go create mode 100644 command/connect/envoy/exec_windows.go diff --git a/.changelog/17694.txt b/.changelog/17694.txt new file mode 100644 index 000000000000..703b100d1d3a --- /dev/null +++ b/.changelog/17694.txt @@ -0,0 +1,3 @@ +```release-note:feature +Windows: support consul connect envoy command on Windows +``` diff --git a/command/connect/envoy/envoy.go b/command/connect/envoy/envoy.go index a6212ae4ca42..48ee199c1a5e 100644 --- a/command/connect/envoy/envoy.go +++ b/command/connect/envoy/envoy.go @@ -38,7 +38,7 @@ func New(ui cli.Ui) *cmd { return c } -const DefaultAdminAccessLogPath = "/dev/null" +const DefaultAdminAccessLogPath = os.DevNull type cmd struct { UI cli.Ui diff --git a/command/connect/envoy/exec_supported.go b/command/connect/envoy/exec_supported.go new file mode 100644 index 000000000000..09dbf895bb12 --- /dev/null +++ b/command/connect/envoy/exec_supported.go @@ -0,0 +1,55 @@ +//go:build linux || darwin || windows +// +build linux darwin windows + +package envoy + +import ( + "fmt" + "os" + "strings" +) + +func isHotRestartOption(s string) bool { + restartOpts := []string{ + "--restart-epoch", + "--hot-restart-version", + "--drain-time-s", + "--parent-shutdown-time-s", + } + for _, opt := range restartOpts { + if s == opt { + return true + } + if strings.HasPrefix(s, opt+"=") { + return true + } + } + return false +} + +func hasHotRestartOption(argSets ...[]string) bool { + for _, args := range argSets { + for _, opt := range args { + if isHotRestartOption(opt) { + return true + } + } + } + return false +} + +// execArgs returns the command and args used to execute a binary. By default it +// will return a command of os.Executable with the args unmodified. This is a shim +// for testing, and can be overridden to execute using 'go run' instead. +var execArgs = func(args ...string) (string, []string, error) { + execPath, err := os.Executable() + if err != nil { + return "", nil, err + } + + if strings.HasSuffix(execPath, "/envoy.test") { + return "", nil, fmt.Errorf("set execArgs to use 'go run' instead of doing a self-exec") + } + + return execPath, args, nil +} diff --git a/command/connect/envoy/exec_unix.go b/command/connect/envoy/exec_unix.go index d3eb0765a9f9..e3d07e2af36d 100644 --- a/command/connect/envoy/exec_unix.go +++ b/command/connect/envoy/exec_unix.go @@ -12,63 +12,12 @@ import ( "os" "os/exec" "path/filepath" - "strings" "syscall" "time" "golang.org/x/sys/unix" ) -// testSelfExecOverride is a way for the tests to no fork-bomb themselves by -// self-executing the whole test suite for each case recursively. It's gross but -// the least gross option I could think of. 
-var testSelfExecOverride string - -func isHotRestartOption(s string) bool { - restartOpts := []string{ - "--restart-epoch", - "--hot-restart-version", - "--drain-time-s", - "--parent-shutdown-time-s", - } - for _, opt := range restartOpts { - if s == opt { - return true - } - if strings.HasPrefix(s, opt+"=") { - return true - } - } - return false -} - -func hasHotRestartOption(argSets ...[]string) bool { - for _, args := range argSets { - for _, opt := range args { - if isHotRestartOption(opt) { - return true - } - } - } - return false -} - -// execArgs returns the command and args used to execute a binary. By default it -// will return a command of os.Executable with the args unmodified. This is a shim -// for testing, and can be overridden to execute using 'go run' instead. -var execArgs = func(args ...string) (string, []string, error) { - execPath, err := os.Executable() - if err != nil { - return "", nil, err - } - - if strings.HasSuffix(execPath, "/envoy.test") { - return "", nil, fmt.Errorf("set execArgs to use 'go run' instead of doing a self-exec") - } - - return execPath, args, nil -} - func makeBootstrapPipe(bootstrapJSON []byte) (string, error) { pipeFile := filepath.Join(os.TempDir(), fmt.Sprintf("envoy-%x-bootstrap.json", time.Now().UnixNano()+int64(os.Getpid()))) diff --git a/command/connect/envoy/exec_unsupported.go b/command/connect/envoy/exec_unsupported.go index c9686098983e..ebbce2dfa25f 100644 --- a/command/connect/envoy/exec_unsupported.go +++ b/command/connect/envoy/exec_unsupported.go @@ -1,8 +1,8 @@ // Copyright (c) HashiCorp, Inc. // SPDX-License-Identifier: MPL-2.0 -//go:build !linux && !darwin -// +build !linux,!darwin +//go:build !linux && !darwin && !windows +// +build !linux,!darwin,!windows package envoy diff --git a/command/connect/envoy/exec_windows.go b/command/connect/envoy/exec_windows.go new file mode 100644 index 000000000000..e70108794ca0 --- /dev/null +++ b/command/connect/envoy/exec_windows.go @@ -0,0 +1,110 @@ +//go:build windows +// +build windows + +package envoy + +import ( + "errors" + "fmt" + "github.com/natefinch/npipe" + "os" + "os/exec" + "path/filepath" + "time" +) + +func makeBootstrapPipe(bootstrapJSON []byte) (string, error) { + pipeFile := filepath.Join(os.TempDir(), + fmt.Sprintf("envoy-%x-bootstrap.json", time.Now().UnixNano()+int64(os.Getpid()))) + + binary, args, err := execArgs("connect", "envoy", "pipe-bootstrap", pipeFile) + if err != nil { + return pipeFile, err + } + + // Dial the named pipe + pipeConn, err := npipe.Dial(pipeFile) + if err != nil { + return pipeFile, err + } + defer pipeConn.Close() + + // Start the command to connect to the named pipe + cmd := exec.Command(binary, args...) + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + cmd.Stdin = pipeConn + + // Start the command + err = cmd.Start() + if err != nil { + return pipeFile, err + } + + // Write the config + n, err := pipeConn.Write(bootstrapJSON) + if err != nil { + return pipeFile, err + } + + if n < len(bootstrapJSON) { + return pipeFile, fmt.Errorf("failed writing bootstrap to child STDIN: %s", err) + } + + // We can't wait for the process since we need to exec into Envoy before it + // will be able to complete, so it will remain a zombie until Envoy is + // killed and will then be reaped by the init process (pid 1). This is all a bit + // gross but the cleanest workaround I can think of for Envoy 1.10 not + // supporting /dev/fd/ config paths any more. So we are done and leaving + // the child to run its course without reaping it.
+ return pipeFile, nil +} + +func startProc(binary string, args []string) (p *os.Process, err error) { + if binary, err = exec.LookPath(binary); err == nil { + var procAttr os.ProcAttr + procAttr.Files = []*os.File{os.Stdin, + os.Stdout, os.Stderr} + p, err := os.StartProcess(binary, args, &procAttr) + if err == nil { + return p, nil + } + } + return nil, err +} + +func execEnvoy(binary string, prefixArgs, suffixArgs []string, bootstrapJSON []byte) error { + tempFile, err := makeBootstrapPipe(bootstrapJSON) + if err != nil { + os.RemoveAll(tempFile) + return err + } + // We don't defer a cleanup since we are about to Exec into Envoy which means + // defer will never fire. The child process cleans up for us in the happy + // path. + + // We default to disabling hot restart because it makes it easier to run + // multiple envoys locally for testing without them trying to share memory and + // unix sockets and complain about being different IDs. But if user is + // actually configuring hot-restart explicitly with the --restart-epoch option + // then don't disable it! + disableHotRestart := !hasHotRestartOption(prefixArgs, suffixArgs) + + // First argument needs to be the executable name. + envoyArgs := []string{} + envoyArgs = append(envoyArgs, prefixArgs...) + if disableHotRestart { + envoyArgs = append(envoyArgs, "--disable-hot-restart") + } + envoyArgs = append(envoyArgs, suffixArgs...) + envoyArgs = append(envoyArgs, "--config-path", tempFile) + + // Exec + if proc, err := startProc(binary, envoyArgs); err == nil { + proc.Wait() + } else if err != nil { + return errors.New("Failed to exec envoy: " + err.Error()) + } + + return nil +} diff --git a/go.mod b/go.mod index 29b22f8f6d25..c4fc7a5c847d 100644 --- a/go.mod +++ b/go.mod @@ -86,6 +86,7 @@ require ( github.com/mitchellh/mapstructure v1.5.0 github.com/mitchellh/pointerstructure v1.2.1 github.com/mitchellh/reflectwalk v1.0.2 + github.com/natefinch/npipe v0.0.0-20160621034901-c1b8fa8bdcce github.com/oklog/ulid/v2 v2.1.0 github.com/olekukonko/tablewriter v0.0.4 github.com/patrickmn/go-cache v2.1.0+incompatible @@ -250,6 +251,7 @@ require ( google.golang.org/appengine v1.6.7 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/ini.v1 v1.66.2 // indirect + gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce // indirect gopkg.in/resty.v1 v1.12.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect diff --git a/go.sum b/go.sum index 5bd76271a38a..8d340dcdb30d 100644 --- a/go.sum +++ b/go.sum @@ -739,6 +739,8 @@ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= +github.com/natefinch/npipe v0.0.0-20160621034901-c1b8fa8bdcce h1:TqjP/BTDrwN7zP9xyXVuLsMBXYMt6LLYi55PlrIcq8U= +github.com/natefinch/npipe v0.0.0-20160621034901-c1b8fa8bdcce/go.mod h1:ifHPsLndGGzvgzcaXUvzmt6LxKT4pJ+uzEhtnMt+f7A= github.com/nicolai86/scaleway-sdk v1.10.2-0.20180628010248-798f60e20bb2 h1:BQ1HW7hr4IVovMwWg0E0PYcyW8CzqDcVmaew9cujU4s= github.com/nicolai86/scaleway-sdk v1.10.2-0.20180628010248-798f60e20bb2/go.mod h1:TLb2Sg7HQcgGdloNxkrmtgDNR9uVYF3lfdFIN4Ro6Sk= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod 
h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= @@ -1458,6 +1460,8 @@ gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/ini.v1 v1.66.2 h1:XfR1dOYubytKy4Shzc2LHrrGhU0lDCfDGG1yLPmpgsI= gopkg.in/ini.v1 v1.66.2/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce h1:+JknDZhAj8YMt7GC73Ei8pv4MzjDUNPHgQWJdtMAaDU= +gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce/go.mod h1:5AcXVHNjg+BDxry382+8OKon8SEWiKktQR07RKPsv1c= gopkg.in/resty.v1 v1.9.1/go.mod h1:vo52Hzryw9PnPHcJfPsBiFW62XhNx5OczbV9y+IMpgc= gopkg.in/resty.v1 v1.12.0 h1:CuXP0Pjfw9rOuY6EP+UvtNvt5DSqHpIxILZKT/quCZI= gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= From e52ea0ee7abbd1f3a820dd955799e782ef72aa4c Mon Sep 17 00:00:00 2001 From: David Yu Date: Mon, 17 Jul 2023 09:53:36 -0700 Subject: [PATCH 38/43] Change docs to say 168h instead of 7d for server_rejoin_age_max (#18154) ### Description Addresses https://github.com/hashicorp/consul/pull/17171#issuecomment-1636930705 --- website/content/docs/agent/config/config-files.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/content/docs/agent/config/config-files.mdx b/website/content/docs/agent/config/config-files.mdx index 8d46b63bd0ce..f314acd38ca9 100644 --- a/website/content/docs/agent/config/config-files.mdx +++ b/website/content/docs/agent/config/config-files.mdx @@ -742,7 +742,7 @@ Refer to the [formatting specification](https://golang.org/pkg/time/#ParseDurati - `server_rejoin_age_max` - controls the allowed maximum age of a stale server attempting to rejoin a cluster. If a server is not running for this period, then it will refuse to start up again until an operator intervenes. This is to protect clusters from instability caused by decommissioned servers accidentally being started again. - Note: the default value is 7d and the minimum value is 6h. + Note: the default value is 168h (equal to 7d) and the minimum value is 6h. - `non_voting_server` - **This field is deprecated in Consul 1.9.1. 
See the [`read_replica`](#read_replica) field instead.** From 33d898b857de912da508dc7f062b4909a23ac6e4 Mon Sep 17 00:00:00 2001 From: Dan Stough Date: Mon, 17 Jul 2023 13:49:40 -0400 Subject: [PATCH 39/43] [OSS] test: improve xDS listener code coverage (#18138) test: improve xDS listener code coverage --- agent/proxycfg/testing_connect_proxy.go | 40 +++ agent/proxycfg/testing_ingress_gateway.go | 211 ++++++++++++++- agent/proxycfg/testing_mesh_gateway.go | 71 ++++- agent/proxycfg/testing_peering.go | 76 ++++++ agent/xds/listeners_apigateway.go | 29 +-- agent/xds/listeners_test.go | 80 +++++- agent/xds/resources_test.go | 139 +++++++++- ...api-gateway-with-http-route.latest.golden} | 0 ...multiple-inline-certificates.latest.golden | 55 ++++ ...-upstreams-listener-override.latest.golden | 147 +++++++++++ ...api-gateway-with-http-route.latest.golden} | 0 ...multiple-inline-certificates.latest.golden | 5 + ...-upstreams-listener-override.latest.golden | 29 +++ ...route-and-inline-certificate.latest.golden | 49 ---- .../api-gateway-with-http-route.latest.golden | 80 ++++++ ...multiple-inline-certificates.latest.golden | 102 ++++++++ ...-upstreams-listener-override.latest.golden | 114 ++++++++ ...h-tproxy-and-permissive-mtls.latest.golden | 6 +- ...upstream-with-prepared-query.latest.golden | 114 ++++++++ ...olden => expose-checks-grpc.latest.golden} | 33 +-- ...ecks-http-with-bind-override.latest.golden | 137 ++++++++++ .../expose-checks-http.latest.golden | 137 ++++++++++ ...ixed-cipher-suites-listeners.latest.golden | 156 +++++++++++ ...-mixed-max-version-listeners.latest.golden | 223 ++++++++++++++++ ...ing-federation-control-plane.latest.golden | 181 +++++++++++++ ...ateway-custom-trace-listener.latest.golden | 246 ++++++++++++++++++ ...route-and-inline-certificate.latest.golden | 50 ---- .../api-gateway-with-http-route.latest.golden | 50 ++++ ...multiple-inline-certificates.latest.golden | 5 + ...-upstreams-listener-override.latest.golden | 5 + .../api-gateway-with-http-route.latest.golden | 5 + ...multiple-inline-certificates.latest.golden | 5 + ...-upstreams-listener-override.latest.golden | 5 + 33 files changed, 2425 insertions(+), 160 deletions(-) rename agent/xds/testdata/clusters/{api-gateway-with-http-route-and-inline-certificate.latest.golden => api-gateway-with-http-route.latest.golden} (100%) create mode 100644 agent/xds/testdata/clusters/api-gateway-with-multiple-inline-certificates.latest.golden create mode 100644 agent/xds/testdata/clusters/connect-proxy-with-peered-upstreams-listener-override.latest.golden rename agent/xds/testdata/endpoints/{api-gateway-with-http-route-and-inline-certificate.latest.golden => api-gateway-with-http-route.latest.golden} (100%) create mode 100644 agent/xds/testdata/endpoints/api-gateway-with-multiple-inline-certificates.latest.golden create mode 100644 agent/xds/testdata/endpoints/connect-proxy-with-peered-upstreams-listener-override.latest.golden delete mode 100644 agent/xds/testdata/listeners/api-gateway-with-http-route-and-inline-certificate.latest.golden create mode 100644 agent/xds/testdata/listeners/api-gateway-with-http-route.latest.golden create mode 100644 agent/xds/testdata/listeners/api-gateway-with-multiple-inline-certificates.latest.golden create mode 100644 agent/xds/testdata/listeners/connect-proxy-with-peered-upstreams-listener-override.latest.golden create mode 100644 agent/xds/testdata/listeners/custom-upstream-with-prepared-query.latest.golden rename agent/xds/testdata/listeners/{expose-checks.latest.golden => 
expose-checks-grpc.latest.golden} (89%) create mode 100644 agent/xds/testdata/listeners/expose-checks-http-with-bind-override.latest.golden create mode 100644 agent/xds/testdata/listeners/expose-checks-http.latest.golden create mode 100644 agent/xds/testdata/listeners/ingress-with-tls-mixed-cipher-suites-listeners.latest.golden create mode 100644 agent/xds/testdata/listeners/ingress-with-tls-mixed-max-version-listeners.latest.golden create mode 100644 agent/xds/testdata/listeners/mesh-gateway-using-federation-control-plane.latest.golden create mode 100644 agent/xds/testdata/listeners/terminating-gateway-custom-trace-listener.latest.golden delete mode 100644 agent/xds/testdata/routes/api-gateway-with-http-route-and-inline-certificate.latest.golden create mode 100644 agent/xds/testdata/routes/api-gateway-with-http-route.latest.golden create mode 100644 agent/xds/testdata/routes/api-gateway-with-multiple-inline-certificates.latest.golden create mode 100644 agent/xds/testdata/routes/connect-proxy-with-peered-upstreams-listener-override.latest.golden create mode 100644 agent/xds/testdata/secrets/api-gateway-with-http-route.latest.golden create mode 100644 agent/xds/testdata/secrets/api-gateway-with-multiple-inline-certificates.latest.golden create mode 100644 agent/xds/testdata/secrets/connect-proxy-with-peered-upstreams-listener-override.latest.golden diff --git a/agent/proxycfg/testing_connect_proxy.go b/agent/proxycfg/testing_connect_proxy.go index a929aa52f167..8a1583edd0f0 100644 --- a/agent/proxycfg/testing_connect_proxy.go +++ b/agent/proxycfg/testing_connect_proxy.go @@ -227,6 +227,14 @@ func TestConfigSnapshotExposeConfig(t testing.T, nsFn func(ns *structs.NodeServi } func TestConfigSnapshotExposeChecks(t testing.T) *ConfigSnapshot { + return testConfigSnapshotExposedChecks(t, false) +} + +func TestConfigSnapshotExposeChecksWithBindOverride(t testing.T) *ConfigSnapshot { + return testConfigSnapshotExposedChecks(t, true) +} + +func testConfigSnapshotExposedChecks(t testing.T, overrideBind bool) *ConfigSnapshot { return TestConfigSnapshot(t, func(ns *structs.NodeService) { ns.Address = "1.2.3.4" @@ -235,6 +243,12 @@ func TestConfigSnapshotExposeChecks(t testing.T) *ConfigSnapshot { ns.Proxy.Expose = structs.ExposeConfig{ Checks: true, } + if overrideBind { + if ns.Proxy.Config == nil { + ns.Proxy.Config = map[string]any{} + } + ns.Proxy.Config["bind_address"] = "6.7.8.9" + } }, []UpdateEvent{ { @@ -253,6 +267,32 @@ func TestConfigSnapshotExposeChecks(t testing.T) *ConfigSnapshot { ) } +func TestConfigSnapshotExposeChecksGRPC(t testing.T) *ConfigSnapshot { + return TestConfigSnapshot(t, + func(ns *structs.NodeService) { + ns.Address = "1.2.3.4" + ns.Port = 9090 + ns.Proxy.Upstreams = nil + ns.Proxy.Expose = structs.ExposeConfig{ + Checks: true, + } + }, + []UpdateEvent{ + { + CorrelationID: svcChecksWatchIDPrefix + structs.ServiceIDString("web", nil), + Result: []structs.CheckType{{ + CheckID: types.CheckID("grpc"), + Name: "grpc", + GRPC: "localhost:9090/v1.Health", + ProxyGRPC: "localhost:21501/myservice", + Interval: 10 * time.Second, + Timeout: 1 * time.Second, + }}, + }, + }, + ) +} + func TestConfigSnapshotGRPCExposeHTTP1(t testing.T) *ConfigSnapshot { roots, leaf := TestCerts(t) diff --git a/agent/proxycfg/testing_ingress_gateway.go b/agent/proxycfg/testing_ingress_gateway.go index d6b2c3ad2eb8..7c3599af203a 100644 --- a/agent/proxycfg/testing_ingress_gateway.go +++ b/agent/proxycfg/testing_ingress_gateway.go @@ -1888,8 +1888,8 @@ func 
TestConfigSnapshotIngressGateway_TLSMixedMinVersionListeners(t testing.T) * entry.TLS.Enabled = true entry.TLS.TLSMinVersion = types.TLSv1_2 - // One listener disables TLS, one inherits TLS minimum version from the gateway - // config, two others set different versions + // One listener should inherit TLS minimum version from the gateway config, + // two others each set explicit TLS minimum versions entry.Listeners = []structs.IngressListener{ { Port: 8080, @@ -1925,8 +1925,6 @@ func TestConfigSnapshotIngressGateway_TLSMixedMinVersionListeners(t testing.T) * { CorrelationID: gatewayServicesWatchID, Result: &structs.IndexedGatewayServices{ - // One listener should inherit TLS minimum version from the gateway config, - // two others each set explicit TLS minimum versions Services: []*structs.GatewayService{ { Service: s1, @@ -1984,3 +1982,208 @@ func TestConfigSnapshotIngressGateway_TLSMixedMinVersionListeners(t testing.T) * }, }) } + +func TestConfigSnapshotIngressGateway_TLSMixedMaxVersionListeners(t testing.T) *ConfigSnapshot { + var ( + s1 = structs.NewServiceName("s1", nil) + s1UID = NewUpstreamIDFromServiceName(s1) + s1Chain = discoverychain.TestCompileConfigEntries(t, "s1", "default", "default", "dc1", connect.TestClusterID+".consul", nil, nil) + + s2 = structs.NewServiceName("s2", nil) + s2UID = NewUpstreamIDFromServiceName(s2) + s2Chain = discoverychain.TestCompileConfigEntries(t, "s2", "default", "default", "dc1", connect.TestClusterID+".consul", nil, nil) + + s3 = structs.NewServiceName("s3", nil) + s3UID = NewUpstreamIDFromServiceName(s3) + s3Chain = discoverychain.TestCompileConfigEntries(t, "s3", "default", "default", "dc1", connect.TestClusterID+".consul", nil, nil) + ) + + return TestConfigSnapshotIngressGateway(t, true, "tcp", "default", nil, + func(entry *structs.IngressGatewayConfigEntry) { + entry.TLS.Enabled = true + entry.TLS.TLSMaxVersion = types.TLSv1_2 + + // One listener should inherit TLS maximum version from the gateway config, + // two others each set explicit TLS maximum versions + entry.Listeners = []structs.IngressListener{ + { + Port: 8080, + Protocol: "http", + Services: []structs.IngressService{ + {Name: "s1"}, + }, + }, + { + Port: 8081, + Protocol: "http", + Services: []structs.IngressService{ + {Name: "s2"}, + }, + TLS: &structs.GatewayTLSConfig{ + Enabled: true, + TLSMaxVersion: types.TLSv1_0, + }, + }, + { + Port: 8082, + Protocol: "http", + Services: []structs.IngressService{ + {Name: "s3"}, + }, + TLS: &structs.GatewayTLSConfig{ + Enabled: true, + TLSMaxVersion: types.TLSv1_3, + }, + }, + } + }, []UpdateEvent{ + { + CorrelationID: gatewayServicesWatchID, + Result: &structs.IndexedGatewayServices{ + Services: []*structs.GatewayService{ + { + Service: s1, + Port: 8080, + Protocol: "http", + }, + { + Service: s2, + Port: 8081, + Protocol: "http", + }, + { + Service: s3, + Port: 8082, + Protocol: "http", + }, + }, + }, + }, + { + CorrelationID: "discovery-chain:" + s1UID.String(), + Result: &structs.DiscoveryChainResponse{ + Chain: s1Chain, + }, + }, + { + CorrelationID: "discovery-chain:" + s2UID.String(), + Result: &structs.DiscoveryChainResponse{ + Chain: s2Chain, + }, + }, + { + CorrelationID: "discovery-chain:" + s3UID.String(), + Result: &structs.DiscoveryChainResponse{ + Chain: s3Chain, + }, + }, + { + CorrelationID: "upstream-target:" + s1Chain.ID() + ":" + s1UID.String(), + Result: &structs.IndexedCheckServiceNodes{ + Nodes: TestUpstreamNodes(t, "s1"), + }, + }, + { + CorrelationID: "upstream-target:" + s2Chain.ID() + ":" + s2UID.String(), + 
Result: &structs.IndexedCheckServiceNodes{ + Nodes: TestUpstreamNodes(t, "s2"), + }, + }, + { + CorrelationID: "upstream-target:" + s3Chain.ID() + ":" + s3UID.String(), + Result: &structs.IndexedCheckServiceNodes{ + Nodes: TestUpstreamNodes(t, "s3"), + }, + }, + }) +} + +func TestConfigSnapshotIngressGateway_TLSMixedCipherVersionListeners(t testing.T) *ConfigSnapshot { + var ( + s1 = structs.NewServiceName("s1", nil) + s1UID = NewUpstreamIDFromServiceName(s1) + s1Chain = discoverychain.TestCompileConfigEntries(t, "s1", "default", "default", "dc1", connect.TestClusterID+".consul", nil, nil) + + s2 = structs.NewServiceName("s2", nil) + s2UID = NewUpstreamIDFromServiceName(s2) + s2Chain = discoverychain.TestCompileConfigEntries(t, "s2", "default", "default", "dc1", connect.TestClusterID+".consul", nil, nil) + ) + + return TestConfigSnapshotIngressGateway(t, true, "tcp", "default", nil, + func(entry *structs.IngressGatewayConfigEntry) { + entry.TLS.Enabled = true + entry.TLS.CipherSuites = []types.TLSCipherSuite{ + types.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, + } + + // One listener should inherit TLS Ciphers from the gateway config, + // the other should be set explicitly from the listener config + entry.Listeners = []structs.IngressListener{ + { + Port: 8080, + Protocol: "http", + Services: []structs.IngressService{ + {Name: "s1"}, + }, + }, + { + Port: 8081, + Protocol: "http", + Services: []structs.IngressService{ + {Name: "s2"}, + }, + TLS: &structs.GatewayTLSConfig{ + Enabled: true, + CipherSuites: []types.TLSCipherSuite{ + types.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256, + types.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256, + }, + }, + }, + } + }, []UpdateEvent{ + { + CorrelationID: gatewayServicesWatchID, + Result: &structs.IndexedGatewayServices{ + // One listener should inherit TLS minimum version from the gateway config, + // two others each set explicit TLS minimum versions + Services: []*structs.GatewayService{ + { + Service: s1, + Port: 8080, + Protocol: "http", + }, + { + Service: s2, + Port: 8081, + Protocol: "http", + }, + }, + }, + }, + { + CorrelationID: "discovery-chain:" + s1UID.String(), + Result: &structs.DiscoveryChainResponse{ + Chain: s1Chain, + }, + }, + { + CorrelationID: "discovery-chain:" + s2UID.String(), + Result: &structs.DiscoveryChainResponse{ + Chain: s2Chain, + }, + }, + { + CorrelationID: "upstream-target:" + s1Chain.ID() + ":" + s1UID.String(), + Result: &structs.IndexedCheckServiceNodes{ + Nodes: TestUpstreamNodes(t, "s1"), + }, + }, + { + CorrelationID: "upstream-target:" + s2Chain.ID() + ":" + s2UID.String(), + Result: &structs.IndexedCheckServiceNodes{ + Nodes: TestUpstreamNodes(t, "s2"), + }, + }, + }) +} diff --git a/agent/proxycfg/testing_mesh_gateway.go b/agent/proxycfg/testing_mesh_gateway.go index 0ad9d4524afe..b45595502ec2 100644 --- a/agent/proxycfg/testing_mesh_gateway.go +++ b/agent/proxycfg/testing_mesh_gateway.go @@ -23,9 +23,10 @@ func TestConfigSnapshotMeshGateway(t testing.T, variant string, nsFn func(ns *st roots, _ := TestCertsForMeshGateway(t) var ( - populateServices = true - useFederationStates = false - deleteCrossDCEntry = false + populateServices = true + useFederationStates = false + deleteCrossDCEntry = false + meshGatewayFederation = false ) switch variant { @@ -34,6 +35,11 @@ func TestConfigSnapshotMeshGateway(t testing.T, variant string, nsFn func(ns *st populateServices = true useFederationStates = true deleteCrossDCEntry = true + case "mesh-gateway-federation": + populateServices = true + useFederationStates = true + 
deleteCrossDCEntry = true + meshGatewayFederation = true case "newer-info-in-federation-states": populateServices = true useFederationStates = true @@ -447,6 +453,63 @@ func TestConfigSnapshotMeshGateway(t testing.T, variant string, nsFn func(ns *st }) } + var serverSNIFn ServerSNIFunc + if meshGatewayFederation { + + // reproduced from tlsutil/config.go + serverSNIFn = func(dc, nodeName string) string { + // Strip the trailing '.' from the domain if any + domain := "consul" + + if nodeName == "" || nodeName == "*" { + return "server." + dc + "." + domain + } + + return nodeName + ".server." + dc + "." + domain + } + + baseEvents = testSpliceEvents(baseEvents, []UpdateEvent{ + { + CorrelationID: consulServerListWatchID, + Result: &structs.IndexedCheckServiceNodes{ + Nodes: structs.CheckServiceNodes{ + { + Node: &structs.Node{ + Datacenter: "dc1", + Node: "node1", + Address: "127.0.0.1", + }, + Service: &structs.NodeService{ + ID: structs.ConsulServiceID, + Service: structs.ConsulServiceName, + Meta: map[string]string{ + "grpc_port": "8502", + "grpc_tls_port": "8503", + }, + }, + }, + { + Node: &structs.Node{ + Datacenter: "dc1", + Node: "node2", + Address: "127.0.0.2", + }, + Service: &structs.NodeService{ + ID: structs.ConsulServiceID, + Service: structs.ConsulServiceName, + Meta: map[string]string{ + "grpc_port": "8502", + "grpc_tls_port": "8503", + }, + }, + }, + }, + }, + }, + }) + + } + return testConfigSnapshotFixture(t, &structs.NodeService{ Kind: structs.ServiceKindMeshGateway, Service: "mesh-gateway", @@ -466,7 +529,7 @@ func TestConfigSnapshotMeshGateway(t testing.T, variant string, nsFn func(ns *st Port: 443, }, }, - }, nsFn, nil, testSpliceEvents(baseEvents, extraUpdates)) + }, nsFn, serverSNIFn, testSpliceEvents(baseEvents, extraUpdates)) } func TestConfigSnapshotPeeredMeshGateway(t testing.T, variant string, nsFn func(ns *structs.NodeService), extraUpdates []UpdateEvent) *ConfigSnapshot { diff --git a/agent/proxycfg/testing_peering.go b/agent/proxycfg/testing_peering.go index 9b754c977f2f..afa7503923e7 100644 --- a/agent/proxycfg/testing_peering.go +++ b/agent/proxycfg/testing_peering.go @@ -4,13 +4,25 @@ package proxycfg import ( + "bytes" + "text/template" + "github.com/mitchellh/go-testing-interface" + "github.com/stretchr/testify/require" "github.com/hashicorp/consul/agent/structs" "github.com/hashicorp/consul/proto/private/pbpeering" ) func TestConfigSnapshotPeering(t testing.T) *ConfigSnapshot { + return testConfigSnapshot(t, false) +} + +func TestConfigSnapshotPeeringWithListenerOverride(t testing.T) *ConfigSnapshot { + return testConfigSnapshot(t, true) +} + +func testConfigSnapshot(t testing.T, listenerOverride bool) *ConfigSnapshot { var ( paymentsUpstream = structs.Upstream{ DestinationName: "payments", @@ -34,6 +46,20 @@ func TestConfigSnapshotPeering(t testing.T) *ConfigSnapshot { paymentsUpstream, refundsUpstream, } + + if listenerOverride { + if ns.Proxy.Upstreams[0].Config == nil { + ns.Proxy.Upstreams[0].Config = map[string]interface{}{} + } + + uid := NewUpstreamID(&ns.Proxy.Upstreams[0]) + + ns.Proxy.Upstreams[0].Config["envoy_listener_json"] = + customListenerJSON(t, customListenerJSONOptions{ + Name: uid.EnvoyID() + ":custom-upstream", + }) + } + }, []UpdateEvent{ { CorrelationID: peerTrustBundleIDPrefix + "cloud", @@ -380,3 +406,53 @@ func TestConfigSnapshotPeeringLocalMeshGateway(t testing.T) *ConfigSnapshot { }, }) } + +var ( + customListenerJSONTemplate = template.Must(template.New("").Parse(customListenerJSONTpl)) +) + +func customListenerJSON(t 
testing.T, opts customListenerJSONOptions) string { + t.Helper() + var buf bytes.Buffer + require.NoError(t, customListenerJSONTemplate.Execute(&buf, opts)) + return buf.String() +} + +type customListenerJSONOptions struct { + Name string + TLSContext string +} + +const customListenerJSONTpl = `{ + "@type": "type.googleapis.com/envoy.config.listener.v3.Listener", + "name": "{{ .Name }}", + "address": { + "socketAddress": { + "address": "11.11.11.11", + "portValue": 11111 + } + }, + "filterChains": [ + { + {{ if .TLSContext -}} + "transport_socket": { + "name": "tls", + "typed_config": { + "@type": "type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext", + {{ .TLSContext }} + } + }, + {{- end }} + "filters": [ + { + "name": "envoy.filters.network.tcp_proxy", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.network.tcp_proxy.v3.TcpProxy", + "cluster": "random-cluster", + "statPrefix": "foo-stats" + } + } + ] + } + ] +}` diff --git a/agent/xds/listeners_apigateway.go b/agent/xds/listeners_apigateway.go index 633c04f0524b..fcb1ee0829a3 100644 --- a/agent/xds/listeners_apigateway.go +++ b/agent/xds/listeners_apigateway.go @@ -5,6 +5,7 @@ package xds import ( "fmt" + envoy_core_v3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" envoy_listener_v3 "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3" envoy_tls_v3 "github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3" @@ -284,10 +285,7 @@ func makeCommonTLSContextFromSnapshotAPIGatewayListenerConfig(cfgSnap *proxycfg. connectTLSEnabled := (!listenerCfg.TLS.IsEmpty()) - if tlsCfg.SDS != nil { - // Set up listener TLS from SDS - tlsContext = makeCommonTLSContextFromGatewayTLSConfig(*tlsCfg) - } else if connectTLSEnabled { + if connectTLSEnabled { tlsContext = makeCommonTLSContext(cfgSnap.Leaf(), cfgSnap.RootPEMs(), makeTLSParametersFromGatewayTLSConfig(*tlsCfg)) } @@ -316,29 +314,6 @@ func resolveAPIListenerTLSConfig(listenerTLSCfg structs.APIGatewayTLSConfigurati return &mergedCfg, nil } -func routeNameForAPIGatewayUpstream(l structs.IngressListener, s structs.IngressService) string { - key := proxycfg.IngressListenerKeyFromListener(l) - - // If the upstream service doesn't have any TLS overrides then it can just use - // the combined filterchain with all the merged routes. - if !ingressServiceHasSDSOverrides(s) { - return key.RouteName() - } - - // Return a specific route for this service as it needs a custom FilterChain - // to serve its custom cert so we should attach its routes to a separate Route - // too. We need this to be consistent between OSS and Enterprise to avoid xDS - // config golden files in tests conflicting so we can't use ServiceID.String() - // which normalizes to included all identifiers in Enterprise. 
- sn := s.ToServiceName() - svcIdentifier := sn.Name - if !sn.InDefaultPartition() || !sn.InDefaultNamespace() { - // Non-default partition/namespace, use a full identifier - svcIdentifier = sn.String() - } - return fmt.Sprintf("%s_%s", key.RouteName(), svcIdentifier) -} - // when we have multiple certificates on a single listener, we need // to duplicate the filter chains with multiple TLS contexts func makeInlineOverrideFilterChains(cfgSnap *proxycfg.ConfigSnapshot, diff --git a/agent/xds/listeners_test.go b/agent/xds/listeners_test.go index 10b358bdad15..1d00217f6ec0 100644 --- a/agent/xds/listeners_test.go +++ b/agent/xds/listeners_test.go @@ -10,9 +10,10 @@ import ( "testing" "text/template" - "github.com/hashicorp/consul/agent/xds/testcommon" "github.com/stretchr/testify/assert" + "github.com/hashicorp/consul/agent/xds/testcommon" + envoy_listener_v3 "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3" testinf "github.com/mitchellh/go-testing-interface" "github.com/stretchr/testify/require" @@ -435,6 +436,31 @@ func TestListenersFromSnapshot(t *testing.T) { }, nil) }, }, + { + name: "custom-upstream-with-prepared-query", + create: func(t testinf.T) *proxycfg.ConfigSnapshot { + return proxycfg.TestConfigSnapshot(t, func(ns *structs.NodeService) { + for i := range ns.Proxy.Upstreams { + if ns.Proxy.Upstreams[i].DestinationName != "db" { + continue // only tweak the db upstream + } + if ns.Proxy.Upstreams[i].Config == nil { + ns.Proxy.Upstreams[i].Config = map[string]interface{}{} + } + + uid := proxycfg.NewUpstreamID(&ns.Proxy.Upstreams[i]) + + // Triggers an override with the presence of the escape hatch listener + ns.Proxy.Upstreams[i].DestinationType = structs.UpstreamDestTypePreparedQuery + + ns.Proxy.Upstreams[i].Config["envoy_listener_json"] = + customListenerJSON(t, customListenerJSONOptions{ + Name: uid.EnvoyID() + ":custom-upstream", + }) + } + }, nil) + }, + }, { name: "connect-proxy-upstream-defaults", create: func(t testinf.T) *proxycfg.ConfigSnapshot { @@ -476,7 +502,7 @@ func TestListenersFromSnapshot(t *testing.T) { // NOTE: if IPv6 is not supported in the kernel per // kernelSupportsIPv6() then this test will fail because the golden // files were generated assuming ipv6 support was present - name: "expose-checks", + name: "expose-checks-http", create: proxycfg.TestConfigSnapshotExposeChecks, generatorSetup: func(s *ResourceGenerator) { s.CfgFetcher = configFetcherFunc(func() string { @@ -484,6 +510,30 @@ func TestListenersFromSnapshot(t *testing.T) { }) }, }, + { + // NOTE: if IPv6 is not supported in the kernel per + // kernelSupportsIPv6() then this test will fail because the golden + // files were generated assuming ipv6 support was present + name: "expose-checks-http-with-bind-override", + create: proxycfg.TestConfigSnapshotExposeChecksWithBindOverride, + generatorSetup: func(s *ResourceGenerator) { + s.CfgFetcher = configFetcherFunc(func() string { + return "192.0.2.1" + }) + }, + }, + { + // NOTE: if IPv6 is not supported in the kernel per + // kernelSupportsIPv6() then this test will fail because the golden + // files were generated assuming ipv6 support was present + name: "expose-checks-grpc", + create: proxycfg.TestConfigSnapshotExposeChecksGRPC, + generatorSetup: func(s *ResourceGenerator) { + s.CfgFetcher = configFetcherFunc(func() string { + return "192.0.2.1" + }) + }, + }, { name: "mesh-gateway", create: func(t testinf.T) *proxycfg.ConfigSnapshot { @@ -496,6 +546,12 @@ func TestListenersFromSnapshot(t *testing.T) { return 
proxycfg.TestConfigSnapshotMeshGateway(t, "federation-states", nil, nil) }, }, + { + name: "mesh-gateway-using-federation-control-plane", + create: func(t testinf.T) *proxycfg.ConfigSnapshot { + return proxycfg.TestConfigSnapshotMeshGateway(t, "mesh-gateway-federation", nil, nil) + }, + }, { name: "mesh-gateway-no-services", create: func(t testinf.T) *proxycfg.ConfigSnapshot { @@ -812,6 +868,16 @@ func TestListenersFromSnapshot(t *testing.T) { return proxycfg.TestConfigSnapshotTerminatingGateway(t, true, nil, nil) }, }, + { + name: "terminating-gateway-custom-trace-listener", + create: func(t testinf.T) *proxycfg.ConfigSnapshot { + return proxycfg.TestConfigSnapshotTerminatingGateway(t, true, func(ns *structs.NodeService) { + ns.Proxy.Config = map[string]interface{}{} + ns.Proxy.Config["protocol"] = "http" + ns.Proxy.Config["envoy_listener_tracing_json"] = customTraceJSON(t) + }, nil) + }, + }, { name: "terminating-gateway-with-tls-incoming-min-version", create: func(t testinf.T) *proxycfg.ConfigSnapshot { @@ -982,6 +1048,14 @@ func TestListenersFromSnapshot(t *testing.T) { name: "ingress-with-tls-mixed-min-version-listeners", create: proxycfg.TestConfigSnapshotIngressGateway_TLSMixedMinVersionListeners, }, + { + name: "ingress-with-tls-mixed-max-version-listeners", + create: proxycfg.TestConfigSnapshotIngressGateway_TLSMixedMaxVersionListeners, + }, + { + name: "ingress-with-tls-mixed-cipher-suites-listeners", + create: proxycfg.TestConfigSnapshotIngressGateway_TLSMixedCipherVersionListeners, + }, { name: "ingress-with-sds-listener-gw-level", create: proxycfg.TestConfigSnapshotIngressGatewaySDS_GatewayLevel, @@ -1105,6 +1179,7 @@ func TestListenersFromSnapshot(t *testing.T) { return proxycfg.TestConfigSnapshot(t, func(ns *structs.NodeService) { ns.Proxy.MutualTLSMode = structs.MutualTLSModePermissive ns.Proxy.Mode = structs.ProxyModeTransparent + ns.Proxy.TransparentProxy.OutboundListenerPort = 1234 }, nil) }, @@ -1129,6 +1204,7 @@ func TestListenersFromSnapshot(t *testing.T) { t.Run("envoy-"+envoyVersion, func(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { + // Sanity check default with no overrides first snap := tt.create(t) diff --git a/agent/xds/resources_test.go b/agent/xds/resources_test.go index 29743c060bfd..1a0d9b826a78 100644 --- a/agent/xds/resources_test.go +++ b/agent/xds/resources_test.go @@ -18,6 +18,7 @@ import ( "github.com/hashicorp/consul/agent/consul/discoverychain" "github.com/hashicorp/consul/agent/xds/testcommon" "github.com/hashicorp/consul/envoyextensions/xdscommon" + "github.com/hashicorp/consul/types" testinf "github.com/mitchellh/go-testing-interface" "github.com/stretchr/testify/require" @@ -163,6 +164,10 @@ func TestAllResourcesFromSnapshot(t *testing.T) { name: "connect-proxy-with-peered-upstreams", create: proxycfg.TestConfigSnapshotPeering, }, + { + name: "connect-proxy-with-peered-upstreams-listener-override", + create: proxycfg.TestConfigSnapshotPeeringWithListenerOverride, + }, { name: "transparent-proxy-with-peered-upstreams", create: proxycfg.TestConfigSnapshotPeeringTProxy, @@ -326,6 +331,54 @@ RahYIzNLRBTLrwadLAZkApUpZvB8qDK4knsTWFYujNsylCww2A6ajzIMFNU4GkUK NtyHRuD+KYRmjXtyX1yHNqfGN3vOQmwavHq2R8wHYuBSc6LAHHV9vG+j0VsgMELO qwxn8SmLkSKbf2+MsQVzLCXXN5u+D8Yv+4py+oKP4EQ5aFZuDEx+r/G/31rTthww AAJAMaoXmoYVdgXV+CPuBb2M4XCpuzLu3bcA2PXm5ipSyIgntMKwXV7r +-----END CERTIFICATE-----` + // openssl req -x509 -newkey rsa:2048 -keyout key.pem -out cert.pem -sha256 -days 3650 \ + // -nodes -subj "/C=XX/CN=secondcert.com" -addext 
"subjectAltName = DNS:secondcert.com" + gatewayTestPrivateKeyTwo = `-----BEGIN PRIVATE KEY----- +MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQCiPr2HCbVzbZ1M +IW89rfLLrciPTWWl48DF9CmYHS0C2gSD1W6bxzO7zdA+ced0ajI+YsQ9aBAXRhKl +EHgnhBJ6sGsz1XBQ9+lNDHrg9AjugIiHoscYOeCcxMeXhp97ti+vpVsc2/AvEf2K +GIUuOjcufXuRXkWQ2aB4RGyodkgRF6n8YrLJb7pWIjoCNwDAWtZX4wIVFgGq1ew0 +E/E9EyStMYTb5h1lvCpXYRN9AeSFKUQI/y0xsT3+nZ/gyzx3CrgzuSYRgptbuVwm +5F2Q16sLR/EtCBIhA8npKagx/4U7KOilF31I2locH4Aq5l9VJd/6pTA5F4KCAW/E +ybXz6DojAgMBAAECggEAPcOuuRqsFf4ztIjB5XQ0Cu/kexFW0flLKNDTiNIKkZxX +vaxhyDHkculeDnekSkAnUnKdDFdyULnfXTFQ3JI9yrEgjoIBmQFXsno+ySZ9w/Xw +g9om+wUFigirhva7/geUTcSgU/Myk2jA4XKGONv2p98jTGrcBtGickZyKwukUcTa +M18phLdjejg09d45QV5pEtU5m0HuydvtMNCxL2UeWMxyIVezAH2S48m7IAn7Xs4p +J9bwjboDWQYs+zLPfEZyosiJiKugpEKvApIKsJXf4JqRXHN+vvKKDeXkKrrGR+pg +3e5foPjFrLcDltZMkrfnlm8fa0yLnoxdiyd1pDcJaQKBgQDSnJbM6CDb0b3bUyiz +LpfJSBzEPqABM8mNeVHfEjHcBJ7YBOceBxDNasmAPvFbhoDrlHiEYW2QnDLRXKeF +XVdXjSsUV30SPMeg6yeSd8L+LKXLjrGMNGDcJfnjLavv7Glu1xDnYyFSmeVIhWoo +cOhfaFQ69vnHiU1idrOlz6zhPwKBgQDFNcY0S59f3tht7kgnItg8PSfJqJQrIdLt +x6MC2Nc7Eui7/LTuO2rMG6HRA/8zQa/TfnfG7CsUwLB1NiC2R1TtM9YBYPxhMl1o +JeGTfM+tD0lBwPhYpgnOCppuayRCfAsPYA6NcvXcGZbxOigxliOuvgVBH47EAApA +zJ+8b6nKHQKBgQCZ0GDV/4XX5KNq5Z3o1tNl3jOcIzyKBD9kAkGHz+r4C6vSiioc +pP5hd2b4MX/l3yKSapll3R2+qkT24Fs8LEJYn7Hhpk+inR8SaAs7jhmrtgHT2z/R +7IL85QNOJhHXJGqP16PxyVUR1XE9eKpiJKug2joB4lPjpWQN0DE9nKFe0wKBgEo3 +qpgTva7+1sTIYC8aVfaVrVufLePtnswNzbNMl/OLcjsNJ6pgghi+bW+T6X8IwXr+ +pWUfjDcLLV1vOXBf9/4s++UY8uJBahW/69zto9qlXhR44v25vwbjxqq3d7XtqNvo +cpGZKh3jI4M1N9sxfcxNhvyzO69XtIQefh8UhvmhAoGBAKzSA51l50ocOnWSNAXs +QQoU+dYQjLDMtzc5N68EUf1GSjtgkpa3PYjVo09OMeb7+W9LhwHQDNMqgeeEDCsm +B6NDnI4VyjVae7Hqz48WBERJBFMFWiLxEa1m2UwaV2jAubN8FKgH4KzDzOKtJEUy +Rz9IUct6HXsDSs+Q3/zdFmPo +-----END PRIVATE KEY-----` + gatewayTestCertificateTwo = `-----BEGIN CERTIFICATE----- +MIIC7DCCAdSgAwIBAgIJAMHpuSA3ioNPMA0GCSqGSIb3DQEBCwUAMCYxCzAJBgNV +BAYTAlhYMRcwFQYDVQQDDA5zZWNvbmRjZXJ0LmNvbTAeFw0yMzA3MTExNTE1MjBa +Fw0zMzA3MDgxNTE1MjBaMCYxCzAJBgNVBAYTAlhYMRcwFQYDVQQDDA5zZWNvbmRj +ZXJ0LmNvbTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAKI+vYcJtXNt +nUwhbz2t8sutyI9NZaXjwMX0KZgdLQLaBIPVbpvHM7vN0D5x53RqMj5ixD1oEBdG +EqUQeCeEEnqwazPVcFD36U0MeuD0CO6AiIeixxg54JzEx5eGn3u2L6+lWxzb8C8R +/YoYhS46Ny59e5FeRZDZoHhEbKh2SBEXqfxisslvulYiOgI3AMBa1lfjAhUWAarV +7DQT8T0TJK0xhNvmHWW8KldhE30B5IUpRAj/LTGxPf6dn+DLPHcKuDO5JhGCm1u5 +XCbkXZDXqwtH8S0IEiEDyekpqDH/hTso6KUXfUjaWhwfgCrmX1Ul3/qlMDkXgoIB +b8TJtfPoOiMCAwEAAaMdMBswGQYDVR0RBBIwEIIOc2Vjb25kY2VydC5jb20wDQYJ +KoZIhvcNAQELBQADggEBAJvP3deuEpJZktAny6/az09GLSUYddiNCE4sG/2ASj7C +mwRTh2HM4BDnkhW9PNjfHoaWa2TDIhOyHQ5hLYz2tnaeU1sOrADCuFSxGiQqgr8J +prahKh6AzNsXba4rumoO08QTTtJzoa8L6TV4PTQ6gi+OMdbyBe3CQ7DSRzLseHNH +KG5tqRRu+Jm7dUuOXDV4MDHoloyZlksOvIYSC+gaS+ke3XlR+GzOW7hpgn5SIDlv +aR/zlIKXUCvVux3/pNFgW6rduFE0f5Hbc1+J4ghTl8EQu1dwDTax7blXQwE+VDgJ +u4fZGRmoUvvO/bjVCbehBxfJn0rHsxpuD5b4Jg2OZNc= -----END CERTIFICATE-----` ) @@ -391,7 +444,80 @@ func getAPIGatewayGoldenTestCases(t *testing.T) []goldenTestCase { }, }, { - name: "api-gateway-with-http-route-and-inline-certificate", + name: "api-gateway-with-multiple-inline-certificates", + create: func(t testinf.T) *proxycfg.ConfigSnapshot { + return proxycfg.TestConfigSnapshotAPIGateway(t, "default", nil, func(entry *structs.APIGatewayConfigEntry, bound *structs.BoundAPIGatewayConfigEntry) { + entry.Listeners = []structs.APIGatewayListener{ + { + Name: "listener", + Protocol: structs.ListenerProtocolTCP, + Port: 8080, + TLS: structs.APIGatewayTLSConfiguration{ + Certificates: 
[]structs.ResourceReference{{ + Kind: structs.InlineCertificate, + Name: "certificate", + }}, + MinVersion: types.TLSv1_2, + MaxVersion: types.TLSv1_3, + CipherSuites: []types.TLSCipherSuite{ + types.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, + types.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256, + }, + }, + }, + } + bound.Listeners = []structs.BoundAPIGatewayListener{ + { + Name: "listener", + Certificates: []structs.ResourceReference{ + { + Kind: structs.InlineCertificate, + Name: "certificate", + }, + { + Kind: structs.InlineCertificate, + Name: "certificate-too", + }, + }, + Routes: []structs.ResourceReference{{ + Kind: structs.TCPRoute, + Name: "route", + }}, + }, + } + }, + []structs.BoundRoute{ + &structs.TCPRouteConfigEntry{ + Kind: structs.TCPRoute, + Name: "route", + Services: []structs.TCPService{{ + Name: "service", + }}, + Parents: []structs.ResourceReference{ + { + Kind: structs.APIGateway, + Name: "api-gateway", + }, + }, + }, + }, []structs.InlineCertificateConfigEntry{ + { + Kind: structs.InlineCertificate, + Name: "certificate", + PrivateKey: gatewayTestPrivateKey, + Certificate: gatewayTestCertificate, + }, + { + Kind: structs.InlineCertificate, + Name: "certificate-too", + PrivateKey: gatewayTestPrivateKeyTwo, + Certificate: gatewayTestCertificateTwo, + }, + }, nil) + }, + }, + { + name: "api-gateway-with-http-route", create: func(t testinf.T) *proxycfg.ConfigSnapshot { return proxycfg.TestConfigSnapshotAPIGateway(t, "default", nil, func(entry *structs.APIGatewayConfigEntry, bound *structs.BoundAPIGatewayConfigEntry) { entry.Listeners = []structs.APIGatewayListener{ @@ -404,6 +530,10 @@ func getAPIGatewayGoldenTestCases(t *testing.T) []goldenTestCase { bound.Listeners = []structs.BoundAPIGatewayListener{ { Name: "listener", + Certificates: []structs.ResourceReference{{ + Kind: structs.InlineCertificate, + Name: "certificate", + }}, Routes: []structs.ResourceReference{{ Kind: structs.HTTPRoute, Name: "route", @@ -439,7 +569,12 @@ func getAPIGatewayGoldenTestCases(t *testing.T) []goldenTestCase { }, }, }, - }, nil, []proxycfg.UpdateEvent{{ + }, []structs.InlineCertificateConfigEntry{{ + Kind: structs.InlineCertificate, + Name: "certificate", + PrivateKey: gatewayTestPrivateKey, + Certificate: gatewayTestCertificate, + }}, []proxycfg.UpdateEvent{{ CorrelationID: "discovery-chain:" + serviceUID.String(), Result: &structs.DiscoveryChainResponse{ Chain: serviceChain, diff --git a/agent/xds/testdata/clusters/api-gateway-with-http-route-and-inline-certificate.latest.golden b/agent/xds/testdata/clusters/api-gateway-with-http-route.latest.golden similarity index 100% rename from agent/xds/testdata/clusters/api-gateway-with-http-route-and-inline-certificate.latest.golden rename to agent/xds/testdata/clusters/api-gateway-with-http-route.latest.golden diff --git a/agent/xds/testdata/clusters/api-gateway-with-multiple-inline-certificates.latest.golden b/agent/xds/testdata/clusters/api-gateway-with-multiple-inline-certificates.latest.golden new file mode 100644 index 000000000000..e20479dfd1cf --- /dev/null +++ b/agent/xds/testdata/clusters/api-gateway-with-multiple-inline-certificates.latest.golden @@ -0,0 +1,55 @@ +{ + "versionInfo": "00000001", + "resources": [ + { + "@type": "type.googleapis.com/envoy.config.cluster.v3.Cluster", + "name": "service.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul", + "altStatName": "service.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul", + "type": "EDS", + "edsClusterConfig": { + "edsConfig": { + "ads": {}, + 
"resourceApiVersion": "V3" + } + }, + "connectTimeout": "5s", + "circuitBreakers": {}, + "outlierDetection": {}, + "commonLbConfig": { + "healthyPanicThreshold": {} + }, + "transportSocket": { + "name": "tls", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext", + "commonTlsContext": { + "tlsParams": {}, + "tlsCertificates": [ + { + "certificateChain": { + "inlineString": "-----BEGIN CERTIFICATE-----\nMIICjDCCAjKgAwIBAgIIC5llxGV1gB8wCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowDjEMMAoG\nA1UEAxMDd2ViMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEADPv1RHVNRfa2VKR\nAB16b6rZnEt7tuhaxCFpQXPj7M2omb0B9Favq5E0ivpNtv1QnFhxtPd7d5k4e+T7\nSkW1TaOCAXIwggFuMA4GA1UdDwEB/wQEAwIDuDAdBgNVHSUEFjAUBggrBgEFBQcD\nAgYIKwYBBQUHAwEwDAYDVR0TAQH/BAIwADBoBgNVHQ4EYQRfN2Q6MDc6ODc6M2E6\nNDA6MTk6NDc6YzM6NWE6YzA6YmE6NjI6ZGY6YWY6NGI6ZDQ6MDU6MjU6NzY6M2Q6\nNWE6OGQ6MTY6OGQ6Njc6NWU6MmU6YTA6MzQ6N2Q6ZGM6ZmYwagYDVR0jBGMwYYBf\nZDE6MTE6MTE6YWM6MmE6YmE6OTc6YjI6M2Y6YWM6N2I6YmQ6ZGE6YmU6YjE6OGE6\nZmM6OWE6YmE6YjU6YmM6ODM6ZTc6NWU6NDE6NmY6ZjI6NzM6OTU6NTg6MGM6ZGIw\nWQYDVR0RBFIwUIZOc3BpZmZlOi8vMTExMTExMTEtMjIyMi0zMzMzLTQ0NDQtNTU1\nNTU1NTU1NTU1LmNvbnN1bC9ucy9kZWZhdWx0L2RjL2RjMS9zdmMvd2ViMAoGCCqG\nSM49BAMCA0gAMEUCIGC3TTvvjj76KMrguVyFf4tjOqaSCRie3nmHMRNNRav7AiEA\npY0heYeK9A6iOLrzqxSerkXXQyj5e9bE4VgUnxgPU6g=\n-----END CERTIFICATE-----\n" + }, + "privateKey": { + "inlineString": "-----BEGIN EC PRIVATE KEY-----\nMHcCAQEEIMoTkpRggp3fqZzFKh82yS4LjtJI+XY+qX/7DefHFrtdoAoGCCqGSM49\nAwEHoUQDQgAEADPv1RHVNRfa2VKRAB16b6rZnEt7tuhaxCFpQXPj7M2omb0B9Fav\nq5E0ivpNtv1QnFhxtPd7d5k4e+T7SkW1TQ==\n-----END EC PRIVATE KEY-----\n" + } + } + ], + "validationContext": { + "trustedCa": { + "inlineString": "-----BEGIN CERTIFICATE-----\nMIICXDCCAgKgAwIBAgIICpZq70Z9LyUwCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowFDESMBAG\nA1UEAxMJVGVzdCBDQSAyMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEIhywH1gx\nAsMwuF3ukAI5YL2jFxH6Usnma1HFSfVyxbXX1/uoZEYrj8yCAtdU2yoHETyd+Zx2\nThhRLP79pYegCaOCATwwggE4MA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTAD\nAQH/MGgGA1UdDgRhBF9kMToxMToxMTphYzoyYTpiYTo5NzpiMjozZjphYzo3Yjpi\nZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1ZTo0MTo2ZjpmMjo3\nMzo5NTo1ODowYzpkYjBqBgNVHSMEYzBhgF9kMToxMToxMTphYzoyYTpiYTo5Nzpi\nMjozZjphYzo3YjpiZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1\nZTo0MTo2ZjpmMjo3Mzo5NTo1ODowYzpkYjA/BgNVHREEODA2hjRzcGlmZmU6Ly8x\nMTExMTExMS0yMjIyLTMzMzMtNDQ0NC01NTU1NTU1NTU1NTUuY29uc3VsMAoGCCqG\nSM49BAMCA0gAMEUCICOY0i246rQHJt8o8Oya0D5PLL1FnmsQmQqIGCi31RwnAiEA\noR5f6Ku+cig2Il8T8LJujOp2/2A72QcHZA57B13y+8o=\n-----END CERTIFICATE-----\n" + }, + "matchSubjectAltNames": [ + { + "exact": "spiffe://11111111-2222-3333-4444-555555555555.consul/ns/default/dc/dc1/svc/service" + } + ] + } + }, + "sni": "service.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul" + } + } + } + ], + "typeUrl": "type.googleapis.com/envoy.config.cluster.v3.Cluster", + "nonce": "00000001" +} \ No newline at end of file diff --git a/agent/xds/testdata/clusters/connect-proxy-with-peered-upstreams-listener-override.latest.golden b/agent/xds/testdata/clusters/connect-proxy-with-peered-upstreams-listener-override.latest.golden new file mode 100644 index 000000000000..8ae9b0853749 --- /dev/null +++ b/agent/xds/testdata/clusters/connect-proxy-with-peered-upstreams-listener-override.latest.golden @@ -0,0 +1,147 @@ +{ + "versionInfo": "00000001", + "resources": [ + { + "@type": "type.googleapis.com/envoy.config.cluster.v3.Cluster", + "name": "local_app", 
+ "type": "STATIC", + "connectTimeout": "5s", + "loadAssignment": { + "clusterName": "local_app", + "endpoints": [ + { + "lbEndpoints": [ + { + "endpoint": { + "address": { + "socketAddress": { + "address": "127.0.0.1", + "portValue": 8080 + } + } + } + } + ] + } + ] + } + }, + { + "@type": "type.googleapis.com/envoy.config.cluster.v3.Cluster", + "name": "payments.default.cloud.external.1c053652-8512-4373-90cf-5a7f6263a994.consul", + "type": "LOGICAL_DNS", + "connectTimeout": "5s", + "loadAssignment": { + "clusterName": "payments.default.cloud.external.1c053652-8512-4373-90cf-5a7f6263a994.consul", + "endpoints": [ + { + "lbEndpoints": [ + { + "endpoint": { + "address": { + "socketAddress": { + "address": "123.us-east-1.elb.notaws.com", + "portValue": 8443 + } + } + }, + "healthStatus": "HEALTHY", + "loadBalancingWeight": 1 + } + ] + } + ] + }, + "circuitBreakers": {}, + "dnsRefreshRate": "10s", + "dnsLookupFamily": "V4_ONLY", + "outlierDetection": { + "maxEjectionPercent": 100 + }, + "commonLbConfig": { + "healthyPanicThreshold": {} + }, + "transportSocket": { + "name": "tls", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext", + "commonTlsContext": { + "tlsParams": {}, + "tlsCertificates": [ + { + "certificateChain": { + "inlineString": "-----BEGIN CERTIFICATE-----\nMIICjDCCAjKgAwIBAgIIC5llxGV1gB8wCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowDjEMMAoG\nA1UEAxMDd2ViMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEADPv1RHVNRfa2VKR\nAB16b6rZnEt7tuhaxCFpQXPj7M2omb0B9Favq5E0ivpNtv1QnFhxtPd7d5k4e+T7\nSkW1TaOCAXIwggFuMA4GA1UdDwEB/wQEAwIDuDAdBgNVHSUEFjAUBggrBgEFBQcD\nAgYIKwYBBQUHAwEwDAYDVR0TAQH/BAIwADBoBgNVHQ4EYQRfN2Q6MDc6ODc6M2E6\nNDA6MTk6NDc6YzM6NWE6YzA6YmE6NjI6ZGY6YWY6NGI6ZDQ6MDU6MjU6NzY6M2Q6\nNWE6OGQ6MTY6OGQ6Njc6NWU6MmU6YTA6MzQ6N2Q6ZGM6ZmYwagYDVR0jBGMwYYBf\nZDE6MTE6MTE6YWM6MmE6YmE6OTc6YjI6M2Y6YWM6N2I6YmQ6ZGE6YmU6YjE6OGE6\nZmM6OWE6YmE6YjU6YmM6ODM6ZTc6NWU6NDE6NmY6ZjI6NzM6OTU6NTg6MGM6ZGIw\nWQYDVR0RBFIwUIZOc3BpZmZlOi8vMTExMTExMTEtMjIyMi0zMzMzLTQ0NDQtNTU1\nNTU1NTU1NTU1LmNvbnN1bC9ucy9kZWZhdWx0L2RjL2RjMS9zdmMvd2ViMAoGCCqG\nSM49BAMCA0gAMEUCIGC3TTvvjj76KMrguVyFf4tjOqaSCRie3nmHMRNNRav7AiEA\npY0heYeK9A6iOLrzqxSerkXXQyj5e9bE4VgUnxgPU6g=\n-----END CERTIFICATE-----\n" + }, + "privateKey": { + "inlineString": "-----BEGIN EC PRIVATE KEY-----\nMHcCAQEEIMoTkpRggp3fqZzFKh82yS4LjtJI+XY+qX/7DefHFrtdoAoGCCqGSM49\nAwEHoUQDQgAEADPv1RHVNRfa2VKRAB16b6rZnEt7tuhaxCFpQXPj7M2omb0B9Fav\nq5E0ivpNtv1QnFhxtPd7d5k4e+T7SkW1TQ==\n-----END EC PRIVATE KEY-----\n" + } + } + ], + "validationContext": { + "trustedCa": { + "inlineString": "-----BEGIN CERTIFICATE-----\nMIICczCCAdwCCQC3BLnEmLCrSjANBgkqhkiG9w0BAQsFADB+MQswCQYDVQQGEwJV\nUzELMAkGA1UECAwCQVoxEjAQBgNVBAcMCUZsYWdzdGFmZjEMMAoGA1UECgwDRm9v\nMRAwDgYDVQQLDAdleGFtcGxlMQ8wDQYDVQQDDAZwZWVyLWExHTAbBgkqhkiG9w0B\nCQEWDmZvb0BwZWVyLWEuY29tMB4XDTIyMDUyNjAxMDQ0NFoXDTIzMDUyNjAxMDQ0\nNFowfjELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAkFaMRIwEAYDVQQHDAlGbGFnc3Rh\nZmYxDDAKBgNVBAoMA0ZvbzEQMA4GA1UECwwHZXhhbXBsZTEPMA0GA1UEAwwGcGVl\nci1hMR0wGwYJKoZIhvcNAQkBFg5mb29AcGVlci1hLmNvbTCBnzANBgkqhkiG9w0B\nAQEFAAOBjQAwgYkCgYEA2zFYGTbXDAntT5pLTpZ2+VTiqx4J63VRJH1kdu11f0FV\nc2jl1pqCuYDbQXknDU0Pv1Q5y0+nSAihD2KqGS571r+vHQiPtKYPYRqPEe9FzAhR\n2KhWH6v/tk5DG1HqOjV9/zWRKB12gdFNZZqnw/e7NjLNq3wZ2UAwxXip5uJ8uwMC\nAwEAATANBgkqhkiG9w0BAQsFAAOBgQC/CJ9Syf4aL91wZizKTejwouRYoWv4gRAk\nyto45ZcNMHfJ0G2z+XAMl9ZbQsLgXmzAx4IM6y5Jckq8pKC4PEijCjlKTktLHlEy\n0ggmFxtNB1tid2NC8dOzcQ3l45+gDjDqdILhAvLDjlAIebdkqVqb2CfFNW/I2CQH\nZAuKN1aoKA==\n-----END 
CERTIFICATE-----\n" + }, + "matchSubjectAltNames": [ + { + "exact": "spiffe://1c053652-8512-4373-90cf-5a7f6263a994.consul/ns/default/dc/cloud-dc/svc/payments" + } + ] + } + }, + "sni": "payments.default.default.cloud.external.1c053652-8512-4373-90cf-5a7f6263a994.consul" + } + } + }, + { + "@type": "type.googleapis.com/envoy.config.cluster.v3.Cluster", + "name": "refunds.default.cloud.external.1c053652-8512-4373-90cf-5a7f6263a994.consul", + "type": "EDS", + "edsClusterConfig": { + "edsConfig": { + "ads": {}, + "resourceApiVersion": "V3" + } + }, + "connectTimeout": "5s", + "circuitBreakers": {}, + "outlierDetection": { + "maxEjectionPercent": 100 + }, + "commonLbConfig": { + "healthyPanicThreshold": {} + }, + "transportSocket": { + "name": "tls", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext", + "commonTlsContext": { + "tlsParams": {}, + "tlsCertificates": [ + { + "certificateChain": { + "inlineString": "-----BEGIN CERTIFICATE-----\nMIICjDCCAjKgAwIBAgIIC5llxGV1gB8wCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowDjEMMAoG\nA1UEAxMDd2ViMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEADPv1RHVNRfa2VKR\nAB16b6rZnEt7tuhaxCFpQXPj7M2omb0B9Favq5E0ivpNtv1QnFhxtPd7d5k4e+T7\nSkW1TaOCAXIwggFuMA4GA1UdDwEB/wQEAwIDuDAdBgNVHSUEFjAUBggrBgEFBQcD\nAgYIKwYBBQUHAwEwDAYDVR0TAQH/BAIwADBoBgNVHQ4EYQRfN2Q6MDc6ODc6M2E6\nNDA6MTk6NDc6YzM6NWE6YzA6YmE6NjI6ZGY6YWY6NGI6ZDQ6MDU6MjU6NzY6M2Q6\nNWE6OGQ6MTY6OGQ6Njc6NWU6MmU6YTA6MzQ6N2Q6ZGM6ZmYwagYDVR0jBGMwYYBf\nZDE6MTE6MTE6YWM6MmE6YmE6OTc6YjI6M2Y6YWM6N2I6YmQ6ZGE6YmU6YjE6OGE6\nZmM6OWE6YmE6YjU6YmM6ODM6ZTc6NWU6NDE6NmY6ZjI6NzM6OTU6NTg6MGM6ZGIw\nWQYDVR0RBFIwUIZOc3BpZmZlOi8vMTExMTExMTEtMjIyMi0zMzMzLTQ0NDQtNTU1\nNTU1NTU1NTU1LmNvbnN1bC9ucy9kZWZhdWx0L2RjL2RjMS9zdmMvd2ViMAoGCCqG\nSM49BAMCA0gAMEUCIGC3TTvvjj76KMrguVyFf4tjOqaSCRie3nmHMRNNRav7AiEA\npY0heYeK9A6iOLrzqxSerkXXQyj5e9bE4VgUnxgPU6g=\n-----END CERTIFICATE-----\n" + }, + "privateKey": { + "inlineString": "-----BEGIN EC PRIVATE KEY-----\nMHcCAQEEIMoTkpRggp3fqZzFKh82yS4LjtJI+XY+qX/7DefHFrtdoAoGCCqGSM49\nAwEHoUQDQgAEADPv1RHVNRfa2VKRAB16b6rZnEt7tuhaxCFpQXPj7M2omb0B9Fav\nq5E0ivpNtv1QnFhxtPd7d5k4e+T7SkW1TQ==\n-----END EC PRIVATE KEY-----\n" + } + } + ], + "validationContext": { + "trustedCa": { + "inlineString": "-----BEGIN CERTIFICATE-----\nMIICczCCAdwCCQC3BLnEmLCrSjANBgkqhkiG9w0BAQsFADB+MQswCQYDVQQGEwJV\nUzELMAkGA1UECAwCQVoxEjAQBgNVBAcMCUZsYWdzdGFmZjEMMAoGA1UECgwDRm9v\nMRAwDgYDVQQLDAdleGFtcGxlMQ8wDQYDVQQDDAZwZWVyLWExHTAbBgkqhkiG9w0B\nCQEWDmZvb0BwZWVyLWEuY29tMB4XDTIyMDUyNjAxMDQ0NFoXDTIzMDUyNjAxMDQ0\nNFowfjELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAkFaMRIwEAYDVQQHDAlGbGFnc3Rh\nZmYxDDAKBgNVBAoMA0ZvbzEQMA4GA1UECwwHZXhhbXBsZTEPMA0GA1UEAwwGcGVl\nci1hMR0wGwYJKoZIhvcNAQkBFg5mb29AcGVlci1hLmNvbTCBnzANBgkqhkiG9w0B\nAQEFAAOBjQAwgYkCgYEA2zFYGTbXDAntT5pLTpZ2+VTiqx4J63VRJH1kdu11f0FV\nc2jl1pqCuYDbQXknDU0Pv1Q5y0+nSAihD2KqGS571r+vHQiPtKYPYRqPEe9FzAhR\n2KhWH6v/tk5DG1HqOjV9/zWRKB12gdFNZZqnw/e7NjLNq3wZ2UAwxXip5uJ8uwMC\nAwEAATANBgkqhkiG9w0BAQsFAAOBgQC/CJ9Syf4aL91wZizKTejwouRYoWv4gRAk\nyto45ZcNMHfJ0G2z+XAMl9ZbQsLgXmzAx4IM6y5Jckq8pKC4PEijCjlKTktLHlEy\n0ggmFxtNB1tid2NC8dOzcQ3l45+gDjDqdILhAvLDjlAIebdkqVqb2CfFNW/I2CQH\nZAuKN1aoKA==\n-----END CERTIFICATE-----\n" + }, + "matchSubjectAltNames": [ + { + "exact": "spiffe://1c053652-8512-4373-90cf-5a7f6263a994.consul/ns/default/dc/cloud-dc/svc/refunds" + } + ] + } + }, + "sni": "refunds.default.default.cloud.external.1c053652-8512-4373-90cf-5a7f6263a994.consul" + } + } + } + ], + "typeUrl": 
"type.googleapis.com/envoy.config.cluster.v3.Cluster", + "nonce": "00000001" +} \ No newline at end of file diff --git a/agent/xds/testdata/endpoints/api-gateway-with-http-route-and-inline-certificate.latest.golden b/agent/xds/testdata/endpoints/api-gateway-with-http-route.latest.golden similarity index 100% rename from agent/xds/testdata/endpoints/api-gateway-with-http-route-and-inline-certificate.latest.golden rename to agent/xds/testdata/endpoints/api-gateway-with-http-route.latest.golden diff --git a/agent/xds/testdata/endpoints/api-gateway-with-multiple-inline-certificates.latest.golden b/agent/xds/testdata/endpoints/api-gateway-with-multiple-inline-certificates.latest.golden new file mode 100644 index 000000000000..47b46bca225b --- /dev/null +++ b/agent/xds/testdata/endpoints/api-gateway-with-multiple-inline-certificates.latest.golden @@ -0,0 +1,5 @@ +{ + "versionInfo": "00000001", + "typeUrl": "type.googleapis.com/envoy.config.endpoint.v3.ClusterLoadAssignment", + "nonce": "00000001" +} \ No newline at end of file diff --git a/agent/xds/testdata/endpoints/connect-proxy-with-peered-upstreams-listener-override.latest.golden b/agent/xds/testdata/endpoints/connect-proxy-with-peered-upstreams-listener-override.latest.golden new file mode 100644 index 000000000000..9dc909faf7c0 --- /dev/null +++ b/agent/xds/testdata/endpoints/connect-proxy-with-peered-upstreams-listener-override.latest.golden @@ -0,0 +1,29 @@ +{ + "versionInfo": "00000001", + "resources": [ + { + "@type": "type.googleapis.com/envoy.config.endpoint.v3.ClusterLoadAssignment", + "clusterName": "refunds.default.cloud.external.1c053652-8512-4373-90cf-5a7f6263a994.consul", + "endpoints": [ + { + "lbEndpoints": [ + { + "endpoint": { + "address": { + "socketAddress": { + "address": "106.96.90.233", + "portValue": 443 + } + } + }, + "healthStatus": "HEALTHY", + "loadBalancingWeight": 1 + } + ] + } + ] + } + ], + "typeUrl": "type.googleapis.com/envoy.config.endpoint.v3.ClusterLoadAssignment", + "nonce": "00000001" +} \ No newline at end of file diff --git a/agent/xds/testdata/listeners/api-gateway-with-http-route-and-inline-certificate.latest.golden b/agent/xds/testdata/listeners/api-gateway-with-http-route-and-inline-certificate.latest.golden deleted file mode 100644 index e9bee988de93..000000000000 --- a/agent/xds/testdata/listeners/api-gateway-with-http-route-and-inline-certificate.latest.golden +++ /dev/null @@ -1,49 +0,0 @@ -{ - "versionInfo": "00000001", - "resources": [ - { - "@type": "type.googleapis.com/envoy.config.listener.v3.Listener", - "name": "http:1.2.3.4:8080", - "address": { - "socketAddress": { - "address": "1.2.3.4", - "portValue": 8080 - } - }, - "filterChains": [ - { - "filters": [ - { - "name": "envoy.filters.network.http_connection_manager", - "typedConfig": { - "@type": "type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager", - "statPrefix": "ingress_upstream_8080", - "rds": { - "configSource": { - "ads": {}, - "resourceApiVersion": "V3" - }, - "routeConfigName": "8080" - }, - "httpFilters": [ - { - "name": "envoy.filters.http.router", - "typedConfig": { - "@type": "type.googleapis.com/envoy.extensions.filters.http.router.v3.Router" - } - } - ], - "tracing": { - "randomSampling": {} - } - } - } - ] - } - ], - "trafficDirection": "OUTBOUND" - } - ], - "typeUrl": "type.googleapis.com/envoy.config.listener.v3.Listener", - "nonce": "00000001" -} \ No newline at end of file diff --git a/agent/xds/testdata/listeners/api-gateway-with-http-route.latest.golden 
b/agent/xds/testdata/listeners/api-gateway-with-http-route.latest.golden new file mode 100644 index 000000000000..670911b8f97a --- /dev/null +++ b/agent/xds/testdata/listeners/api-gateway-with-http-route.latest.golden @@ -0,0 +1,80 @@ +{ + "versionInfo": "00000001", + "resources": [ + { + "@type": "type.googleapis.com/envoy.config.listener.v3.Listener", + "name": "http:1.2.3.4:8080", + "address": { + "socketAddress": { + "address": "1.2.3.4", + "portValue": 8080 + } + }, + "filterChains": [ + { + "filters": [ + { + "name": "envoy.filters.network.http_connection_manager", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager", + "statPrefix": "ingress_upstream_certificate", + "rds": { + "configSource": { + "ads": {}, + "resourceApiVersion": "V3" + }, + "routeConfigName": "8080" + }, + "httpFilters": [ + { + "name": "envoy.filters.http.router", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.http.router.v3.Router" + } + } + ], + "tracing": { + "randomSampling": {} + } + } + } + ], + "transportSocket": { + "name": "tls", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext", + "commonTlsContext": { + "tlsParams": {}, + "tlsCertificates": [ + { + "certificateChain": { + "inlineString": "-----BEGIN CERTIFICATE-----\nMIICljCCAX4CCQCQMDsYO8FrPjANBgkqhkiG9w0BAQsFADANMQswCQYDVQQGEwJV\nUzAeFw0yMjEyMjAxNzUwMjVaFw0yNzEyMTkxNzUwMjVaMA0xCzAJBgNVBAYTAlVT\nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAx95Opa6t4lGEpiTUogEB\nptqOdam2ch4BHQGhNhX/MrDwwuZQhttBwMfngQ/wd9NmYEPAwj0dumUoAITIq6i2\njQlhqTodElkbsd5vWY8R/bxJWQSoNvVE12TlzECxGpJEiHt4W0r8pGffk+rvplji\nUyCfnT1kGF3znOSjK1hRMTn6RKWCyYaBvXQiB4SGilfLgJcEpOJKtISIxmZ+S409\ng9X5VU88/Bmmrz4cMyxce86Kc2ug5/MOv0CjWDJwlrv8njneV2zvraQ61DDwQftr\nXOvuCbO5IBRHMOBHiHTZ4rtGuhMaIr21V4vb6n8c4YzXiFvhUYcyX7rltGZzVd+W\nmQIDAQABMA0GCSqGSIb3DQEBCwUAA4IBAQBfCqoUIdPf/HGSbOorPyZWbyizNtHJ\nGL7x9cAeIYxpI5Y/WcO1o5v94lvrgm3FNfJoGKbV66+JxOge731FrfMpHplhar1Z\nRahYIzNLRBTLrwadLAZkApUpZvB8qDK4knsTWFYujNsylCww2A6ajzIMFNU4GkUK\nNtyHRuD+KYRmjXtyX1yHNqfGN3vOQmwavHq2R8wHYuBSc6LAHHV9vG+j0VsgMELO\nqwxn8SmLkSKbf2+MsQVzLCXXN5u+D8Yv+4py+oKP4EQ5aFZuDEx+r/G/31rTthww\nAAJAMaoXmoYVdgXV+CPuBb2M4XCpuzLu3bcA2PXm5ipSyIgntMKwXV7r\n-----END CERTIFICATE-----\n" + }, + "privateKey": { + "inlineString": "-----BEGIN RSA PRIVATE 
KEY-----\nMIIEowIBAAKCAQEAx95Opa6t4lGEpiTUogEBptqOdam2ch4BHQGhNhX/MrDwwuZQ\nhttBwMfngQ/wd9NmYEPAwj0dumUoAITIq6i2jQlhqTodElkbsd5vWY8R/bxJWQSo\nNvVE12TlzECxGpJEiHt4W0r8pGffk+rvpljiUyCfnT1kGF3znOSjK1hRMTn6RKWC\nyYaBvXQiB4SGilfLgJcEpOJKtISIxmZ+S409g9X5VU88/Bmmrz4cMyxce86Kc2ug\n5/MOv0CjWDJwlrv8njneV2zvraQ61DDwQftrXOvuCbO5IBRHMOBHiHTZ4rtGuhMa\nIr21V4vb6n8c4YzXiFvhUYcyX7rltGZzVd+WmQIDAQABAoIBACYvceUzp2MK4gYA\nGWPOP2uKbBdM0l+hHeNV0WAM+dHMfmMuL4pkT36ucqt0ySOLjw6rQyOZG5nmA6t9\nsv0g4ae2eCMlyDIeNi1Yavu4Wt6YX4cTXbQKThm83C6W2X9THKbauBbxD621bsDK\n7PhiGPN60yPue7YwFQAPqqD4YaK+s22HFIzk9gwM/rkvAUNwRv7SyHMiFe4Igc1C\nEev7iHWzvj5Heoz6XfF+XNF9DU+TieSUAdjd56VyUb8XL4+uBTOhHwLiXvAmfaMR\nHvpcxeKnYZusS6NaOxcUHiJnsLNWrxmJj9WEGgQzuLxcLjTe4vVmELVZD8t3QUKj\nPAxu8tUCgYEA7KIWVn9dfVpokReorFym+J8FzLwSktP9RZYEMonJo00i8aii3K9s\nu/aSwRWQSCzmON1ZcxZzWhwQF9usz6kGCk//9+4hlVW90GtNK0RD+j7sp4aT2JI8\n9eLEjTG+xSXa7XWe98QncjjL9lu/yrRncSTxHs13q/XP198nn2aYuQ8CgYEA2Dnt\nsRBzv0fFEvzzFv7G/5f85mouN38TUYvxNRTjBLCXl9DeKjDkOVZ2b6qlfQnYXIru\nH+W+v+AZEb6fySXc8FRab7lkgTMrwE+aeI4rkW7asVwtclv01QJ5wMnyT84AgDD/\nDgt/RThFaHgtU9TW5GOZveL+l9fVPn7vKFdTJdcCgYEArJ99zjHxwJ1whNAOk1av\n09UmRPm6TvRo4heTDk8oEoIWCNatoHI0z1YMLuENNSnT9Q280FFDayvnrY/qnD7A\nkktT/sjwJOG8q8trKzIMqQS4XWm2dxoPcIyyOBJfCbEY6XuRsUuePxwh5qF942EB\nyS9a2s6nC4Ix0lgPrqAIr48CgYBgS/Q6riwOXSU8nqCYdiEkBYlhCJrKpnJxF9T1\nofa0yPzKZP/8ZEfP7VzTwHjxJehQ1qLUW9pG08P2biH1UEKEWdzo8vT6wVJT1F/k\nHtTycR8+a+Hlk2SHVRHqNUYQGpuIe8mrdJ1as4Pd0d/F/P0zO9Rlh+mAsGPM8HUM\nT0+9gwKBgHDoerX7NTskg0H0t8O+iSMevdxpEWp34ZYa9gHiftTQGyrRgERCa7Gj\nnZPAxKb2JoWyfnu3v7G5gZ8fhDFsiOxLbZv6UZJBbUIh1MjJISpXrForDrC2QNLX\nkHrHfwBFDB3KMudhQknsJzEJKCL/KmFH6o0MvsoaT9yzEl3K+ah/\n-----END RSA PRIVATE KEY-----\n" + } + } + ], + "alpnProtocols": [ + "http/1.1" + ] + }, + "requireClientCertificate": false + } + } + } + ], + "listenerFilters": [ + { + "name": "envoy.filters.listener.tls_inspector", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.listener.tls_inspector.v3.TlsInspector" + } + } + ], + "trafficDirection": "OUTBOUND" + } + ], + "typeUrl": "type.googleapis.com/envoy.config.listener.v3.Listener", + "nonce": "00000001" +} \ No newline at end of file diff --git a/agent/xds/testdata/listeners/api-gateway-with-multiple-inline-certificates.latest.golden b/agent/xds/testdata/listeners/api-gateway-with-multiple-inline-certificates.latest.golden new file mode 100644 index 000000000000..272946812374 --- /dev/null +++ b/agent/xds/testdata/listeners/api-gateway-with-multiple-inline-certificates.latest.golden @@ -0,0 +1,102 @@ +{ + "versionInfo": "00000001", + "resources": [ + { + "@type": "type.googleapis.com/envoy.config.listener.v3.Listener", + "name": "service:1.2.3.4:8080", + "address": { + "socketAddress": { + "address": "1.2.3.4", + "portValue": 8080 + } + }, + "filterChains": [ + { + "filterChainMatch": { + "serverNames": [ + "" + ] + }, + "filters": [ + { + "name": "envoy.filters.network.tcp_proxy", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.network.tcp_proxy.v3.TcpProxy", + "statPrefix": "ingress_upstream_certificate", + "cluster": "service.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul" + } + } + ], + "transportSocket": { + "name": "tls", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext", + "commonTlsContext": { + "tlsParams": {}, + "tlsCertificates": [ + { + "certificateChain": { + "inlineString": "-----BEGIN 
CERTIFICATE-----\nMIICljCCAX4CCQCQMDsYO8FrPjANBgkqhkiG9w0BAQsFADANMQswCQYDVQQGEwJV\nUzAeFw0yMjEyMjAxNzUwMjVaFw0yNzEyMTkxNzUwMjVaMA0xCzAJBgNVBAYTAlVT\nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAx95Opa6t4lGEpiTUogEB\nptqOdam2ch4BHQGhNhX/MrDwwuZQhttBwMfngQ/wd9NmYEPAwj0dumUoAITIq6i2\njQlhqTodElkbsd5vWY8R/bxJWQSoNvVE12TlzECxGpJEiHt4W0r8pGffk+rvplji\nUyCfnT1kGF3znOSjK1hRMTn6RKWCyYaBvXQiB4SGilfLgJcEpOJKtISIxmZ+S409\ng9X5VU88/Bmmrz4cMyxce86Kc2ug5/MOv0CjWDJwlrv8njneV2zvraQ61DDwQftr\nXOvuCbO5IBRHMOBHiHTZ4rtGuhMaIr21V4vb6n8c4YzXiFvhUYcyX7rltGZzVd+W\nmQIDAQABMA0GCSqGSIb3DQEBCwUAA4IBAQBfCqoUIdPf/HGSbOorPyZWbyizNtHJ\nGL7x9cAeIYxpI5Y/WcO1o5v94lvrgm3FNfJoGKbV66+JxOge731FrfMpHplhar1Z\nRahYIzNLRBTLrwadLAZkApUpZvB8qDK4knsTWFYujNsylCww2A6ajzIMFNU4GkUK\nNtyHRuD+KYRmjXtyX1yHNqfGN3vOQmwavHq2R8wHYuBSc6LAHHV9vG+j0VsgMELO\nqwxn8SmLkSKbf2+MsQVzLCXXN5u+D8Yv+4py+oKP4EQ5aFZuDEx+r/G/31rTthww\nAAJAMaoXmoYVdgXV+CPuBb2M4XCpuzLu3bcA2PXm5ipSyIgntMKwXV7r\n-----END CERTIFICATE-----\n" + }, + "privateKey": { + "inlineString": "-----BEGIN RSA PRIVATE KEY-----\nMIIEowIBAAKCAQEAx95Opa6t4lGEpiTUogEBptqOdam2ch4BHQGhNhX/MrDwwuZQ\nhttBwMfngQ/wd9NmYEPAwj0dumUoAITIq6i2jQlhqTodElkbsd5vWY8R/bxJWQSo\nNvVE12TlzECxGpJEiHt4W0r8pGffk+rvpljiUyCfnT1kGF3znOSjK1hRMTn6RKWC\nyYaBvXQiB4SGilfLgJcEpOJKtISIxmZ+S409g9X5VU88/Bmmrz4cMyxce86Kc2ug\n5/MOv0CjWDJwlrv8njneV2zvraQ61DDwQftrXOvuCbO5IBRHMOBHiHTZ4rtGuhMa\nIr21V4vb6n8c4YzXiFvhUYcyX7rltGZzVd+WmQIDAQABAoIBACYvceUzp2MK4gYA\nGWPOP2uKbBdM0l+hHeNV0WAM+dHMfmMuL4pkT36ucqt0ySOLjw6rQyOZG5nmA6t9\nsv0g4ae2eCMlyDIeNi1Yavu4Wt6YX4cTXbQKThm83C6W2X9THKbauBbxD621bsDK\n7PhiGPN60yPue7YwFQAPqqD4YaK+s22HFIzk9gwM/rkvAUNwRv7SyHMiFe4Igc1C\nEev7iHWzvj5Heoz6XfF+XNF9DU+TieSUAdjd56VyUb8XL4+uBTOhHwLiXvAmfaMR\nHvpcxeKnYZusS6NaOxcUHiJnsLNWrxmJj9WEGgQzuLxcLjTe4vVmELVZD8t3QUKj\nPAxu8tUCgYEA7KIWVn9dfVpokReorFym+J8FzLwSktP9RZYEMonJo00i8aii3K9s\nu/aSwRWQSCzmON1ZcxZzWhwQF9usz6kGCk//9+4hlVW90GtNK0RD+j7sp4aT2JI8\n9eLEjTG+xSXa7XWe98QncjjL9lu/yrRncSTxHs13q/XP198nn2aYuQ8CgYEA2Dnt\nsRBzv0fFEvzzFv7G/5f85mouN38TUYvxNRTjBLCXl9DeKjDkOVZ2b6qlfQnYXIru\nH+W+v+AZEb6fySXc8FRab7lkgTMrwE+aeI4rkW7asVwtclv01QJ5wMnyT84AgDD/\nDgt/RThFaHgtU9TW5GOZveL+l9fVPn7vKFdTJdcCgYEArJ99zjHxwJ1whNAOk1av\n09UmRPm6TvRo4heTDk8oEoIWCNatoHI0z1YMLuENNSnT9Q280FFDayvnrY/qnD7A\nkktT/sjwJOG8q8trKzIMqQS4XWm2dxoPcIyyOBJfCbEY6XuRsUuePxwh5qF942EB\nyS9a2s6nC4Ix0lgPrqAIr48CgYBgS/Q6riwOXSU8nqCYdiEkBYlhCJrKpnJxF9T1\nofa0yPzKZP/8ZEfP7VzTwHjxJehQ1qLUW9pG08P2biH1UEKEWdzo8vT6wVJT1F/k\nHtTycR8+a+Hlk2SHVRHqNUYQGpuIe8mrdJ1as4Pd0d/F/P0zO9Rlh+mAsGPM8HUM\nT0+9gwKBgHDoerX7NTskg0H0t8O+iSMevdxpEWp34ZYa9gHiftTQGyrRgERCa7Gj\nnZPAxKb2JoWyfnu3v7G5gZ8fhDFsiOxLbZv6UZJBbUIh1MjJISpXrForDrC2QNLX\nkHrHfwBFDB3KMudhQknsJzEJKCL/KmFH6o0MvsoaT9yzEl3K+ah/\n-----END RSA PRIVATE KEY-----\n" + } + } + ] + }, + "requireClientCertificate": false + } + } + }, + { + "filters": [ + { + "name": "envoy.filters.network.tcp_proxy", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.network.tcp_proxy.v3.TcpProxy", + "statPrefix": "ingress_upstream_default", + "cluster": "service.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul" + } + } + ], + "transportSocket": { + "name": "tls", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext", + "commonTlsContext": { + "tlsParams": {}, + "tlsCertificates": [ + { + "certificateChain": { + "inlineString": "-----BEGIN 
CERTIFICATE-----\nMIICjDCCAjKgAwIBAgIIC5llxGV1gB8wCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowDjEMMAoG\nA1UEAxMDd2ViMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEADPv1RHVNRfa2VKR\nAB16b6rZnEt7tuhaxCFpQXPj7M2omb0B9Favq5E0ivpNtv1QnFhxtPd7d5k4e+T7\nSkW1TaOCAXIwggFuMA4GA1UdDwEB/wQEAwIDuDAdBgNVHSUEFjAUBggrBgEFBQcD\nAgYIKwYBBQUHAwEwDAYDVR0TAQH/BAIwADBoBgNVHQ4EYQRfN2Q6MDc6ODc6M2E6\nNDA6MTk6NDc6YzM6NWE6YzA6YmE6NjI6ZGY6YWY6NGI6ZDQ6MDU6MjU6NzY6M2Q6\nNWE6OGQ6MTY6OGQ6Njc6NWU6MmU6YTA6MzQ6N2Q6ZGM6ZmYwagYDVR0jBGMwYYBf\nZDE6MTE6MTE6YWM6MmE6YmE6OTc6YjI6M2Y6YWM6N2I6YmQ6ZGE6YmU6YjE6OGE6\nZmM6OWE6YmE6YjU6YmM6ODM6ZTc6NWU6NDE6NmY6ZjI6NzM6OTU6NTg6MGM6ZGIw\nWQYDVR0RBFIwUIZOc3BpZmZlOi8vMTExMTExMTEtMjIyMi0zMzMzLTQ0NDQtNTU1\nNTU1NTU1NTU1LmNvbnN1bC9ucy9kZWZhdWx0L2RjL2RjMS9zdmMvd2ViMAoGCCqG\nSM49BAMCA0gAMEUCIGC3TTvvjj76KMrguVyFf4tjOqaSCRie3nmHMRNNRav7AiEA\npY0heYeK9A6iOLrzqxSerkXXQyj5e9bE4VgUnxgPU6g=\n-----END CERTIFICATE-----\n" + }, + "privateKey": { + "inlineString": "-----BEGIN EC PRIVATE KEY-----\nMHcCAQEEIMoTkpRggp3fqZzFKh82yS4LjtJI+XY+qX/7DefHFrtdoAoGCCqGSM49\nAwEHoUQDQgAEADPv1RHVNRfa2VKRAB16b6rZnEt7tuhaxCFpQXPj7M2omb0B9Fav\nq5E0ivpNtv1QnFhxtPd7d5k4e+T7SkW1TQ==\n-----END EC PRIVATE KEY-----\n" + } + } + ], + "validationContext": { + "trustedCa": { + "inlineString": "-----BEGIN CERTIFICATE-----\nMIICXDCCAgKgAwIBAgIICpZq70Z9LyUwCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowFDESMBAG\nA1UEAxMJVGVzdCBDQSAyMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEIhywH1gx\nAsMwuF3ukAI5YL2jFxH6Usnma1HFSfVyxbXX1/uoZEYrj8yCAtdU2yoHETyd+Zx2\nThhRLP79pYegCaOCATwwggE4MA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTAD\nAQH/MGgGA1UdDgRhBF9kMToxMToxMTphYzoyYTpiYTo5NzpiMjozZjphYzo3Yjpi\nZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1ZTo0MTo2ZjpmMjo3\nMzo5NTo1ODowYzpkYjBqBgNVHSMEYzBhgF9kMToxMToxMTphYzoyYTpiYTo5Nzpi\nMjozZjphYzo3YjpiZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1\nZTo0MTo2ZjpmMjo3Mzo5NTo1ODowYzpkYjA/BgNVHREEODA2hjRzcGlmZmU6Ly8x\nMTExMTExMS0yMjIyLTMzMzMtNDQ0NC01NTU1NTU1NTU1NTUuY29uc3VsMAoGCCqG\nSM49BAMCA0gAMEUCICOY0i246rQHJt8o8Oya0D5PLL1FnmsQmQqIGCi31RwnAiEA\noR5f6Ku+cig2Il8T8LJujOp2/2A72QcHZA57B13y+8o=\n-----END CERTIFICATE-----\n" + } + } + }, + "requireClientCertificate": false + } + } + } + ], + "listenerFilters": [ + { + "name": "envoy.filters.listener.tls_inspector", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.listener.tls_inspector.v3.TlsInspector" + } + } + ], + "trafficDirection": "OUTBOUND" + } + ], + "typeUrl": "type.googleapis.com/envoy.config.listener.v3.Listener", + "nonce": "00000001" +} \ No newline at end of file diff --git a/agent/xds/testdata/listeners/connect-proxy-with-peered-upstreams-listener-override.latest.golden b/agent/xds/testdata/listeners/connect-proxy-with-peered-upstreams-listener-override.latest.golden new file mode 100644 index 000000000000..76c41c8b7c97 --- /dev/null +++ b/agent/xds/testdata/listeners/connect-proxy-with-peered-upstreams-listener-override.latest.golden @@ -0,0 +1,114 @@ +{ + "versionInfo": "00000001", + "resources": [ + { + "@type": "type.googleapis.com/envoy.config.listener.v3.Listener", + "name": "payments?peer=cloud:custom-upstream", + "address": { + "socketAddress": { + "address": "11.11.11.11", + "portValue": 11111 + } + }, + "filterChains": [ + { + "filters": [ + { + "name": "envoy.filters.network.tcp_proxy", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.network.tcp_proxy.v3.TcpProxy", + "statPrefix": "foo-stats", + "cluster": "random-cluster" + } 
+ } + ] + } + ] + }, + { + "@type": "type.googleapis.com/envoy.config.listener.v3.Listener", + "name": "public_listener:0.0.0.0:9999", + "address": { + "socketAddress": { + "address": "0.0.0.0", + "portValue": 9999 + } + }, + "filterChains": [ + { + "filters": [ + { + "name": "envoy.filters.network.rbac", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.network.rbac.v3.RBAC", + "rules": {}, + "statPrefix": "connect_authz" + } + }, + { + "name": "envoy.filters.network.tcp_proxy", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.network.tcp_proxy.v3.TcpProxy", + "statPrefix": "public_listener", + "cluster": "local_app" + } + } + ], + "transportSocket": { + "name": "tls", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext", + "commonTlsContext": { + "tlsParams": {}, + "tlsCertificates": [ + { + "certificateChain": { + "inlineString": "-----BEGIN CERTIFICATE-----\nMIICjDCCAjKgAwIBAgIIC5llxGV1gB8wCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowDjEMMAoG\nA1UEAxMDd2ViMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEADPv1RHVNRfa2VKR\nAB16b6rZnEt7tuhaxCFpQXPj7M2omb0B9Favq5E0ivpNtv1QnFhxtPd7d5k4e+T7\nSkW1TaOCAXIwggFuMA4GA1UdDwEB/wQEAwIDuDAdBgNVHSUEFjAUBggrBgEFBQcD\nAgYIKwYBBQUHAwEwDAYDVR0TAQH/BAIwADBoBgNVHQ4EYQRfN2Q6MDc6ODc6M2E6\nNDA6MTk6NDc6YzM6NWE6YzA6YmE6NjI6ZGY6YWY6NGI6ZDQ6MDU6MjU6NzY6M2Q6\nNWE6OGQ6MTY6OGQ6Njc6NWU6MmU6YTA6MzQ6N2Q6ZGM6ZmYwagYDVR0jBGMwYYBf\nZDE6MTE6MTE6YWM6MmE6YmE6OTc6YjI6M2Y6YWM6N2I6YmQ6ZGE6YmU6YjE6OGE6\nZmM6OWE6YmE6YjU6YmM6ODM6ZTc6NWU6NDE6NmY6ZjI6NzM6OTU6NTg6MGM6ZGIw\nWQYDVR0RBFIwUIZOc3BpZmZlOi8vMTExMTExMTEtMjIyMi0zMzMzLTQ0NDQtNTU1\nNTU1NTU1NTU1LmNvbnN1bC9ucy9kZWZhdWx0L2RjL2RjMS9zdmMvd2ViMAoGCCqG\nSM49BAMCA0gAMEUCIGC3TTvvjj76KMrguVyFf4tjOqaSCRie3nmHMRNNRav7AiEA\npY0heYeK9A6iOLrzqxSerkXXQyj5e9bE4VgUnxgPU6g=\n-----END CERTIFICATE-----\n" + }, + "privateKey": { + "inlineString": "-----BEGIN EC PRIVATE KEY-----\nMHcCAQEEIMoTkpRggp3fqZzFKh82yS4LjtJI+XY+qX/7DefHFrtdoAoGCCqGSM49\nAwEHoUQDQgAEADPv1RHVNRfa2VKRAB16b6rZnEt7tuhaxCFpQXPj7M2omb0B9Fav\nq5E0ivpNtv1QnFhxtPd7d5k4e+T7SkW1TQ==\n-----END EC PRIVATE KEY-----\n" + } + } + ], + "validationContext": { + "trustedCa": { + "inlineString": "-----BEGIN CERTIFICATE-----\nMIICXDCCAgKgAwIBAgIICpZq70Z9LyUwCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowFDESMBAG\nA1UEAxMJVGVzdCBDQSAyMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEIhywH1gx\nAsMwuF3ukAI5YL2jFxH6Usnma1HFSfVyxbXX1/uoZEYrj8yCAtdU2yoHETyd+Zx2\nThhRLP79pYegCaOCATwwggE4MA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTAD\nAQH/MGgGA1UdDgRhBF9kMToxMToxMTphYzoyYTpiYTo5NzpiMjozZjphYzo3Yjpi\nZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1ZTo0MTo2ZjpmMjo3\nMzo5NTo1ODowYzpkYjBqBgNVHSMEYzBhgF9kMToxMToxMTphYzoyYTpiYTo5Nzpi\nMjozZjphYzo3YjpiZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1\nZTo0MTo2ZjpmMjo3Mzo5NTo1ODowYzpkYjA/BgNVHREEODA2hjRzcGlmZmU6Ly8x\nMTExMTExMS0yMjIyLTMzMzMtNDQ0NC01NTU1NTU1NTU1NTUuY29uc3VsMAoGCCqG\nSM49BAMCA0gAMEUCICOY0i246rQHJt8o8Oya0D5PLL1FnmsQmQqIGCi31RwnAiEA\noR5f6Ku+cig2Il8T8LJujOp2/2A72QcHZA57B13y+8o=\n-----END CERTIFICATE-----\n" + } + } + }, + "requireClientCertificate": true + } + } + } + ], + "trafficDirection": "INBOUND" + }, + { + "@type": "type.googleapis.com/envoy.config.listener.v3.Listener", + "name": "refunds?peer=cloud:127.0.0.1:9090", + "address": { + "socketAddress": { + "address": "127.0.0.1", + "portValue": 9090 + } + }, + "filterChains": [ + { + "filters": [ + { + "name": 
"envoy.filters.network.tcp_proxy", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.network.tcp_proxy.v3.TcpProxy", + "statPrefix": "upstream_peered.refunds.default.cloud", + "cluster": "refunds.default.cloud.external.1c053652-8512-4373-90cf-5a7f6263a994.consul" + } + } + ] + } + ], + "trafficDirection": "OUTBOUND" + } + ], + "typeUrl": "type.googleapis.com/envoy.config.listener.v3.Listener", + "nonce": "00000001" +} \ No newline at end of file diff --git a/agent/xds/testdata/listeners/connect-proxy-with-tproxy-and-permissive-mtls.latest.golden b/agent/xds/testdata/listeners/connect-proxy-with-tproxy-and-permissive-mtls.latest.golden index 549af29cb445..f091cd40b552 100644 --- a/agent/xds/testdata/listeners/connect-proxy-with-tproxy-and-permissive-mtls.latest.golden +++ b/agent/xds/testdata/listeners/connect-proxy-with-tproxy-and-permissive-mtls.latest.golden @@ -28,11 +28,11 @@ }, { "@type": "type.googleapis.com/envoy.config.listener.v3.Listener", - "name": "outbound_listener:127.0.0.1:15001", + "name": "outbound_listener:127.0.0.1:1234", "address": { "socketAddress": { "address": "127.0.0.1", - "portValue": 15001 + "portValue": 1234 } }, "defaultFilterChain": { @@ -166,4 +166,4 @@ ], "typeUrl": "type.googleapis.com/envoy.config.listener.v3.Listener", "nonce": "00000001" -} +} \ No newline at end of file diff --git a/agent/xds/testdata/listeners/custom-upstream-with-prepared-query.latest.golden b/agent/xds/testdata/listeners/custom-upstream-with-prepared-query.latest.golden new file mode 100644 index 000000000000..636fb2f606b8 --- /dev/null +++ b/agent/xds/testdata/listeners/custom-upstream-with-prepared-query.latest.golden @@ -0,0 +1,114 @@ +{ + "versionInfo": "00000001", + "resources": [ + { + "@type": "type.googleapis.com/envoy.config.listener.v3.Listener", + "name": "db:custom-upstream", + "address": { + "socketAddress": { + "address": "11.11.11.11", + "portValue": 11111 + } + }, + "filterChains": [ + { + "filters": [ + { + "name": "envoy.filters.network.tcp_proxy", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.network.tcp_proxy.v3.TcpProxy", + "statPrefix": "foo-stats", + "cluster": "random-cluster" + } + } + ] + } + ] + }, + { + "@type": "type.googleapis.com/envoy.config.listener.v3.Listener", + "name": "prepared_query:geo-cache:127.10.10.10:8181", + "address": { + "socketAddress": { + "address": "127.10.10.10", + "portValue": 8181 + } + }, + "filterChains": [ + { + "filters": [ + { + "name": "envoy.filters.network.tcp_proxy", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.network.tcp_proxy.v3.TcpProxy", + "statPrefix": "upstream.prepared_query_geo-cache", + "cluster": "geo-cache.default.dc1.query.11111111-2222-3333-4444-555555555555.consul" + } + } + ] + } + ], + "trafficDirection": "OUTBOUND" + }, + { + "@type": "type.googleapis.com/envoy.config.listener.v3.Listener", + "name": "public_listener:0.0.0.0:9999", + "address": { + "socketAddress": { + "address": "0.0.0.0", + "portValue": 9999 + } + }, + "filterChains": [ + { + "filters": [ + { + "name": "envoy.filters.network.rbac", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.network.rbac.v3.RBAC", + "rules": {}, + "statPrefix": "connect_authz" + } + }, + { + "name": "envoy.filters.network.tcp_proxy", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.network.tcp_proxy.v3.TcpProxy", + "statPrefix": "public_listener", + "cluster": "local_app" + } + } + ], + "transportSocket": { + "name": "tls", + 
"typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext", + "commonTlsContext": { + "tlsParams": {}, + "tlsCertificates": [ + { + "certificateChain": { + "inlineString": "-----BEGIN CERTIFICATE-----\nMIICjDCCAjKgAwIBAgIIC5llxGV1gB8wCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowDjEMMAoG\nA1UEAxMDd2ViMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEADPv1RHVNRfa2VKR\nAB16b6rZnEt7tuhaxCFpQXPj7M2omb0B9Favq5E0ivpNtv1QnFhxtPd7d5k4e+T7\nSkW1TaOCAXIwggFuMA4GA1UdDwEB/wQEAwIDuDAdBgNVHSUEFjAUBggrBgEFBQcD\nAgYIKwYBBQUHAwEwDAYDVR0TAQH/BAIwADBoBgNVHQ4EYQRfN2Q6MDc6ODc6M2E6\nNDA6MTk6NDc6YzM6NWE6YzA6YmE6NjI6ZGY6YWY6NGI6ZDQ6MDU6MjU6NzY6M2Q6\nNWE6OGQ6MTY6OGQ6Njc6NWU6MmU6YTA6MzQ6N2Q6ZGM6ZmYwagYDVR0jBGMwYYBf\nZDE6MTE6MTE6YWM6MmE6YmE6OTc6YjI6M2Y6YWM6N2I6YmQ6ZGE6YmU6YjE6OGE6\nZmM6OWE6YmE6YjU6YmM6ODM6ZTc6NWU6NDE6NmY6ZjI6NzM6OTU6NTg6MGM6ZGIw\nWQYDVR0RBFIwUIZOc3BpZmZlOi8vMTExMTExMTEtMjIyMi0zMzMzLTQ0NDQtNTU1\nNTU1NTU1NTU1LmNvbnN1bC9ucy9kZWZhdWx0L2RjL2RjMS9zdmMvd2ViMAoGCCqG\nSM49BAMCA0gAMEUCIGC3TTvvjj76KMrguVyFf4tjOqaSCRie3nmHMRNNRav7AiEA\npY0heYeK9A6iOLrzqxSerkXXQyj5e9bE4VgUnxgPU6g=\n-----END CERTIFICATE-----\n" + }, + "privateKey": { + "inlineString": "-----BEGIN EC PRIVATE KEY-----\nMHcCAQEEIMoTkpRggp3fqZzFKh82yS4LjtJI+XY+qX/7DefHFrtdoAoGCCqGSM49\nAwEHoUQDQgAEADPv1RHVNRfa2VKRAB16b6rZnEt7tuhaxCFpQXPj7M2omb0B9Fav\nq5E0ivpNtv1QnFhxtPd7d5k4e+T7SkW1TQ==\n-----END EC PRIVATE KEY-----\n" + } + } + ], + "validationContext": { + "trustedCa": { + "inlineString": "-----BEGIN CERTIFICATE-----\nMIICXDCCAgKgAwIBAgIICpZq70Z9LyUwCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowFDESMBAG\nA1UEAxMJVGVzdCBDQSAyMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEIhywH1gx\nAsMwuF3ukAI5YL2jFxH6Usnma1HFSfVyxbXX1/uoZEYrj8yCAtdU2yoHETyd+Zx2\nThhRLP79pYegCaOCATwwggE4MA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTAD\nAQH/MGgGA1UdDgRhBF9kMToxMToxMTphYzoyYTpiYTo5NzpiMjozZjphYzo3Yjpi\nZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1ZTo0MTo2ZjpmMjo3\nMzo5NTo1ODowYzpkYjBqBgNVHSMEYzBhgF9kMToxMToxMTphYzoyYTpiYTo5Nzpi\nMjozZjphYzo3YjpiZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1\nZTo0MTo2ZjpmMjo3Mzo5NTo1ODowYzpkYjA/BgNVHREEODA2hjRzcGlmZmU6Ly8x\nMTExMTExMS0yMjIyLTMzMzMtNDQ0NC01NTU1NTU1NTU1NTUuY29uc3VsMAoGCCqG\nSM49BAMCA0gAMEUCICOY0i246rQHJt8o8Oya0D5PLL1FnmsQmQqIGCi31RwnAiEA\noR5f6Ku+cig2Il8T8LJujOp2/2A72QcHZA57B13y+8o=\n-----END CERTIFICATE-----\n" + } + } + }, + "requireClientCertificate": true + } + } + } + ], + "trafficDirection": "INBOUND" + } + ], + "typeUrl": "type.googleapis.com/envoy.config.listener.v3.Listener", + "nonce": "00000001" +} \ No newline at end of file diff --git a/agent/xds/testdata/listeners/expose-checks.latest.golden b/agent/xds/testdata/listeners/expose-checks-grpc.latest.golden similarity index 89% rename from agent/xds/testdata/listeners/expose-checks.latest.golden rename to agent/xds/testdata/listeners/expose-checks-grpc.latest.golden index 518285040d05..a73fdd5a7994 100644 --- a/agent/xds/testdata/listeners/expose-checks.latest.golden +++ b/agent/xds/testdata/listeners/expose-checks-grpc.latest.golden @@ -3,11 +3,11 @@ "resources": [ { "@type": "type.googleapis.com/envoy.config.listener.v3.Listener", - "name": "exposed_path_debug:1.2.3.4:21500", + "name": "exposed_path_grpchealthv1HealthCheck:1.2.3.4:21501", "address": { "socketAddress": { "address": "1.2.3.4", - "portValue": 21500 + "portValue": 21501 } }, "filterChains": [ @@ -33,22 +33,22 @@ "name": "envoy.filters.network.http_connection_manager", "typedConfig": { 
"@type": "type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager", - "statPrefix": "exposed_path_filter_debug_21500", + "statPrefix": "exposed_path_filter_grpchealthv1HealthCheck_21501", "routeConfig": { - "name": "exposed_path_filter_debug_21500", + "name": "exposed_path_filter_grpchealthv1HealthCheck_21501", "virtualHosts": [ { - "name": "exposed_path_filter_debug_21500", + "name": "exposed_path_filter_grpchealthv1HealthCheck_21501", "domains": [ "*" ], "routes": [ { "match": { - "path": "/debug" + "path": "/grpc.health.v1.Health/Check" }, "route": { - "cluster": "exposed_cluster_8181" + "cluster": "exposed_cluster_9090" } } ] @@ -64,10 +64,9 @@ } ], "tracing": { - "randomSampling": { - - } - } + "randomSampling": {} + }, + "http2ProtocolOptions": {} } } ] @@ -77,11 +76,11 @@ }, { "@type": "type.googleapis.com/envoy.config.listener.v3.Listener", - "name": "public_listener:1.2.3.4:8080", + "name": "public_listener:1.2.3.4:9090", "address": { "socketAddress": { "address": "1.2.3.4", - "portValue": 8080 + "portValue": 9090 } }, "filterChains": [ @@ -91,9 +90,7 @@ "name": "envoy.filters.network.rbac", "typedConfig": { "@type": "type.googleapis.com/envoy.extensions.filters.network.rbac.v3.RBAC", - "rules": { - - }, + "rules": {}, "statPrefix": "connect_authz" } }, @@ -111,9 +108,7 @@ "typedConfig": { "@type": "type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext", "commonTlsContext": { - "tlsParams": { - - }, + "tlsParams": {}, "tlsCertificates": [ { "certificateChain": { diff --git a/agent/xds/testdata/listeners/expose-checks-http-with-bind-override.latest.golden b/agent/xds/testdata/listeners/expose-checks-http-with-bind-override.latest.golden new file mode 100644 index 000000000000..fc97741bb875 --- /dev/null +++ b/agent/xds/testdata/listeners/expose-checks-http-with-bind-override.latest.golden @@ -0,0 +1,137 @@ +{ + "versionInfo": "00000001", + "resources": [ + { + "@type": "type.googleapis.com/envoy.config.listener.v3.Listener", + "name": "exposed_path_debug:6.7.8.9:21500", + "address": { + "socketAddress": { + "address": "6.7.8.9", + "portValue": 21500 + } + }, + "filterChains": [ + { + "filterChainMatch": { + "sourcePrefixRanges": [ + { + "addressPrefix": "127.0.0.1", + "prefixLen": 8 + }, + { + "addressPrefix": "192.0.2.1", + "prefixLen": 32 + }, + { + "addressPrefix": "::1", + "prefixLen": 128 + } + ] + }, + "filters": [ + { + "name": "envoy.filters.network.http_connection_manager", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager", + "statPrefix": "exposed_path_filter_debug_21500", + "routeConfig": { + "name": "exposed_path_filter_debug_21500", + "virtualHosts": [ + { + "name": "exposed_path_filter_debug_21500", + "domains": [ + "*" + ], + "routes": [ + { + "match": { + "path": "/debug" + }, + "route": { + "cluster": "exposed_cluster_8181" + } + } + ] + } + ] + }, + "httpFilters": [ + { + "name": "envoy.filters.http.router", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.http.router.v3.Router" + } + } + ], + "tracing": { + "randomSampling": {} + } + } + } + ] + } + ], + "trafficDirection": "INBOUND" + }, + { + "@type": "type.googleapis.com/envoy.config.listener.v3.Listener", + "name": "public_listener:6.7.8.9:8080", + "address": { + "socketAddress": { + "address": "6.7.8.9", + "portValue": 8080 + } + }, + "filterChains": [ + { + "filters": [ + { + "name": "envoy.filters.network.rbac", + 
"typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.network.rbac.v3.RBAC", + "rules": {}, + "statPrefix": "connect_authz" + } + }, + { + "name": "envoy.filters.network.tcp_proxy", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.network.tcp_proxy.v3.TcpProxy", + "statPrefix": "public_listener", + "cluster": "local_app" + } + } + ], + "transportSocket": { + "name": "tls", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext", + "commonTlsContext": { + "tlsParams": {}, + "tlsCertificates": [ + { + "certificateChain": { + "inlineString": "-----BEGIN CERTIFICATE-----\nMIICjDCCAjKgAwIBAgIIC5llxGV1gB8wCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowDjEMMAoG\nA1UEAxMDd2ViMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEADPv1RHVNRfa2VKR\nAB16b6rZnEt7tuhaxCFpQXPj7M2omb0B9Favq5E0ivpNtv1QnFhxtPd7d5k4e+T7\nSkW1TaOCAXIwggFuMA4GA1UdDwEB/wQEAwIDuDAdBgNVHSUEFjAUBggrBgEFBQcD\nAgYIKwYBBQUHAwEwDAYDVR0TAQH/BAIwADBoBgNVHQ4EYQRfN2Q6MDc6ODc6M2E6\nNDA6MTk6NDc6YzM6NWE6YzA6YmE6NjI6ZGY6YWY6NGI6ZDQ6MDU6MjU6NzY6M2Q6\nNWE6OGQ6MTY6OGQ6Njc6NWU6MmU6YTA6MzQ6N2Q6ZGM6ZmYwagYDVR0jBGMwYYBf\nZDE6MTE6MTE6YWM6MmE6YmE6OTc6YjI6M2Y6YWM6N2I6YmQ6ZGE6YmU6YjE6OGE6\nZmM6OWE6YmE6YjU6YmM6ODM6ZTc6NWU6NDE6NmY6ZjI6NzM6OTU6NTg6MGM6ZGIw\nWQYDVR0RBFIwUIZOc3BpZmZlOi8vMTExMTExMTEtMjIyMi0zMzMzLTQ0NDQtNTU1\nNTU1NTU1NTU1LmNvbnN1bC9ucy9kZWZhdWx0L2RjL2RjMS9zdmMvd2ViMAoGCCqG\nSM49BAMCA0gAMEUCIGC3TTvvjj76KMrguVyFf4tjOqaSCRie3nmHMRNNRav7AiEA\npY0heYeK9A6iOLrzqxSerkXXQyj5e9bE4VgUnxgPU6g=\n-----END CERTIFICATE-----\n" + }, + "privateKey": { + "inlineString": "-----BEGIN EC PRIVATE KEY-----\nMHcCAQEEIMoTkpRggp3fqZzFKh82yS4LjtJI+XY+qX/7DefHFrtdoAoGCCqGSM49\nAwEHoUQDQgAEADPv1RHVNRfa2VKRAB16b6rZnEt7tuhaxCFpQXPj7M2omb0B9Fav\nq5E0ivpNtv1QnFhxtPd7d5k4e+T7SkW1TQ==\n-----END EC PRIVATE KEY-----\n" + } + } + ], + "validationContext": { + "trustedCa": { + "inlineString": "-----BEGIN CERTIFICATE-----\nMIICXDCCAgKgAwIBAgIICpZq70Z9LyUwCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowFDESMBAG\nA1UEAxMJVGVzdCBDQSAyMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEIhywH1gx\nAsMwuF3ukAI5YL2jFxH6Usnma1HFSfVyxbXX1/uoZEYrj8yCAtdU2yoHETyd+Zx2\nThhRLP79pYegCaOCATwwggE4MA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTAD\nAQH/MGgGA1UdDgRhBF9kMToxMToxMTphYzoyYTpiYTo5NzpiMjozZjphYzo3Yjpi\nZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1ZTo0MTo2ZjpmMjo3\nMzo5NTo1ODowYzpkYjBqBgNVHSMEYzBhgF9kMToxMToxMTphYzoyYTpiYTo5Nzpi\nMjozZjphYzo3YjpiZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1\nZTo0MTo2ZjpmMjo3Mzo5NTo1ODowYzpkYjA/BgNVHREEODA2hjRzcGlmZmU6Ly8x\nMTExMTExMS0yMjIyLTMzMzMtNDQ0NC01NTU1NTU1NTU1NTUuY29uc3VsMAoGCCqG\nSM49BAMCA0gAMEUCICOY0i246rQHJt8o8Oya0D5PLL1FnmsQmQqIGCi31RwnAiEA\noR5f6Ku+cig2Il8T8LJujOp2/2A72QcHZA57B13y+8o=\n-----END CERTIFICATE-----\n" + } + } + }, + "requireClientCertificate": true + } + } + } + ], + "trafficDirection": "INBOUND" + } + ], + "typeUrl": "type.googleapis.com/envoy.config.listener.v3.Listener", + "nonce": "00000001" +} \ No newline at end of file diff --git a/agent/xds/testdata/listeners/expose-checks-http.latest.golden b/agent/xds/testdata/listeners/expose-checks-http.latest.golden new file mode 100644 index 000000000000..a5e582d5ee72 --- /dev/null +++ b/agent/xds/testdata/listeners/expose-checks-http.latest.golden @@ -0,0 +1,137 @@ +{ + "versionInfo": "00000001", + "resources": [ + { + "@type": "type.googleapis.com/envoy.config.listener.v3.Listener", + "name": "exposed_path_debug:1.2.3.4:21500", + 
"address": { + "socketAddress": { + "address": "1.2.3.4", + "portValue": 21500 + } + }, + "filterChains": [ + { + "filterChainMatch": { + "sourcePrefixRanges": [ + { + "addressPrefix": "127.0.0.1", + "prefixLen": 8 + }, + { + "addressPrefix": "192.0.2.1", + "prefixLen": 32 + }, + { + "addressPrefix": "::1", + "prefixLen": 128 + } + ] + }, + "filters": [ + { + "name": "envoy.filters.network.http_connection_manager", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager", + "statPrefix": "exposed_path_filter_debug_21500", + "routeConfig": { + "name": "exposed_path_filter_debug_21500", + "virtualHosts": [ + { + "name": "exposed_path_filter_debug_21500", + "domains": [ + "*" + ], + "routes": [ + { + "match": { + "path": "/debug" + }, + "route": { + "cluster": "exposed_cluster_8181" + } + } + ] + } + ] + }, + "httpFilters": [ + { + "name": "envoy.filters.http.router", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.http.router.v3.Router" + } + } + ], + "tracing": { + "randomSampling": {} + } + } + } + ] + } + ], + "trafficDirection": "INBOUND" + }, + { + "@type": "type.googleapis.com/envoy.config.listener.v3.Listener", + "name": "public_listener:1.2.3.4:8080", + "address": { + "socketAddress": { + "address": "1.2.3.4", + "portValue": 8080 + } + }, + "filterChains": [ + { + "filters": [ + { + "name": "envoy.filters.network.rbac", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.network.rbac.v3.RBAC", + "rules": {}, + "statPrefix": "connect_authz" + } + }, + { + "name": "envoy.filters.network.tcp_proxy", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.network.tcp_proxy.v3.TcpProxy", + "statPrefix": "public_listener", + "cluster": "local_app" + } + } + ], + "transportSocket": { + "name": "tls", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext", + "commonTlsContext": { + "tlsParams": {}, + "tlsCertificates": [ + { + "certificateChain": { + "inlineString": "-----BEGIN CERTIFICATE-----\nMIICjDCCAjKgAwIBAgIIC5llxGV1gB8wCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowDjEMMAoG\nA1UEAxMDd2ViMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEADPv1RHVNRfa2VKR\nAB16b6rZnEt7tuhaxCFpQXPj7M2omb0B9Favq5E0ivpNtv1QnFhxtPd7d5k4e+T7\nSkW1TaOCAXIwggFuMA4GA1UdDwEB/wQEAwIDuDAdBgNVHSUEFjAUBggrBgEFBQcD\nAgYIKwYBBQUHAwEwDAYDVR0TAQH/BAIwADBoBgNVHQ4EYQRfN2Q6MDc6ODc6M2E6\nNDA6MTk6NDc6YzM6NWE6YzA6YmE6NjI6ZGY6YWY6NGI6ZDQ6MDU6MjU6NzY6M2Q6\nNWE6OGQ6MTY6OGQ6Njc6NWU6MmU6YTA6MzQ6N2Q6ZGM6ZmYwagYDVR0jBGMwYYBf\nZDE6MTE6MTE6YWM6MmE6YmE6OTc6YjI6M2Y6YWM6N2I6YmQ6ZGE6YmU6YjE6OGE6\nZmM6OWE6YmE6YjU6YmM6ODM6ZTc6NWU6NDE6NmY6ZjI6NzM6OTU6NTg6MGM6ZGIw\nWQYDVR0RBFIwUIZOc3BpZmZlOi8vMTExMTExMTEtMjIyMi0zMzMzLTQ0NDQtNTU1\nNTU1NTU1NTU1LmNvbnN1bC9ucy9kZWZhdWx0L2RjL2RjMS9zdmMvd2ViMAoGCCqG\nSM49BAMCA0gAMEUCIGC3TTvvjj76KMrguVyFf4tjOqaSCRie3nmHMRNNRav7AiEA\npY0heYeK9A6iOLrzqxSerkXXQyj5e9bE4VgUnxgPU6g=\n-----END CERTIFICATE-----\n" + }, + "privateKey": { + "inlineString": "-----BEGIN EC PRIVATE KEY-----\nMHcCAQEEIMoTkpRggp3fqZzFKh82yS4LjtJI+XY+qX/7DefHFrtdoAoGCCqGSM49\nAwEHoUQDQgAEADPv1RHVNRfa2VKRAB16b6rZnEt7tuhaxCFpQXPj7M2omb0B9Fav\nq5E0ivpNtv1QnFhxtPd7d5k4e+T7SkW1TQ==\n-----END EC PRIVATE KEY-----\n" + } + } + ], + "validationContext": { + "trustedCa": { + "inlineString": "-----BEGIN 
CERTIFICATE-----\nMIICXDCCAgKgAwIBAgIICpZq70Z9LyUwCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowFDESMBAG\nA1UEAxMJVGVzdCBDQSAyMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEIhywH1gx\nAsMwuF3ukAI5YL2jFxH6Usnma1HFSfVyxbXX1/uoZEYrj8yCAtdU2yoHETyd+Zx2\nThhRLP79pYegCaOCATwwggE4MA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTAD\nAQH/MGgGA1UdDgRhBF9kMToxMToxMTphYzoyYTpiYTo5NzpiMjozZjphYzo3Yjpi\nZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1ZTo0MTo2ZjpmMjo3\nMzo5NTo1ODowYzpkYjBqBgNVHSMEYzBhgF9kMToxMToxMTphYzoyYTpiYTo5Nzpi\nMjozZjphYzo3YjpiZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1\nZTo0MTo2ZjpmMjo3Mzo5NTo1ODowYzpkYjA/BgNVHREEODA2hjRzcGlmZmU6Ly8x\nMTExMTExMS0yMjIyLTMzMzMtNDQ0NC01NTU1NTU1NTU1NTUuY29uc3VsMAoGCCqG\nSM49BAMCA0gAMEUCICOY0i246rQHJt8o8Oya0D5PLL1FnmsQmQqIGCi31RwnAiEA\noR5f6Ku+cig2Il8T8LJujOp2/2A72QcHZA57B13y+8o=\n-----END CERTIFICATE-----\n" + } + } + }, + "requireClientCertificate": true + } + } + } + ], + "trafficDirection": "INBOUND" + } + ], + "typeUrl": "type.googleapis.com/envoy.config.listener.v3.Listener", + "nonce": "00000001" +} \ No newline at end of file diff --git a/agent/xds/testdata/listeners/ingress-with-tls-mixed-cipher-suites-listeners.latest.golden b/agent/xds/testdata/listeners/ingress-with-tls-mixed-cipher-suites-listeners.latest.golden new file mode 100644 index 000000000000..e480318be9e9 --- /dev/null +++ b/agent/xds/testdata/listeners/ingress-with-tls-mixed-cipher-suites-listeners.latest.golden @@ -0,0 +1,156 @@ +{ + "versionInfo": "00000001", + "resources": [ + { + "@type": "type.googleapis.com/envoy.config.listener.v3.Listener", + "name": "http:1.2.3.4:8080", + "address": { + "socketAddress": { + "address": "1.2.3.4", + "portValue": 8080 + } + }, + "filterChains": [ + { + "filters": [ + { + "name": "envoy.filters.network.http_connection_manager", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager", + "statPrefix": "ingress_upstream_8080", + "rds": { + "configSource": { + "ads": {}, + "resourceApiVersion": "V3" + }, + "routeConfigName": "8080" + }, + "httpFilters": [ + { + "name": "envoy.filters.http.router", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.http.router.v3.Router" + } + } + ], + "tracing": { + "randomSampling": {} + } + } + } + ], + "transportSocket": { + "name": "tls", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext", + "commonTlsContext": { + "tlsParams": { + "cipherSuites": [ + "ECDHE-RSA-AES256-SHA" + ] + }, + "tlsCertificates": [ + { + "certificateChain": { + "inlineString": "-----BEGIN 
CERTIFICATE-----\nMIICjDCCAjKgAwIBAgIIC5llxGV1gB8wCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowDjEMMAoG\nA1UEAxMDd2ViMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEADPv1RHVNRfa2VKR\nAB16b6rZnEt7tuhaxCFpQXPj7M2omb0B9Favq5E0ivpNtv1QnFhxtPd7d5k4e+T7\nSkW1TaOCAXIwggFuMA4GA1UdDwEB/wQEAwIDuDAdBgNVHSUEFjAUBggrBgEFBQcD\nAgYIKwYBBQUHAwEwDAYDVR0TAQH/BAIwADBoBgNVHQ4EYQRfN2Q6MDc6ODc6M2E6\nNDA6MTk6NDc6YzM6NWE6YzA6YmE6NjI6ZGY6YWY6NGI6ZDQ6MDU6MjU6NzY6M2Q6\nNWE6OGQ6MTY6OGQ6Njc6NWU6MmU6YTA6MzQ6N2Q6ZGM6ZmYwagYDVR0jBGMwYYBf\nZDE6MTE6MTE6YWM6MmE6YmE6OTc6YjI6M2Y6YWM6N2I6YmQ6ZGE6YmU6YjE6OGE6\nZmM6OWE6YmE6YjU6YmM6ODM6ZTc6NWU6NDE6NmY6ZjI6NzM6OTU6NTg6MGM6ZGIw\nWQYDVR0RBFIwUIZOc3BpZmZlOi8vMTExMTExMTEtMjIyMi0zMzMzLTQ0NDQtNTU1\nNTU1NTU1NTU1LmNvbnN1bC9ucy9kZWZhdWx0L2RjL2RjMS9zdmMvd2ViMAoGCCqG\nSM49BAMCA0gAMEUCIGC3TTvvjj76KMrguVyFf4tjOqaSCRie3nmHMRNNRav7AiEA\npY0heYeK9A6iOLrzqxSerkXXQyj5e9bE4VgUnxgPU6g=\n-----END CERTIFICATE-----\n" + }, + "privateKey": { + "inlineString": "-----BEGIN EC PRIVATE KEY-----\nMHcCAQEEIMoTkpRggp3fqZzFKh82yS4LjtJI+XY+qX/7DefHFrtdoAoGCCqGSM49\nAwEHoUQDQgAEADPv1RHVNRfa2VKRAB16b6rZnEt7tuhaxCFpQXPj7M2omb0B9Fav\nq5E0ivpNtv1QnFhxtPd7d5k4e+T7SkW1TQ==\n-----END EC PRIVATE KEY-----\n" + } + } + ], + "validationContext": { + "trustedCa": { + "inlineString": "-----BEGIN CERTIFICATE-----\nMIICXDCCAgKgAwIBAgIICpZq70Z9LyUwCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowFDESMBAG\nA1UEAxMJVGVzdCBDQSAyMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEIhywH1gx\nAsMwuF3ukAI5YL2jFxH6Usnma1HFSfVyxbXX1/uoZEYrj8yCAtdU2yoHETyd+Zx2\nThhRLP79pYegCaOCATwwggE4MA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTAD\nAQH/MGgGA1UdDgRhBF9kMToxMToxMTphYzoyYTpiYTo5NzpiMjozZjphYzo3Yjpi\nZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1ZTo0MTo2ZjpmMjo3\nMzo5NTo1ODowYzpkYjBqBgNVHSMEYzBhgF9kMToxMToxMTphYzoyYTpiYTo5Nzpi\nMjozZjphYzo3YjpiZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1\nZTo0MTo2ZjpmMjo3Mzo5NTo1ODowYzpkYjA/BgNVHREEODA2hjRzcGlmZmU6Ly8x\nMTExMTExMS0yMjIyLTMzMzMtNDQ0NC01NTU1NTU1NTU1NTUuY29uc3VsMAoGCCqG\nSM49BAMCA0gAMEUCICOY0i246rQHJt8o8Oya0D5PLL1FnmsQmQqIGCi31RwnAiEA\noR5f6Ku+cig2Il8T8LJujOp2/2A72QcHZA57B13y+8o=\n-----END CERTIFICATE-----\n" + } + }, + "alpnProtocols": [ + "http/1.1" + ] + }, + "requireClientCertificate": false + } + } + } + ], + "trafficDirection": "OUTBOUND" + }, + { + "@type": "type.googleapis.com/envoy.config.listener.v3.Listener", + "name": "http:1.2.3.4:8081", + "address": { + "socketAddress": { + "address": "1.2.3.4", + "portValue": 8081 + } + }, + "filterChains": [ + { + "filters": [ + { + "name": "envoy.filters.network.http_connection_manager", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager", + "statPrefix": "ingress_upstream_8081", + "rds": { + "configSource": { + "ads": {}, + "resourceApiVersion": "V3" + }, + "routeConfigName": "8081" + }, + "httpFilters": [ + { + "name": "envoy.filters.http.router", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.http.router.v3.Router" + } + } + ], + "tracing": { + "randomSampling": {} + } + } + } + ], + "transportSocket": { + "name": "tls", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext", + "commonTlsContext": { + "tlsParams": { + "cipherSuites": [ + "ECDHE-RSA-CHACHA20-POLY1305", + "ECDHE-ECDSA-CHACHA20-POLY1305" + ] + }, + "tlsCertificates": [ + { + "certificateChain": { + "inlineString": "-----BEGIN 
CERTIFICATE-----\nMIICjDCCAjKgAwIBAgIIC5llxGV1gB8wCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowDjEMMAoG\nA1UEAxMDd2ViMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEADPv1RHVNRfa2VKR\nAB16b6rZnEt7tuhaxCFpQXPj7M2omb0B9Favq5E0ivpNtv1QnFhxtPd7d5k4e+T7\nSkW1TaOCAXIwggFuMA4GA1UdDwEB/wQEAwIDuDAdBgNVHSUEFjAUBggrBgEFBQcD\nAgYIKwYBBQUHAwEwDAYDVR0TAQH/BAIwADBoBgNVHQ4EYQRfN2Q6MDc6ODc6M2E6\nNDA6MTk6NDc6YzM6NWE6YzA6YmE6NjI6ZGY6YWY6NGI6ZDQ6MDU6MjU6NzY6M2Q6\nNWE6OGQ6MTY6OGQ6Njc6NWU6MmU6YTA6MzQ6N2Q6ZGM6ZmYwagYDVR0jBGMwYYBf\nZDE6MTE6MTE6YWM6MmE6YmE6OTc6YjI6M2Y6YWM6N2I6YmQ6ZGE6YmU6YjE6OGE6\nZmM6OWE6YmE6YjU6YmM6ODM6ZTc6NWU6NDE6NmY6ZjI6NzM6OTU6NTg6MGM6ZGIw\nWQYDVR0RBFIwUIZOc3BpZmZlOi8vMTExMTExMTEtMjIyMi0zMzMzLTQ0NDQtNTU1\nNTU1NTU1NTU1LmNvbnN1bC9ucy9kZWZhdWx0L2RjL2RjMS9zdmMvd2ViMAoGCCqG\nSM49BAMCA0gAMEUCIGC3TTvvjj76KMrguVyFf4tjOqaSCRie3nmHMRNNRav7AiEA\npY0heYeK9A6iOLrzqxSerkXXQyj5e9bE4VgUnxgPU6g=\n-----END CERTIFICATE-----\n" + }, + "privateKey": { + "inlineString": "-----BEGIN EC PRIVATE KEY-----\nMHcCAQEEIMoTkpRggp3fqZzFKh82yS4LjtJI+XY+qX/7DefHFrtdoAoGCCqGSM49\nAwEHoUQDQgAEADPv1RHVNRfa2VKRAB16b6rZnEt7tuhaxCFpQXPj7M2omb0B9Fav\nq5E0ivpNtv1QnFhxtPd7d5k4e+T7SkW1TQ==\n-----END EC PRIVATE KEY-----\n" + } + } + ], + "validationContext": { + "trustedCa": { + "inlineString": "-----BEGIN CERTIFICATE-----\nMIICXDCCAgKgAwIBAgIICpZq70Z9LyUwCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowFDESMBAG\nA1UEAxMJVGVzdCBDQSAyMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEIhywH1gx\nAsMwuF3ukAI5YL2jFxH6Usnma1HFSfVyxbXX1/uoZEYrj8yCAtdU2yoHETyd+Zx2\nThhRLP79pYegCaOCATwwggE4MA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTAD\nAQH/MGgGA1UdDgRhBF9kMToxMToxMTphYzoyYTpiYTo5NzpiMjozZjphYzo3Yjpi\nZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1ZTo0MTo2ZjpmMjo3\nMzo5NTo1ODowYzpkYjBqBgNVHSMEYzBhgF9kMToxMToxMTphYzoyYTpiYTo5Nzpi\nMjozZjphYzo3YjpiZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1\nZTo0MTo2ZjpmMjo3Mzo5NTo1ODowYzpkYjA/BgNVHREEODA2hjRzcGlmZmU6Ly8x\nMTExMTExMS0yMjIyLTMzMzMtNDQ0NC01NTU1NTU1NTU1NTUuY29uc3VsMAoGCCqG\nSM49BAMCA0gAMEUCICOY0i246rQHJt8o8Oya0D5PLL1FnmsQmQqIGCi31RwnAiEA\noR5f6Ku+cig2Il8T8LJujOp2/2A72QcHZA57B13y+8o=\n-----END CERTIFICATE-----\n" + } + }, + "alpnProtocols": [ + "http/1.1" + ] + }, + "requireClientCertificate": false + } + } + } + ], + "trafficDirection": "OUTBOUND" + } + ], + "typeUrl": "type.googleapis.com/envoy.config.listener.v3.Listener", + "nonce": "00000001" +} \ No newline at end of file diff --git a/agent/xds/testdata/listeners/ingress-with-tls-mixed-max-version-listeners.latest.golden b/agent/xds/testdata/listeners/ingress-with-tls-mixed-max-version-listeners.latest.golden new file mode 100644 index 000000000000..ecc1963a1bb3 --- /dev/null +++ b/agent/xds/testdata/listeners/ingress-with-tls-mixed-max-version-listeners.latest.golden @@ -0,0 +1,223 @@ +{ + "versionInfo": "00000001", + "resources": [ + { + "@type": "type.googleapis.com/envoy.config.listener.v3.Listener", + "name": "http:1.2.3.4:8080", + "address": { + "socketAddress": { + "address": "1.2.3.4", + "portValue": 8080 + } + }, + "filterChains": [ + { + "filters": [ + { + "name": "envoy.filters.network.http_connection_manager", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager", + "statPrefix": "ingress_upstream_8080", + "rds": { + "configSource": { + "ads": {}, + "resourceApiVersion": "V3" + }, + "routeConfigName": "8080" + }, + "httpFilters": [ + { + "name": "envoy.filters.http.router", + "typedConfig": { + 
"@type": "type.googleapis.com/envoy.extensions.filters.http.router.v3.Router" + } + } + ], + "tracing": { + "randomSampling": {} + } + } + } + ], + "transportSocket": { + "name": "tls", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext", + "commonTlsContext": { + "tlsParams": { + "tlsMaximumProtocolVersion": "TLSv1_2" + }, + "tlsCertificates": [ + { + "certificateChain": { + "inlineString": "-----BEGIN CERTIFICATE-----\nMIICjDCCAjKgAwIBAgIIC5llxGV1gB8wCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowDjEMMAoG\nA1UEAxMDd2ViMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEADPv1RHVNRfa2VKR\nAB16b6rZnEt7tuhaxCFpQXPj7M2omb0B9Favq5E0ivpNtv1QnFhxtPd7d5k4e+T7\nSkW1TaOCAXIwggFuMA4GA1UdDwEB/wQEAwIDuDAdBgNVHSUEFjAUBggrBgEFBQcD\nAgYIKwYBBQUHAwEwDAYDVR0TAQH/BAIwADBoBgNVHQ4EYQRfN2Q6MDc6ODc6M2E6\nNDA6MTk6NDc6YzM6NWE6YzA6YmE6NjI6ZGY6YWY6NGI6ZDQ6MDU6MjU6NzY6M2Q6\nNWE6OGQ6MTY6OGQ6Njc6NWU6MmU6YTA6MzQ6N2Q6ZGM6ZmYwagYDVR0jBGMwYYBf\nZDE6MTE6MTE6YWM6MmE6YmE6OTc6YjI6M2Y6YWM6N2I6YmQ6ZGE6YmU6YjE6OGE6\nZmM6OWE6YmE6YjU6YmM6ODM6ZTc6NWU6NDE6NmY6ZjI6NzM6OTU6NTg6MGM6ZGIw\nWQYDVR0RBFIwUIZOc3BpZmZlOi8vMTExMTExMTEtMjIyMi0zMzMzLTQ0NDQtNTU1\nNTU1NTU1NTU1LmNvbnN1bC9ucy9kZWZhdWx0L2RjL2RjMS9zdmMvd2ViMAoGCCqG\nSM49BAMCA0gAMEUCIGC3TTvvjj76KMrguVyFf4tjOqaSCRie3nmHMRNNRav7AiEA\npY0heYeK9A6iOLrzqxSerkXXQyj5e9bE4VgUnxgPU6g=\n-----END CERTIFICATE-----\n" + }, + "privateKey": { + "inlineString": "-----BEGIN EC PRIVATE KEY-----\nMHcCAQEEIMoTkpRggp3fqZzFKh82yS4LjtJI+XY+qX/7DefHFrtdoAoGCCqGSM49\nAwEHoUQDQgAEADPv1RHVNRfa2VKRAB16b6rZnEt7tuhaxCFpQXPj7M2omb0B9Fav\nq5E0ivpNtv1QnFhxtPd7d5k4e+T7SkW1TQ==\n-----END EC PRIVATE KEY-----\n" + } + } + ], + "validationContext": { + "trustedCa": { + "inlineString": "-----BEGIN CERTIFICATE-----\nMIICXDCCAgKgAwIBAgIICpZq70Z9LyUwCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowFDESMBAG\nA1UEAxMJVGVzdCBDQSAyMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEIhywH1gx\nAsMwuF3ukAI5YL2jFxH6Usnma1HFSfVyxbXX1/uoZEYrj8yCAtdU2yoHETyd+Zx2\nThhRLP79pYegCaOCATwwggE4MA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTAD\nAQH/MGgGA1UdDgRhBF9kMToxMToxMTphYzoyYTpiYTo5NzpiMjozZjphYzo3Yjpi\nZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1ZTo0MTo2ZjpmMjo3\nMzo5NTo1ODowYzpkYjBqBgNVHSMEYzBhgF9kMToxMToxMTphYzoyYTpiYTo5Nzpi\nMjozZjphYzo3YjpiZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1\nZTo0MTo2ZjpmMjo3Mzo5NTo1ODowYzpkYjA/BgNVHREEODA2hjRzcGlmZmU6Ly8x\nMTExMTExMS0yMjIyLTMzMzMtNDQ0NC01NTU1NTU1NTU1NTUuY29uc3VsMAoGCCqG\nSM49BAMCA0gAMEUCICOY0i246rQHJt8o8Oya0D5PLL1FnmsQmQqIGCi31RwnAiEA\noR5f6Ku+cig2Il8T8LJujOp2/2A72QcHZA57B13y+8o=\n-----END CERTIFICATE-----\n" + } + }, + "alpnProtocols": [ + "http/1.1" + ] + }, + "requireClientCertificate": false + } + } + } + ], + "trafficDirection": "OUTBOUND" + }, + { + "@type": "type.googleapis.com/envoy.config.listener.v3.Listener", + "name": "http:1.2.3.4:8081", + "address": { + "socketAddress": { + "address": "1.2.3.4", + "portValue": 8081 + } + }, + "filterChains": [ + { + "filters": [ + { + "name": "envoy.filters.network.http_connection_manager", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager", + "statPrefix": "ingress_upstream_8081", + "rds": { + "configSource": { + "ads": {}, + "resourceApiVersion": "V3" + }, + "routeConfigName": "8081" + }, + "httpFilters": [ + { + "name": "envoy.filters.http.router", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.http.router.v3.Router" + 
} + } + ], + "tracing": { + "randomSampling": {} + } + } + } + ], + "transportSocket": { + "name": "tls", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext", + "commonTlsContext": { + "tlsParams": { + "tlsMaximumProtocolVersion": "TLSv1_0" + }, + "tlsCertificates": [ + { + "certificateChain": { + "inlineString": "-----BEGIN CERTIFICATE-----\nMIICjDCCAjKgAwIBAgIIC5llxGV1gB8wCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowDjEMMAoG\nA1UEAxMDd2ViMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEADPv1RHVNRfa2VKR\nAB16b6rZnEt7tuhaxCFpQXPj7M2omb0B9Favq5E0ivpNtv1QnFhxtPd7d5k4e+T7\nSkW1TaOCAXIwggFuMA4GA1UdDwEB/wQEAwIDuDAdBgNVHSUEFjAUBggrBgEFBQcD\nAgYIKwYBBQUHAwEwDAYDVR0TAQH/BAIwADBoBgNVHQ4EYQRfN2Q6MDc6ODc6M2E6\nNDA6MTk6NDc6YzM6NWE6YzA6YmE6NjI6ZGY6YWY6NGI6ZDQ6MDU6MjU6NzY6M2Q6\nNWE6OGQ6MTY6OGQ6Njc6NWU6MmU6YTA6MzQ6N2Q6ZGM6ZmYwagYDVR0jBGMwYYBf\nZDE6MTE6MTE6YWM6MmE6YmE6OTc6YjI6M2Y6YWM6N2I6YmQ6ZGE6YmU6YjE6OGE6\nZmM6OWE6YmE6YjU6YmM6ODM6ZTc6NWU6NDE6NmY6ZjI6NzM6OTU6NTg6MGM6ZGIw\nWQYDVR0RBFIwUIZOc3BpZmZlOi8vMTExMTExMTEtMjIyMi0zMzMzLTQ0NDQtNTU1\nNTU1NTU1NTU1LmNvbnN1bC9ucy9kZWZhdWx0L2RjL2RjMS9zdmMvd2ViMAoGCCqG\nSM49BAMCA0gAMEUCIGC3TTvvjj76KMrguVyFf4tjOqaSCRie3nmHMRNNRav7AiEA\npY0heYeK9A6iOLrzqxSerkXXQyj5e9bE4VgUnxgPU6g=\n-----END CERTIFICATE-----\n" + }, + "privateKey": { + "inlineString": "-----BEGIN EC PRIVATE KEY-----\nMHcCAQEEIMoTkpRggp3fqZzFKh82yS4LjtJI+XY+qX/7DefHFrtdoAoGCCqGSM49\nAwEHoUQDQgAEADPv1RHVNRfa2VKRAB16b6rZnEt7tuhaxCFpQXPj7M2omb0B9Fav\nq5E0ivpNtv1QnFhxtPd7d5k4e+T7SkW1TQ==\n-----END EC PRIVATE KEY-----\n" + } + } + ], + "validationContext": { + "trustedCa": { + "inlineString": "-----BEGIN CERTIFICATE-----\nMIICXDCCAgKgAwIBAgIICpZq70Z9LyUwCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowFDESMBAG\nA1UEAxMJVGVzdCBDQSAyMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEIhywH1gx\nAsMwuF3ukAI5YL2jFxH6Usnma1HFSfVyxbXX1/uoZEYrj8yCAtdU2yoHETyd+Zx2\nThhRLP79pYegCaOCATwwggE4MA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTAD\nAQH/MGgGA1UdDgRhBF9kMToxMToxMTphYzoyYTpiYTo5NzpiMjozZjphYzo3Yjpi\nZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1ZTo0MTo2ZjpmMjo3\nMzo5NTo1ODowYzpkYjBqBgNVHSMEYzBhgF9kMToxMToxMTphYzoyYTpiYTo5Nzpi\nMjozZjphYzo3YjpiZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1\nZTo0MTo2ZjpmMjo3Mzo5NTo1ODowYzpkYjA/BgNVHREEODA2hjRzcGlmZmU6Ly8x\nMTExMTExMS0yMjIyLTMzMzMtNDQ0NC01NTU1NTU1NTU1NTUuY29uc3VsMAoGCCqG\nSM49BAMCA0gAMEUCICOY0i246rQHJt8o8Oya0D5PLL1FnmsQmQqIGCi31RwnAiEA\noR5f6Ku+cig2Il8T8LJujOp2/2A72QcHZA57B13y+8o=\n-----END CERTIFICATE-----\n" + } + }, + "alpnProtocols": [ + "http/1.1" + ] + }, + "requireClientCertificate": false + } + } + } + ], + "trafficDirection": "OUTBOUND" + }, + { + "@type": "type.googleapis.com/envoy.config.listener.v3.Listener", + "name": "http:1.2.3.4:8082", + "address": { + "socketAddress": { + "address": "1.2.3.4", + "portValue": 8082 + } + }, + "filterChains": [ + { + "filters": [ + { + "name": "envoy.filters.network.http_connection_manager", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager", + "statPrefix": "ingress_upstream_8082", + "rds": { + "configSource": { + "ads": {}, + "resourceApiVersion": "V3" + }, + "routeConfigName": "8082" + }, + "httpFilters": [ + { + "name": "envoy.filters.http.router", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.http.router.v3.Router" + } + } + ], + "tracing": { + "randomSampling": {} + } + } + } + ], + 
"transportSocket": { + "name": "tls", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext", + "commonTlsContext": { + "tlsParams": { + "tlsMaximumProtocolVersion": "TLSv1_3" + }, + "tlsCertificates": [ + { + "certificateChain": { + "inlineString": "-----BEGIN CERTIFICATE-----\nMIICjDCCAjKgAwIBAgIIC5llxGV1gB8wCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowDjEMMAoG\nA1UEAxMDd2ViMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEADPv1RHVNRfa2VKR\nAB16b6rZnEt7tuhaxCFpQXPj7M2omb0B9Favq5E0ivpNtv1QnFhxtPd7d5k4e+T7\nSkW1TaOCAXIwggFuMA4GA1UdDwEB/wQEAwIDuDAdBgNVHSUEFjAUBggrBgEFBQcD\nAgYIKwYBBQUHAwEwDAYDVR0TAQH/BAIwADBoBgNVHQ4EYQRfN2Q6MDc6ODc6M2E6\nNDA6MTk6NDc6YzM6NWE6YzA6YmE6NjI6ZGY6YWY6NGI6ZDQ6MDU6MjU6NzY6M2Q6\nNWE6OGQ6MTY6OGQ6Njc6NWU6MmU6YTA6MzQ6N2Q6ZGM6ZmYwagYDVR0jBGMwYYBf\nZDE6MTE6MTE6YWM6MmE6YmE6OTc6YjI6M2Y6YWM6N2I6YmQ6ZGE6YmU6YjE6OGE6\nZmM6OWE6YmE6YjU6YmM6ODM6ZTc6NWU6NDE6NmY6ZjI6NzM6OTU6NTg6MGM6ZGIw\nWQYDVR0RBFIwUIZOc3BpZmZlOi8vMTExMTExMTEtMjIyMi0zMzMzLTQ0NDQtNTU1\nNTU1NTU1NTU1LmNvbnN1bC9ucy9kZWZhdWx0L2RjL2RjMS9zdmMvd2ViMAoGCCqG\nSM49BAMCA0gAMEUCIGC3TTvvjj76KMrguVyFf4tjOqaSCRie3nmHMRNNRav7AiEA\npY0heYeK9A6iOLrzqxSerkXXQyj5e9bE4VgUnxgPU6g=\n-----END CERTIFICATE-----\n" + }, + "privateKey": { + "inlineString": "-----BEGIN EC PRIVATE KEY-----\nMHcCAQEEIMoTkpRggp3fqZzFKh82yS4LjtJI+XY+qX/7DefHFrtdoAoGCCqGSM49\nAwEHoUQDQgAEADPv1RHVNRfa2VKRAB16b6rZnEt7tuhaxCFpQXPj7M2omb0B9Fav\nq5E0ivpNtv1QnFhxtPd7d5k4e+T7SkW1TQ==\n-----END EC PRIVATE KEY-----\n" + } + } + ], + "validationContext": { + "trustedCa": { + "inlineString": "-----BEGIN CERTIFICATE-----\nMIICXDCCAgKgAwIBAgIICpZq70Z9LyUwCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowFDESMBAG\nA1UEAxMJVGVzdCBDQSAyMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEIhywH1gx\nAsMwuF3ukAI5YL2jFxH6Usnma1HFSfVyxbXX1/uoZEYrj8yCAtdU2yoHETyd+Zx2\nThhRLP79pYegCaOCATwwggE4MA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTAD\nAQH/MGgGA1UdDgRhBF9kMToxMToxMTphYzoyYTpiYTo5NzpiMjozZjphYzo3Yjpi\nZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1ZTo0MTo2ZjpmMjo3\nMzo5NTo1ODowYzpkYjBqBgNVHSMEYzBhgF9kMToxMToxMTphYzoyYTpiYTo5Nzpi\nMjozZjphYzo3YjpiZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1\nZTo0MTo2ZjpmMjo3Mzo5NTo1ODowYzpkYjA/BgNVHREEODA2hjRzcGlmZmU6Ly8x\nMTExMTExMS0yMjIyLTMzMzMtNDQ0NC01NTU1NTU1NTU1NTUuY29uc3VsMAoGCCqG\nSM49BAMCA0gAMEUCICOY0i246rQHJt8o8Oya0D5PLL1FnmsQmQqIGCi31RwnAiEA\noR5f6Ku+cig2Il8T8LJujOp2/2A72QcHZA57B13y+8o=\n-----END CERTIFICATE-----\n" + } + }, + "alpnProtocols": [ + "http/1.1" + ] + }, + "requireClientCertificate": false + } + } + } + ], + "trafficDirection": "OUTBOUND" + } + ], + "typeUrl": "type.googleapis.com/envoy.config.listener.v3.Listener", + "nonce": "00000001" +} \ No newline at end of file diff --git a/agent/xds/testdata/listeners/mesh-gateway-using-federation-control-plane.latest.golden b/agent/xds/testdata/listeners/mesh-gateway-using-federation-control-plane.latest.golden new file mode 100644 index 000000000000..7221eaf00fd6 --- /dev/null +++ b/agent/xds/testdata/listeners/mesh-gateway-using-federation-control-plane.latest.golden @@ -0,0 +1,181 @@ +{ + "versionInfo": "00000001", + "resources": [ + { + "@type": "type.googleapis.com/envoy.config.listener.v3.Listener", + "name": "default:1.2.3.4:8443", + "address": { + "socketAddress": { + "address": "1.2.3.4", + "portValue": 8443 + } + }, + "filterChains": [ + { + "filterChainMatch": { + "serverNames": [ + "*.dc2.internal.11111111-2222-3333-4444-555555555555.consul" + ] + }, + "filters": [ 
+ { + "name": "envoy.filters.network.tcp_proxy", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.network.tcp_proxy.v3.TcpProxy", + "statPrefix": "mesh_gateway_remote.default.dc2", + "cluster": "dc2.internal.11111111-2222-3333-4444-555555555555.consul" + } + } + ] + }, + { + "filterChainMatch": { + "serverNames": [ + "*.dc4.internal.11111111-2222-3333-4444-555555555555.consul" + ] + }, + "filters": [ + { + "name": "envoy.filters.network.tcp_proxy", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.network.tcp_proxy.v3.TcpProxy", + "statPrefix": "mesh_gateway_remote.default.dc4", + "cluster": "dc4.internal.11111111-2222-3333-4444-555555555555.consul" + } + } + ] + }, + { + "filterChainMatch": { + "serverNames": [ + "*.dc6.internal.11111111-2222-3333-4444-555555555555.consul" + ] + }, + "filters": [ + { + "name": "envoy.filters.network.tcp_proxy", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.network.tcp_proxy.v3.TcpProxy", + "statPrefix": "mesh_gateway_remote.default.dc6", + "cluster": "dc6.internal.11111111-2222-3333-4444-555555555555.consul" + } + } + ] + }, + { + "filterChainMatch": { + "serverNames": [ + "*.server.dc2.consul" + ] + }, + "filters": [ + { + "name": "envoy.filters.network.tcp_proxy", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.network.tcp_proxy.v3.TcpProxy", + "statPrefix": "mesh_gateway_remote.default.dc2", + "cluster": "server.dc2.consul" + } + } + ] + }, + { + "filterChainMatch": { + "serverNames": [ + "*.server.dc4.consul" + ] + }, + "filters": [ + { + "name": "envoy.filters.network.tcp_proxy", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.network.tcp_proxy.v3.TcpProxy", + "statPrefix": "mesh_gateway_remote.default.dc4", + "cluster": "server.dc4.consul" + } + } + ] + }, + { + "filterChainMatch": { + "serverNames": [ + "*.server.dc6.consul" + ] + }, + "filters": [ + { + "name": "envoy.filters.network.tcp_proxy", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.network.tcp_proxy.v3.TcpProxy", + "statPrefix": "mesh_gateway_remote.default.dc6", + "cluster": "server.dc6.consul" + } + } + ] + }, + { + "filterChainMatch": { + "serverNames": [ + "node1.server.dc1.consul" + ] + }, + "filters": [ + { + "name": "envoy.filters.network.tcp_proxy", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.network.tcp_proxy.v3.TcpProxy", + "statPrefix": "mesh_gateway_local_server.default.dc1", + "cluster": "node1.server.dc1.consul" + } + } + ] + }, + { + "filterChainMatch": { + "serverNames": [ + "node2.server.dc1.consul" + ] + }, + "filters": [ + { + "name": "envoy.filters.network.tcp_proxy", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.network.tcp_proxy.v3.TcpProxy", + "statPrefix": "mesh_gateway_local_server.default.dc1", + "cluster": "node2.server.dc1.consul" + } + } + ] + }, + { + "filters": [ + { + "name": "envoy.filters.network.sni_cluster", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.network.sni_cluster.v3.SniCluster" + } + }, + { + "name": "envoy.filters.network.tcp_proxy", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.network.tcp_proxy.v3.TcpProxy", + "statPrefix": "mesh_gateway_local.default", + "cluster": "" + } + } + ] + } + ], + "listenerFilters": [ + { + "name": "envoy.filters.listener.tls_inspector", + "typedConfig": { + "@type": 
"type.googleapis.com/envoy.extensions.filters.listener.tls_inspector.v3.TlsInspector" + } + } + ] + } + ], + "typeUrl": "type.googleapis.com/envoy.config.listener.v3.Listener", + "nonce": "00000001" +} \ No newline at end of file diff --git a/agent/xds/testdata/listeners/terminating-gateway-custom-trace-listener.latest.golden b/agent/xds/testdata/listeners/terminating-gateway-custom-trace-listener.latest.golden new file mode 100644 index 000000000000..e85075430303 --- /dev/null +++ b/agent/xds/testdata/listeners/terminating-gateway-custom-trace-listener.latest.golden @@ -0,0 +1,246 @@ +{ + "versionInfo": "00000001", + "resources": [ + { + "@type": "type.googleapis.com/envoy.config.listener.v3.Listener", + "name": "default:1.2.3.4:8443", + "address": { + "socketAddress": { + "address": "1.2.3.4", + "portValue": 8443 + } + }, + "filterChains": [ + { + "filterChainMatch": { + "serverNames": [ + "api.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul" + ] + }, + "filters": [ + { + "name": "envoy.filters.network.rbac", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.network.rbac.v3.RBAC", + "rules": {}, + "statPrefix": "connect_authz" + } + }, + { + "name": "envoy.filters.network.tcp_proxy", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.network.tcp_proxy.v3.TcpProxy", + "statPrefix": "upstream.api.default.default.dc1", + "cluster": "api.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul" + } + } + ], + "transportSocket": { + "name": "tls", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext", + "commonTlsContext": { + "tlsParams": {}, + "tlsCertificates": [ + { + "certificateChain": { + "inlineString": "-----BEGIN CERTIFICATE-----\nMIICnTCCAkKgAwIBAgIRAJrvEdaRAkSltrotd/l/j2cwCgYIKoZIzj0EAwIwgbgx\nCzAJBgNVBAYTAlVTMQswCQYDVQQIEwJDQTEWMBQGA1UEBxMNU2FuIEZyYW5jaXNj\nbzEaMBgGA1UECRMRMTAxIFNlY29uZCBTdHJlZXQxDjAMBgNVBBETBTk0MTA1MRcw\nFQYDVQQKEw5IYXNoaUNvcnAgSW5jLjE/MD0GA1UEAxM2Q29uc3VsIEFnZW50IENB\nIDk2NjM4NzM1MDkzNTU5NTIwNDk3MTQwOTU3MDY1MTc0OTg3NDMxMB4XDTIwMDQx\nNDIyMzE1MloXDTIxMDQxNDIyMzE1MlowHDEaMBgGA1UEAxMRc2VydmVyLmRjMS5j\nb25zdWwwWTATBgcqhkjOPQIBBggqhkjOPQMBBwNCAAQ4v0FoIYI0OWmxE2MR6w5l\n0pWGhc02RpsOPj/6RS1fmXMMu7JzPzwCmkGcR16RlwwhNFKCZsWpvAjVRHf/pTp+\no4HHMIHEMA4GA1UdDwEB/wQEAwIFoDAdBgNVHSUEFjAUBggrBgEFBQcDAQYIKwYB\nBQUHAwIwDAYDVR0TAQH/BAIwADApBgNVHQ4EIgQgk7kABFitAy3PluyNtmzYiC7H\njSN8W/K/OXNJQAQAscMwKwYDVR0jBCQwIoAgNKbPPepvRHXSAPTc+a/BXBzFX1qJ\ny+Zi7qtjlFX7qtUwLQYDVR0RBCYwJIIRc2VydmVyLmRjMS5jb25zdWyCCWxvY2Fs\naG9zdIcEfwAAATAKBggqhkjOPQQDAgNJADBGAiEAhP4HmN5BWysWTbQWClXaWUah\nLpBGFrvc/2cCQuyEZKsCIQD6JyYCYMArtWwZ4G499zktxrFlqfX14bqyONrxtA5I\nDw==\n-----END CERTIFICATE-----\n" + }, + "privateKey": { + "inlineString": "-----BEGIN EC PRIVATE KEY-----\nMHcCAQEEIE3KbKXHdsa0vvC1fysQaGdoJRgjRALIolI4XJanie+coAoGCCqGSM49\nAwEHoUQDQgAEOL9BaCGCNDlpsRNjEesOZdKVhoXNNkabDj4/+kUtX5lzDLuycz88\nAppBnEdekZcMITRSgmbFqbwI1UR3/6U6fg==\n-----END EC PRIVATE KEY-----\n" + } + } + ], + "validationContext": { + "trustedCa": { + "inlineString": "-----BEGIN 
CERTIFICATE-----\nMIICXDCCAgKgAwIBAgIICpZq70Z9LyUwCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowFDESMBAG\nA1UEAxMJVGVzdCBDQSAyMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEIhywH1gx\nAsMwuF3ukAI5YL2jFxH6Usnma1HFSfVyxbXX1/uoZEYrj8yCAtdU2yoHETyd+Zx2\nThhRLP79pYegCaOCATwwggE4MA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTAD\nAQH/MGgGA1UdDgRhBF9kMToxMToxMTphYzoyYTpiYTo5NzpiMjozZjphYzo3Yjpi\nZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1ZTo0MTo2ZjpmMjo3\nMzo5NTo1ODowYzpkYjBqBgNVHSMEYzBhgF9kMToxMToxMTphYzoyYTpiYTo5Nzpi\nMjozZjphYzo3YjpiZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1\nZTo0MTo2ZjpmMjo3Mzo5NTo1ODowYzpkYjA/BgNVHREEODA2hjRzcGlmZmU6Ly8x\nMTExMTExMS0yMjIyLTMzMzMtNDQ0NC01NTU1NTU1NTU1NTUuY29uc3VsMAoGCCqG\nSM49BAMCA0gAMEUCICOY0i246rQHJt8o8Oya0D5PLL1FnmsQmQqIGCi31RwnAiEA\noR5f6Ku+cig2Il8T8LJujOp2/2A72QcHZA57B13y+8o=\n-----END CERTIFICATE-----\n" + } + } + }, + "requireClientCertificate": true + } + } + }, + { + "filterChainMatch": { + "serverNames": [ + "cache.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul" + ] + }, + "filters": [ + { + "name": "envoy.filters.network.rbac", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.network.rbac.v3.RBAC", + "rules": {}, + "statPrefix": "connect_authz" + } + }, + { + "name": "envoy.filters.network.tcp_proxy", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.network.tcp_proxy.v3.TcpProxy", + "statPrefix": "upstream.cache.default.default.dc1", + "cluster": "cache.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul" + } + } + ], + "transportSocket": { + "name": "tls", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext", + "commonTlsContext": { + "tlsParams": {}, + "tlsCertificates": [ + { + "certificateChain": { + "inlineString": "-----BEGIN CERTIFICATE-----\nMIICmjCCAkGgAwIBAgIQe1ZmC0rzRwer6jaH1YIUIjAKBggqhkjOPQQDAjCBuDEL\nMAkGA1UEBhMCVVMxCzAJBgNVBAgTAkNBMRYwFAYDVQQHEw1TYW4gRnJhbmNpc2Nv\nMRowGAYDVQQJExExMDEgU2Vjb25kIFN0cmVldDEOMAwGA1UEERMFOTQxMDUxFzAV\nBgNVBAoTDkhhc2hpQ29ycCBJbmMuMT8wPQYDVQQDEzZDb25zdWwgQWdlbnQgQ0Eg\nODE5ODAwNjg0MDM0MTM3ODkyNDYxNTA1MDk0NDU3OTU1MTQxNjEwHhcNMjAwNjE5\nMTU1MjAzWhcNMjEwNjE5MTU1MjAzWjAcMRowGAYDVQQDExFzZXJ2ZXIuZGMxLmNv\nbnN1bDBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABH2aWaaa3fpQLBayheHiKlrH\n+z53m0frfGknKjOhOPVYDVHV8x0OE01negswVQbKHAtxPf1M8Zy+WbI9rK7Ua1mj\ngccwgcQwDgYDVR0PAQH/BAQDAgWgMB0GA1UdJQQWMBQGCCsGAQUFBwMBBggrBgEF\nBQcDAjAMBgNVHRMBAf8EAjAAMCkGA1UdDgQiBCDf9CPBSUwwZvpeW73oJLTmgQE2\ntW1NKpL5t1uq9WFcqDArBgNVHSMEJDAigCCPPd/NxgZB0tq2M8pdVpPj3Cr79iTv\ni4/T1ysodfMb7zAtBgNVHREEJjAkghFzZXJ2ZXIuZGMxLmNvbnN1bIIJbG9jYWxo\nb3N0hwR/AAABMAoGCCqGSM49BAMCA0cAMEQCIFCjFZAoXq0s2ied2eIBv0i1KoW5\nIhCylnKFt6iHkyDeAiBBCByTcjHRgEQmqyPojQKoO584EFiczTub9aWdnf9tEw==\n-----END CERTIFICATE-----\n" + }, + "privateKey": { + "inlineString": "-----BEGIN EC PRIVATE KEY-----\nMHcCAQEEINsen3S8xzxMrKcRZIvxXzhKDn43Tw9ttqWEFU9TqS5hoAoGCCqGSM49\nAwEHoUQDQgAEfZpZpprd+lAsFrKF4eIqWsf7PnebR+t8aScqM6E49VgNUdXzHQ4T\nTWd6CzBVBsocC3E9/UzxnL5Zsj2srtRrWQ==\n-----END EC PRIVATE KEY-----\n" + } + } + ], + "validationContext": { + "trustedCa": { + "inlineString": "-----BEGIN 
CERTIFICATE-----\nMIICXDCCAgKgAwIBAgIICpZq70Z9LyUwCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowFDESMBAG\nA1UEAxMJVGVzdCBDQSAyMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEIhywH1gx\nAsMwuF3ukAI5YL2jFxH6Usnma1HFSfVyxbXX1/uoZEYrj8yCAtdU2yoHETyd+Zx2\nThhRLP79pYegCaOCATwwggE4MA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTAD\nAQH/MGgGA1UdDgRhBF9kMToxMToxMTphYzoyYTpiYTo5NzpiMjozZjphYzo3Yjpi\nZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1ZTo0MTo2ZjpmMjo3\nMzo5NTo1ODowYzpkYjBqBgNVHSMEYzBhgF9kMToxMToxMTphYzoyYTpiYTo5Nzpi\nMjozZjphYzo3YjpiZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1\nZTo0MTo2ZjpmMjo3Mzo5NTo1ODowYzpkYjA/BgNVHREEODA2hjRzcGlmZmU6Ly8x\nMTExMTExMS0yMjIyLTMzMzMtNDQ0NC01NTU1NTU1NTU1NTUuY29uc3VsMAoGCCqG\nSM49BAMCA0gAMEUCICOY0i246rQHJt8o8Oya0D5PLL1FnmsQmQqIGCi31RwnAiEA\noR5f6Ku+cig2Il8T8LJujOp2/2A72QcHZA57B13y+8o=\n-----END CERTIFICATE-----\n" + } + } + }, + "requireClientCertificate": true + } + } + }, + { + "filterChainMatch": { + "serverNames": [ + "db.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul" + ] + }, + "filters": [ + { + "name": "envoy.filters.network.rbac", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.network.rbac.v3.RBAC", + "rules": {}, + "statPrefix": "connect_authz" + } + }, + { + "name": "envoy.filters.network.tcp_proxy", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.network.tcp_proxy.v3.TcpProxy", + "statPrefix": "upstream.db.default.default.dc1", + "cluster": "db.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul" + } + } + ], + "transportSocket": { + "name": "tls", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext", + "commonTlsContext": { + "tlsParams": {}, + "tlsCertificates": [ + { + "certificateChain": { + "inlineString": "-----BEGIN CERTIFICATE-----\nMIICnTCCAkOgAwIBAgIRAKF+qDJbaOULNL1TIatrsBowCgYIKoZIzj0EAwIwgbkx\nCzAJBgNVBAYTAlVTMQswCQYDVQQIEwJDQTEWMBQGA1UEBxMNU2FuIEZyYW5jaXNj\nbzEaMBgGA1UECRMRMTAxIFNlY29uZCBTdHJlZXQxDjAMBgNVBBETBTk0MTA1MRcw\nFQYDVQQKEw5IYXNoaUNvcnAgSW5jLjFAMD4GA1UEAxM3Q29uc3VsIEFnZW50IENB\nIDE4Nzg3MDAwNjUzMDcxOTYzNTk1ODkwNTE1ODY1NjEzMDA2MTU0NDAeFw0yMDA2\nMTkxNTMxMzRaFw0yMTA2MTkxNTMxMzRaMBwxGjAYBgNVBAMTEXNlcnZlci5kYzEu\nY29uc3VsMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEdQ8Igci5f7ZvvCVsxXt9\ntLfvczD+60XHg0OC0+Aka7ZjQfbEjQwZbz/82EwPoS7Dqo3LTK4IuelOimoNNxuk\nkaOBxzCBxDAOBgNVHQ8BAf8EBAMCBaAwHQYDVR0lBBYwFAYIKwYBBQUHAwEGCCsG\nAQUFBwMCMAwGA1UdEwEB/wQCMAAwKQYDVR0OBCIEILzTLkfJcdWQnTMKUcai/YJq\n0RqH1pjCqtY7SOU4gGOTMCsGA1UdIwQkMCKAIMa2vNcTEC5AGfHIYARJ/4sodX0o\nLzCj3lpw7BcEzPTcMC0GA1UdEQQmMCSCEXNlcnZlci5kYzEuY29uc3Vsgglsb2Nh\nbGhvc3SHBH8AAAEwCgYIKoZIzj0EAwIDSAAwRQIgBZ/Z4GSLEc98WvT/qjTVCNTG\n1WNaAaesVbkRx+J0yl8CIQDAVoqY9ByA5vKHjnQrxWlc/JUtJz8wudg7e/OCRriP\nSg==\n-----END CERTIFICATE-----\n" + }, + "privateKey": { + "inlineString": "-----BEGIN EC PRIVATE KEY-----\nMHcCAQEEIN1v14FaNxgY4MgjDOOWthen8dgwB0lNMs9/j2TfrnxzoAoGCCqGSM49\nAwEHoUQDQgAEdQ8Igci5f7ZvvCVsxXt9tLfvczD+60XHg0OC0+Aka7ZjQfbEjQwZ\nbz/82EwPoS7Dqo3LTK4IuelOimoNNxukkQ==\n-----END EC PRIVATE KEY-----\n" + } + } + ], + "validationContext": { + "trustedCa": { + "inlineString": "-----BEGIN 
CERTIFICATE-----\nMIICXDCCAgKgAwIBAgIICpZq70Z9LyUwCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowFDESMBAG\nA1UEAxMJVGVzdCBDQSAyMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEIhywH1gx\nAsMwuF3ukAI5YL2jFxH6Usnma1HFSfVyxbXX1/uoZEYrj8yCAtdU2yoHETyd+Zx2\nThhRLP79pYegCaOCATwwggE4MA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTAD\nAQH/MGgGA1UdDgRhBF9kMToxMToxMTphYzoyYTpiYTo5NzpiMjozZjphYzo3Yjpi\nZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1ZTo0MTo2ZjpmMjo3\nMzo5NTo1ODowYzpkYjBqBgNVHSMEYzBhgF9kMToxMToxMTphYzoyYTpiYTo5Nzpi\nMjozZjphYzo3YjpiZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1\nZTo0MTo2ZjpmMjo3Mzo5NTo1ODowYzpkYjA/BgNVHREEODA2hjRzcGlmZmU6Ly8x\nMTExMTExMS0yMjIyLTMzMzMtNDQ0NC01NTU1NTU1NTU1NTUuY29uc3VsMAoGCCqG\nSM49BAMCA0gAMEUCICOY0i246rQHJt8o8Oya0D5PLL1FnmsQmQqIGCi31RwnAiEA\noR5f6Ku+cig2Il8T8LJujOp2/2A72QcHZA57B13y+8o=\n-----END CERTIFICATE-----\n" + } + } + }, + "requireClientCertificate": true + } + } + }, + { + "filterChainMatch": { + "serverNames": [ + "web.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul" + ] + }, + "filters": [ + { + "name": "envoy.filters.network.rbac", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.network.rbac.v3.RBAC", + "rules": {}, + "statPrefix": "connect_authz" + } + }, + { + "name": "envoy.filters.network.tcp_proxy", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.network.tcp_proxy.v3.TcpProxy", + "statPrefix": "upstream.web.default.default.dc1", + "cluster": "web.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul" + } + } + ], + "transportSocket": { + "name": "tls", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext", + "commonTlsContext": { + "tlsParams": {}, + "tlsCertificates": [ + { + "certificateChain": { + "inlineString": "-----BEGIN CERTIFICATE-----\nMIICjDCCAjKgAwIBAgIIC5llxGV1gB8wCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowDjEMMAoG\nA1UEAxMDd2ViMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEADPv1RHVNRfa2VKR\nAB16b6rZnEt7tuhaxCFpQXPj7M2omb0B9Favq5E0ivpNtv1QnFhxtPd7d5k4e+T7\nSkW1TaOCAXIwggFuMA4GA1UdDwEB/wQEAwIDuDAdBgNVHSUEFjAUBggrBgEFBQcD\nAgYIKwYBBQUHAwEwDAYDVR0TAQH/BAIwADBoBgNVHQ4EYQRfN2Q6MDc6ODc6M2E6\nNDA6MTk6NDc6YzM6NWE6YzA6YmE6NjI6ZGY6YWY6NGI6ZDQ6MDU6MjU6NzY6M2Q6\nNWE6OGQ6MTY6OGQ6Njc6NWU6MmU6YTA6MzQ6N2Q6ZGM6ZmYwagYDVR0jBGMwYYBf\nZDE6MTE6MTE6YWM6MmE6YmE6OTc6YjI6M2Y6YWM6N2I6YmQ6ZGE6YmU6YjE6OGE6\nZmM6OWE6YmE6YjU6YmM6ODM6ZTc6NWU6NDE6NmY6ZjI6NzM6OTU6NTg6MGM6ZGIw\nWQYDVR0RBFIwUIZOc3BpZmZlOi8vMTExMTExMTEtMjIyMi0zMzMzLTQ0NDQtNTU1\nNTU1NTU1NTU1LmNvbnN1bC9ucy9kZWZhdWx0L2RjL2RjMS9zdmMvd2ViMAoGCCqG\nSM49BAMCA0gAMEUCIGC3TTvvjj76KMrguVyFf4tjOqaSCRie3nmHMRNNRav7AiEA\npY0heYeK9A6iOLrzqxSerkXXQyj5e9bE4VgUnxgPU6g=\n-----END CERTIFICATE-----\n" + }, + "privateKey": { + "inlineString": "-----BEGIN EC PRIVATE KEY-----\nMHcCAQEEIMoTkpRggp3fqZzFKh82yS4LjtJI+XY+qX/7DefHFrtdoAoGCCqGSM49\nAwEHoUQDQgAEADPv1RHVNRfa2VKRAB16b6rZnEt7tuhaxCFpQXPj7M2omb0B9Fav\nq5E0ivpNtv1QnFhxtPd7d5k4e+T7SkW1TQ==\n-----END EC PRIVATE KEY-----\n" + } + } + ], + "validationContext": { + "trustedCa": { + "inlineString": "-----BEGIN 
CERTIFICATE-----\nMIICXDCCAgKgAwIBAgIICpZq70Z9LyUwCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowFDESMBAG\nA1UEAxMJVGVzdCBDQSAyMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEIhywH1gx\nAsMwuF3ukAI5YL2jFxH6Usnma1HFSfVyxbXX1/uoZEYrj8yCAtdU2yoHETyd+Zx2\nThhRLP79pYegCaOCATwwggE4MA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTAD\nAQH/MGgGA1UdDgRhBF9kMToxMToxMTphYzoyYTpiYTo5NzpiMjozZjphYzo3Yjpi\nZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1ZTo0MTo2ZjpmMjo3\nMzo5NTo1ODowYzpkYjBqBgNVHSMEYzBhgF9kMToxMToxMTphYzoyYTpiYTo5Nzpi\nMjozZjphYzo3YjpiZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1\nZTo0MTo2ZjpmMjo3Mzo5NTo1ODowYzpkYjA/BgNVHREEODA2hjRzcGlmZmU6Ly8x\nMTExMTExMS0yMjIyLTMzMzMtNDQ0NC01NTU1NTU1NTU1NTUuY29uc3VsMAoGCCqG\nSM49BAMCA0gAMEUCICOY0i246rQHJt8o8Oya0D5PLL1FnmsQmQqIGCi31RwnAiEA\noR5f6Ku+cig2Il8T8LJujOp2/2A72QcHZA57B13y+8o=\n-----END CERTIFICATE-----\n" + } + } + }, + "requireClientCertificate": true + } + } + }, + { + "filters": [ + { + "name": "envoy.filters.network.sni_cluster", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.network.sni_cluster.v3.SniCluster" + } + }, + { + "name": "envoy.filters.network.tcp_proxy", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.network.tcp_proxy.v3.TcpProxy", + "statPrefix": "terminating_gateway.default", + "cluster": "" + } + } + ] + } + ], + "listenerFilters": [ + { + "name": "envoy.filters.listener.tls_inspector", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.listener.tls_inspector.v3.TlsInspector" + } + } + ], + "trafficDirection": "INBOUND" + } + ], + "typeUrl": "type.googleapis.com/envoy.config.listener.v3.Listener", + "nonce": "00000001" +} \ No newline at end of file diff --git a/agent/xds/testdata/routes/api-gateway-with-http-route-and-inline-certificate.latest.golden b/agent/xds/testdata/routes/api-gateway-with-http-route-and-inline-certificate.latest.golden deleted file mode 100644 index a1669268ec4e..000000000000 --- a/agent/xds/testdata/routes/api-gateway-with-http-route-and-inline-certificate.latest.golden +++ /dev/null @@ -1,50 +0,0 @@ -{ - "versionInfo": "00000001", - "resources": [ - { - "@type": "type.googleapis.com/envoy.config.route.v3.RouteConfiguration", - "name": "8080", - "virtualHosts": [ - { - "name": "api-gateway-listener-9b9265b", - "domains": [ - "*", - "*:8080" - ], - "routes": [ - { - "match": { - "prefix": "/" - }, - "route": { - "cluster": "service.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul" - }, - "requestHeadersToAdd": [ - { - "header": { - "key": "X-Header-Add", - "value": "added" - }, - "append": true - }, - { - "header": { - "key": "X-Header-Set", - "value": "set" - }, - "append": false - } - ], - "requestHeadersToRemove": [ - "X-Header-Remove" - ] - } - ] - } - ], - "validateClusters": true - } - ], - "typeUrl": "type.googleapis.com/envoy.config.route.v3.RouteConfiguration", - "nonce": "00000001" -} \ No newline at end of file diff --git a/agent/xds/testdata/routes/api-gateway-with-http-route.latest.golden b/agent/xds/testdata/routes/api-gateway-with-http-route.latest.golden new file mode 100644 index 000000000000..e921201831a7 --- /dev/null +++ b/agent/xds/testdata/routes/api-gateway-with-http-route.latest.golden @@ -0,0 +1,50 @@ +{ + "versionInfo": "00000001", + "resources": [ + { + "@type": "type.googleapis.com/envoy.config.route.v3.RouteConfiguration", + "name": "8080", + "virtualHosts": [ + { + "name": "api-gateway-listener-9b9265b", + "domains": [ + "*", + "*:8080" + ], + "routes": 
[ + { + "match": { + "prefix": "/" + }, + "route": { + "cluster": "service.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul" + }, + "requestHeadersToAdd": [ + { + "header": { + "key": "X-Header-Add", + "value": "added" + }, + "append": true + }, + { + "header": { + "key": "X-Header-Set", + "value": "set" + }, + "append": false + } + ], + "requestHeadersToRemove": [ + "X-Header-Remove" + ] + } + ] + } + ], + "validateClusters": true + } + ], + "typeUrl": "type.googleapis.com/envoy.config.route.v3.RouteConfiguration", + "nonce": "00000001" +} \ No newline at end of file diff --git a/agent/xds/testdata/routes/api-gateway-with-multiple-inline-certificates.latest.golden b/agent/xds/testdata/routes/api-gateway-with-multiple-inline-certificates.latest.golden new file mode 100644 index 000000000000..306f5220e7b9 --- /dev/null +++ b/agent/xds/testdata/routes/api-gateway-with-multiple-inline-certificates.latest.golden @@ -0,0 +1,5 @@ +{ + "versionInfo": "00000001", + "typeUrl": "type.googleapis.com/envoy.config.route.v3.RouteConfiguration", + "nonce": "00000001" +} \ No newline at end of file diff --git a/agent/xds/testdata/routes/connect-proxy-with-peered-upstreams-listener-override.latest.golden b/agent/xds/testdata/routes/connect-proxy-with-peered-upstreams-listener-override.latest.golden new file mode 100644 index 000000000000..9c050cbe6b4d --- /dev/null +++ b/agent/xds/testdata/routes/connect-proxy-with-peered-upstreams-listener-override.latest.golden @@ -0,0 +1,5 @@ +{ + "versionInfo": "00000001", + "typeUrl": "type.googleapis.com/envoy.config.route.v3.RouteConfiguration", + "nonce": "00000001" +} \ No newline at end of file diff --git a/agent/xds/testdata/secrets/api-gateway-with-http-route.latest.golden b/agent/xds/testdata/secrets/api-gateway-with-http-route.latest.golden new file mode 100644 index 000000000000..e6c25e165c65 --- /dev/null +++ b/agent/xds/testdata/secrets/api-gateway-with-http-route.latest.golden @@ -0,0 +1,5 @@ +{ + "versionInfo": "00000001", + "typeUrl": "type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.Secret", + "nonce": "00000001" +} \ No newline at end of file diff --git a/agent/xds/testdata/secrets/api-gateway-with-multiple-inline-certificates.latest.golden b/agent/xds/testdata/secrets/api-gateway-with-multiple-inline-certificates.latest.golden new file mode 100644 index 000000000000..e6c25e165c65 --- /dev/null +++ b/agent/xds/testdata/secrets/api-gateway-with-multiple-inline-certificates.latest.golden @@ -0,0 +1,5 @@ +{ + "versionInfo": "00000001", + "typeUrl": "type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.Secret", + "nonce": "00000001" +} \ No newline at end of file diff --git a/agent/xds/testdata/secrets/connect-proxy-with-peered-upstreams-listener-override.latest.golden b/agent/xds/testdata/secrets/connect-proxy-with-peered-upstreams-listener-override.latest.golden new file mode 100644 index 000000000000..95612291de70 --- /dev/null +++ b/agent/xds/testdata/secrets/connect-proxy-with-peered-upstreams-listener-override.latest.golden @@ -0,0 +1,5 @@ +{ + "versionInfo": "00000001", + "typeUrl": "type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.Secret", + "nonce": "00000001" +} \ No newline at end of file From 03cf37e7b8e403cc32a61ece6a3c16863b82da97 Mon Sep 17 00:00:00 2001 From: Ronald Date: Mon, 17 Jul 2023 14:11:19 -0400 Subject: [PATCH 40/43] Re-order expected/actual for assertContainerState in consul container tests (#18157) Re-order expected/actual, consul container tests --- GNUmakefile | 2 +- 
test/integration/consul-container/libs/assert/service.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/GNUmakefile b/GNUmakefile index 79080311c48b..a7c304c5a1aa 100644 --- a/GNUmakefile +++ b/GNUmakefile @@ -54,7 +54,7 @@ QUIET= endif ifeq ("$(GOTAGS)","") -CONSUL_COMPAT_TEST_IMAGE=consul +CONSUL_COMPAT_TEST_IMAGE=hashicorp/consul else CONSUL_COMPAT_TEST_IMAGE=hashicorp/consul-enterprise endif diff --git a/test/integration/consul-container/libs/assert/service.go b/test/integration/consul-container/libs/assert/service.go index 370fbc857db8..c0a8197ff37b 100644 --- a/test/integration/consul-container/libs/assert/service.go +++ b/test/integration/consul-container/libs/assert/service.go @@ -252,5 +252,5 @@ func AssertFortioNameWithClient(t *testing.T, urlbase string, name string, reqHo func AssertContainerState(t *testing.T, service libservice.Service, state string) { containerStatus, err := service.GetStatus() require.NoError(t, err) - require.Equal(t, containerStatus, state, fmt.Sprintf("Expected: %s. Got %s", containerStatus, state)) + require.Equal(t, containerStatus, state, fmt.Sprintf("Expected: %s. Got %s", state, containerStatus)) } From 07fce869af2c5c79ef81409e82dfa9347c0201da Mon Sep 17 00:00:00 2001 From: wangxinyi7 <121973291+wangxinyi7@users.noreply.github.com> Date: Mon, 17 Jul 2023 12:27:24 -0700 Subject: [PATCH 41/43] group and document make file (#17943) * group and document make file --- GNUmakefile => Makefile | 422 +++++++++--------- .../scripts/envoy-library-references.sh | 2 +- docs/README.md | 4 +- 3 files changed, 203 insertions(+), 225 deletions(-) rename GNUmakefile => Makefile (76%) diff --git a/GNUmakefile b/Makefile similarity index 76% rename from GNUmakefile rename to Makefile index a7c304c5a1aa..4f9cedddea3c 100644 --- a/GNUmakefile +++ b/Makefile @@ -66,13 +66,6 @@ BUILD_CONTAINER_NAME?=consul-builder CONSUL_IMAGE_VERSION?=latest ENVOY_VERSION?='1.25.4' -################ -# CI Variables # -################ -CI_DEV_DOCKER_NAMESPACE?=hashicorpdev -CI_DEV_DOCKER_IMAGE_NAME?=consul -CI_DEV_DOCKER_WORKDIR?=bin/ -################ CONSUL_VERSION?=$(shell cat version/VERSION) TEST_MODCACHE?=1 @@ -153,23 +146,27 @@ ifdef SKIP_DOCKER_BUILD ENVOY_INTEG_DEPS=noop endif -all: dev-build +##@ Build + +.PHONY: all +all: dev-build ## Command running by default # used to make integration dependencies conditional noop: ; -# dev creates binaries for testing locally - these are put into ./bin -dev: dev-build +.PHONY: dev +dev: dev-build ## Dev creates binaries for testing locally - these are put into ./bin -dev-build: +.PHONY: dev-build +dev-build: ## Same as dev mkdir -p bin CGO_ENABLED=0 go install -ldflags "$(GOLDFLAGS)" -tags "$(GOTAGS)" # rm needed due to signature caching (https://apple.stackexchange.com/a/428388) rm -f ./bin/consul cp ${MAIN_GOPATH}/bin/consul ./bin/consul - -dev-docker-dbg: dev-docker +.PHONY: dev-docker-dbg +dev-docker-dbg: dev-docker ## Build containers for debug mode @echo "Pulling consul container image - $(CONSUL_IMAGE_VERSION)" @docker pull hashicorp/consul:$(CONSUL_IMAGE_VERSION) >/dev/null @echo "Building Consul Development container - $(CONSUL_DEV_IMAGE)" @@ -181,7 +178,8 @@ dev-docker-dbg: dev-docker --load \ -f $(CURDIR)/build-support/docker/Consul-Dev-Dbg.dockerfile $(CURDIR)/pkg/bin/ -dev-docker: linux dev-build +.PHONY: dev-docker +dev-docker: linux dev-build ## Build and tag docker images in dev env @echo "Pulling consul container image - $(CONSUL_IMAGE_VERSION)" @docker pull hashicorp/consul:$(CONSUL_IMAGE_VERSION) 
>/dev/null @echo "Building Consul Development container - $(CONSUL_DEV_IMAGE)" @@ -196,12 +194,14 @@ dev-docker: linux dev-build -f $(CURDIR)/build-support/docker/Consul-Dev-Multiarch.dockerfile $(CURDIR)/pkg/bin/ docker tag 'consul:local' '$(CONSUL_COMPAT_TEST_IMAGE):local' -check-remote-dev-image-env: +.PHONY: check-remote-dev-image-env +check-remote-dev-image-env: ## Check remote dev image env ifndef REMOTE_DEV_IMAGE $(error REMOTE_DEV_IMAGE is undefined: set this image to /:, e.g. hashicorp/consul-k8s-dev:latest) endif -remote-docker: check-remote-dev-image-env +.PHONY: remote-docker +remote-docker: check-remote-dev-image-env ## Remote docker $(MAKE) GOARCH=amd64 linux $(MAKE) GOARCH=arm64 linux @echo "Pulling consul container image - $(CONSUL_IMAGE_VERSION)" @@ -218,36 +218,58 @@ remote-docker: check-remote-dev-image-env --push \ -f $(CURDIR)/build-support/docker/Consul-Dev-Multiarch.dockerfile $(CURDIR)/pkg/bin/ -# In CI, the linux binary will be attached from a previous step at bin/. This make target -# should only run in CI and not locally. -ci.dev-docker: - @echo "Pulling consul container image - $(CONSUL_IMAGE_VERSION)" - @docker pull hashicorp/consul:$(CONSUL_IMAGE_VERSION) >/dev/null - @echo "Building Consul Development container - $(CI_DEV_DOCKER_IMAGE_NAME)" - @docker build $(NOCACHE) $(QUIET) -t '$(CI_DEV_DOCKER_NAMESPACE)/$(CI_DEV_DOCKER_IMAGE_NAME):$(GIT_COMMIT)' \ - --build-arg CONSUL_IMAGE_VERSION=$(CONSUL_IMAGE_VERSION) \ - --label COMMIT_SHA=$(CIRCLE_SHA1) \ - --label PULL_REQUEST=$(CIRCLE_PULL_REQUEST) \ - --label CIRCLE_BUILD_URL=$(CIRCLE_BUILD_URL) \ - $(CI_DEV_DOCKER_WORKDIR) -f $(CURDIR)/build-support/docker/Consul-Dev.dockerfile - @echo $(DOCKER_PASS) | docker login -u="$(DOCKER_USER)" --password-stdin - @echo "Pushing dev image to: https://cloud.docker.com/u/hashicorpdev/repository/docker/hashicorpdev/consul" - @docker push $(CI_DEV_DOCKER_NAMESPACE)/$(CI_DEV_DOCKER_IMAGE_NAME):$(GIT_COMMIT) -ifeq ($(CIRCLE_BRANCH), main) - @docker tag $(CI_DEV_DOCKER_NAMESPACE)/$(CI_DEV_DOCKER_IMAGE_NAME):$(GIT_COMMIT) $(CI_DEV_DOCKER_NAMESPACE)/$(CI_DEV_DOCKER_IMAGE_NAME):latest - @docker push $(CI_DEV_DOCKER_NAMESPACE)/$(CI_DEV_DOCKER_IMAGE_NAME):latest -endif - -# linux builds a linux binary compatible with the source platform -linux: +linux: ## Linux builds a linux binary compatible with the source platform @mkdir -p ./pkg/bin/linux_$(GOARCH) CGO_ENABLED=0 GOOS=linux GOARCH=$(GOARCH) go build -o ./pkg/bin/linux_$(GOARCH) -ldflags "$(GOLDFLAGS)" -tags "$(GOTAGS)" -# dist builds binaries for all platforms and packages them for distribution -dist: - @$(SHELL) $(CURDIR)/build-support/scripts/release.sh -t '$(DIST_TAG)' -b '$(DIST_BUILD)' -S '$(DIST_SIGN)' $(DIST_VERSION_ARG) $(DIST_DATE_ARG) $(DIST_REL_ARG) +.PHONY: go-mod-tidy +go-mod-tidy: $(foreach mod,$(GO_MODULES),go-mod-tidy/$(mod)) ## Run go mod tidy in every module + +.PHONY: mod-tidy/% +go-mod-tidy/%: + @echo "--> Running go mod tidy ($*)" + @cd $* && go mod tidy + +##@ Checks + +.PHONY: fmt +fmt: $(foreach mod,$(GO_MODULES),fmt/$(mod)) ## Format go modules + +.PHONY: fmt/% +fmt/%: + @echo "--> Running go fmt ($*)" + @cd $* && gofmt -s -l -w . 
+ +.PHONY: lint +lint: $(foreach mod,$(GO_MODULES),lint/$(mod)) lint-container-test-deps ## Lint go modules and test deps + +.PHONY: lint/% +lint/%: + @echo "--> Running golangci-lint ($*)" + @cd $* && GOWORK=off golangci-lint run --build-tags '$(GOTAGS)' + @echo "--> Running lint-consul-retry ($*)" + @cd $* && GOWORK=off lint-consul-retry + @echo "--> Running enumcover ($*)" + @cd $* && GOWORK=off enumcover ./... + +# check that the test-container module only imports allowlisted packages +# from the root consul module. Generally we don't want to allow these imports. +# In a few specific instances though it is okay to import test definitions and +# helpers from some of the packages in the root module. +.PHONY: lint-container-test-deps +lint-container-test-deps: ## Check that the test-container module only imports allowlisted packages from the root consul module. + @echo "--> Checking container tests for bad dependencies" + @cd test/integration/consul-container && \ + $(CURDIR)/build-support/scripts/check-allowed-imports.sh \ + github.com/hashicorp/consul \ + internal/catalog/catalogtest + +##@ Testing -cover: cov +.PHONY: cover +cover: cov ## Run tests and generate coverage report + +.PHONY: cov cov: other-consul dev-build go test -tags '$(GOTAGS)' ./... -coverprofile=coverage.out cd sdk && go test -tags '$(GOTAGS)' ./... -coverprofile=../coverage.sdk.part @@ -256,17 +278,11 @@ cov: other-consul dev-build rm -f coverage.{sdk,api}.part go tool cover -html=coverage.out +.PHONY: test test: other-consul dev-build lint test-internal -.PHONY: go-mod-tidy -go-mod-tidy: $(foreach mod,$(GO_MODULES),go-mod-tidy/$(mod)) - -.PHONY: mod-tidy/% -go-mod-tidy/%: - @echo "--> Running go mod tidy ($*)" - @cd $* && go mod tidy - -test-internal: +.PHONY: test-internal +test-internal: ## Test internal @echo "--> Running go test" @rm -f test.log exit-code @# Dump verbose output to test.log so we can surface test names on failure but @@ -295,113 +311,141 @@ test-internal: @grep '^FAIL' test.log || true @if [ "$$(cat exit-code)" == "0" ] ; then echo "PASS" ; exit 0 ; else exit 1 ; fi -test-all: other-consul dev-build lint $(foreach mod,$(GO_MODULES),test-module/$(mod)) +.PHONY: test-all +test-all: other-consul dev-build lint $(foreach mod,$(GO_MODULES),test-module/$(mod)) ## Test all +.PHONY: test-module/% test-module/%: @echo "--> Running go test ($*)" cd $* && go test $(GOTEST_FLAGS) -tags '$(GOTAGS)' ./... -test-race: +.PHONY: test-race +test-race: ## Test race $(MAKE) GOTEST_FLAGS=-race -test-docker: linux go-build-image - @# -ti run in the foreground showing stdout - @# --rm removes the container once its finished running - @# GO_MODCACHE_VOL - args for mapping in the go module cache - @# GO_BUILD_CACHE_VOL - args for mapping in the go build cache - @# All the env vars are so we pass through all the relevant bits of information - @# Needed for running the tests - @# We map in our local linux_amd64 bin directory as thats where the linux dep - @# target dropped the binary. 
We could build the binary in the container too - @# but that might take longer as caching gets weird - @# Lastly we map the source dir here to the /consul workdir - @echo "Running tests within a docker container" - @docker run -ti --rm \ - -e 'GOTEST_FLAGS=$(GOTEST_FLAGS)' \ - -e 'GOTAGS=$(GOTAGS)' \ - -e 'GIT_COMMIT=$(GIT_COMMIT)' \ - -e 'GIT_COMMIT_YEAR=$(GIT_COMMIT_YEAR)' \ - -e 'GIT_DIRTY=$(GIT_DIRTY)' \ - $(TEST_PARALLELIZATION) \ - $(TEST_DOCKER_RESOURCE_CONSTRAINTS) \ - $(TEST_MODCACHE_VOL) \ - $(TEST_BUILDCACHE_VOL) \ - -v $(MAIN_GOPATH)/bin/linux_amd64/:/go/bin \ - -v $(shell pwd):/consul \ - $(GO_BUILD_TAG) \ - make test-internal - -other-consul: +.PHONY: other-consul +other-consul: ## Checking for other consul instances @echo "--> Checking for other consul instances" @if ps -ef | grep 'consul agent' | grep -v grep ; then \ echo "Found other running consul agents. This may affect your tests." ; \ exit 1 ; \ fi - -.PHONY: fmt -fmt: $(foreach mod,$(GO_MODULES),fmt/$(mod)) -.PHONY: fmt/% -fmt/%: - @echo "--> Running go fmt ($*)" - @cd $* && gofmt -s -l -w . +# Use GO_TEST_FLAGS to run specific tests: +# make test-envoy-integ GO_TEST_FLAGS="-run TestEnvoy/case-basic" +# NOTE: Always uses amd64 images, even when running on M1 macs, to match CI/CD environment. +.PHONY: test-envoy-integ +test-envoy-integ: $(ENVOY_INTEG_DEPS) ## Run integration tests. + @go test -v -timeout=30m -tags integration $(GO_TEST_FLAGS) ./test/integration/connect/envoy -.PHONY: lint -lint: $(foreach mod,$(GO_MODULES),lint/$(mod)) lint-container-test-deps +# NOTE: Use DOCKER_BUILDKIT=0, if docker build fails to resolve consul:local base image +.PHONY: test-compat-integ-setup +test-compat-integ-setup: dev-docker + @docker tag consul-dev:latest $(CONSUL_COMPAT_TEST_IMAGE):local + @docker run --rm -t $(CONSUL_COMPAT_TEST_IMAGE):local consul version + @# 'consul-envoy:target-version' is needed by compatibility integ test + @docker build -t consul-envoy:target-version --build-arg CONSUL_IMAGE=$(CONSUL_COMPAT_TEST_IMAGE):local --build-arg ENVOY_VERSION=${ENVOY_VERSION} -f ./test/integration/consul-container/assets/Dockerfile-consul-envoy ./test/integration/consul-container/assets -.PHONY: lint/% -lint/%: - @echo "--> Running golangci-lint ($*)" - @cd $* && GOWORK=off golangci-lint run --build-tags '$(GOTAGS)' - @echo "--> Running lint-consul-retry ($*)" - @cd $* && GOWORK=off lint-consul-retry - @echo "--> Running enumcover ($*)" - @cd $* && GOWORK=off enumcover ./... +.PHONY: test-compat-integ +test-compat-integ: test-compat-integ-setup ## Test compat integ +ifeq ("$(GOTESTSUM_PATH)","") + @cd ./test/integration/consul-container && \ + go test \ + -v \ + -timeout=30m \ + ./... \ + --tags $(GOTAGS) \ + --target-image $(CONSUL_COMPAT_TEST_IMAGE) \ + --target-version local \ + --latest-image $(CONSUL_COMPAT_TEST_IMAGE) \ + --latest-version latest +else + @cd ./test/integration/consul-container && \ + gotestsum \ + --format=short-verbose \ + --debug \ + --rerun-fails=3 \ + --packages="./..." \ + -- \ + --tags $(GOTAGS) \ + -timeout=30m \ + ./... \ + --target-image $(CONSUL_COMPAT_TEST_IMAGE) \ + --target-version local \ + --latest-image $(CONSUL_COMPAT_TEST_IMAGE) \ + --latest-version latest +endif -# check that the test-container module only imports allowlisted packages -# from the root consul module. Generally we don't want to allow these imports. -# In a few specific instances though it is okay to import test definitions and -# helpers from some of the packages in the root module. 
-.PHONY: lint-container-test-deps -lint-container-test-deps: - @echo "--> Checking container tests for bad dependencies" - @cd test/integration/consul-container && \ - $(CURDIR)/build-support/scripts/check-allowed-imports.sh \ - github.com/hashicorp/consul \ - internal/catalog/catalogtest +.PHONY: test-metrics-integ +test-metrics-integ: test-compat-integ-setup ## Test metrics integ + @cd ./test/integration/consul-container && \ + go test -v -timeout=7m ./test/metrics \ + --target-image $(CONSUL_COMPAT_TEST_IMAGE) \ + --target-version local \ + --latest-image $(CONSUL_COMPAT_TEST_IMAGE) \ + --latest-version latest -# Build the static web ui inside a Docker container. For local testing only; do not commit these assets. -ui: ui-docker +.PHONY: test-connect-ca-providers +test-connect-ca-providers: ## Running /agent/connect/ca tests in verbose mode + @echo "Running /agent/connect/ca tests in verbose mode" + @go test -v ./agent/connect/ca + @go test -v ./agent/consul -run Vault + @go test -v ./agent -run Vault + +##@ UI + +.PHONY: ui +ui: ui-docker ## Build the static web ui inside a Docker container. For local testing only; do not commit these assets. -# Build the static web ui with yarn. This is the version to commit. .PHONY: ui-regen -ui-regen: +ui-regen: ## Build the static web ui with yarn. This is the version to commit. cd $(CURDIR)/ui && make && cd .. rm -rf $(CURDIR)/agent/uiserver/dist mv $(CURDIR)/ui/packages/consul-ui/dist $(CURDIR)/agent/uiserver/ -tools: +.PHONY: ui-build-image +ui-build-image: ## Building UI build container + @echo "Building UI build container" + @docker build $(NOCACHE) $(QUIET) -t $(UI_BUILD_TAG) - < build-support/docker/Build-UI.dockerfile + +.PHONY: ui-docker +ui-docker: ui-build-image ## Builds ui within docker container and copy all the relevant artifacts out of the containers back to the source + @$(SHELL) $(CURDIR)/build-support/scripts/build-docker.sh ui + +##@ Tools + +.PHONY: tools +tools: ## Installs various supporting Go tools. @$(SHELL) $(CURDIR)/build-support/scripts/devtools.sh .PHONY: lint-tools -lint-tools: +lint-tools: ## Install tools for linting @$(SHELL) $(CURDIR)/build-support/scripts/devtools.sh -lint -.PHONY: proto-tools -proto-tools: - @$(SHELL) $(CURDIR)/build-support/scripts/devtools.sh -protobuf - .PHONY: codegen-tools -codegen-tools: +codegen-tools: ## Install tools for codegen @$(SHELL) $(CURDIR)/build-support/scripts/devtools.sh -codegen .PHONY: deep-copy -deep-copy: codegen-tools +deep-copy: codegen-tools ## Deep copy @$(SHELL) $(CURDIR)/agent/structs/deep-copy.sh @$(SHELL) $(CURDIR)/agent/proxycfg/deep-copy.sh @$(SHELL) $(CURDIR)/agent/consul/state/deep-copy.sh -version: +print-% : ; @echo $($*) ## utility to echo a makefile variable (i.e. 'make print-GOPATH') + +.PHONY: module-versions +module-versions: ## Print a list of modules which can be updated. 
Columns are: module current_version date_of_current_version latest_version + @go list -m -u -f '{{if .Update}} {{printf "%-50v %-40s" .Path .Version}} {{with .Time}} {{ .Format "2006-01-02" -}} {{else}} {{printf "%9s" ""}} {{end}} {{ .Update.Version}} {{end}}' all + +.PHONY: docs +docs: ## Point your web browser to http://localhost:3000/consul to live render docs from ./website/ + make -C website + +##@ Release + +.PHONY: version +version: ## Current Consul version @echo -n "Version: " @$(SHELL) $(CURDIR)/build-support/scripts/version.sh @echo -n "Version + release: " @@ -411,26 +455,20 @@ version: @echo -n "Version + release + git: " @$(SHELL) $(CURDIR)/build-support/scripts/version.sh -r -g - +.PHONY: docker-images docker-images: go-build-image ui-build-image -go-build-image: +.PHONY: go-build-image +go-build-image: ## Building Golang build container @echo "Building Golang build container" @docker build $(NOCACHE) $(QUIET) -t $(GO_BUILD_TAG) - < build-support/docker/Build-Go.dockerfile -ui-build-image: - @echo "Building UI build container" - @docker build $(NOCACHE) $(QUIET) -t $(UI_BUILD_TAG) - < build-support/docker/Build-UI.dockerfile - -# Builds consul in a docker container and then dumps executable into ./pkg/bin/... -consul-docker: go-build-image +.PHONY: consul-docker +consul-docker: go-build-image ## Builds consul in a docker container and then dumps executable into ./pkg/bin/... @$(SHELL) $(CURDIR)/build-support/scripts/build-docker.sh consul -ui-docker: ui-build-image - @$(SHELL) $(CURDIR)/build-support/scripts/build-docker.sh ui - -# Build image used to run integration tests locally. -docker-envoy-integ: +.PHONY: docker-envoy-integ +docker-envoy-integ: ## Build image used to run integration tests locally. $(MAKE) GOARCH=amd64 linux docker build \ --platform linux/amd64 $(NOCACHE) $(QUIET) \ @@ -439,75 +477,21 @@ docker-envoy-integ: $(CURDIR)/pkg/bin/linux_amd64 \ -f $(CURDIR)/build-support/docker/Consul-Dev.dockerfile -# Run integration tests. -# Use GO_TEST_FLAGS to run specific tests: -# make test-envoy-integ GO_TEST_FLAGS="-run TestEnvoy/case-basic" -# NOTE: Always uses amd64 images, even when running on M1 macs, to match CI/CD environment. -test-envoy-integ: $(ENVOY_INTEG_DEPS) - @go test -v -timeout=30m -tags integration $(GO_TEST_FLAGS) ./test/integration/connect/envoy - -.PHONY: test-compat-integ -test-compat-integ: test-compat-integ-setup -ifeq ("$(GOTESTSUM_PATH)","") - @cd ./test/integration/consul-container && \ - go test \ - -v \ - -timeout=30m \ - ./... \ - --tags $(GOTAGS) \ - --target-image $(CONSUL_COMPAT_TEST_IMAGE) \ - --target-version local \ - --latest-image $(CONSUL_COMPAT_TEST_IMAGE) \ - --latest-version latest -else - @cd ./test/integration/consul-container && \ - gotestsum \ - --format=short-verbose \ - --debug \ - --rerun-fails=3 \ - --packages="./..." \ - -- \ - --tags $(GOTAGS) \ - -timeout=30m \ - ./... 
\ - --target-image $(CONSUL_COMPAT_TEST_IMAGE) \ - --target-version local \ - --latest-image $(CONSUL_COMPAT_TEST_IMAGE) \ - --latest-version latest -endif - -# NOTE: Use DOCKER_BUILDKIT=0, if docker build fails to resolve consul:local base image -.PHONY: test-compat-integ-setup -test-compat-integ-setup: dev-docker - @docker tag consul-dev:latest $(CONSUL_COMPAT_TEST_IMAGE):local - @docker run --rm -t $(CONSUL_COMPAT_TEST_IMAGE):local consul version - @# 'consul-envoy:target-version' is needed by compatibility integ test - @docker build -t consul-envoy:target-version --build-arg CONSUL_IMAGE=$(CONSUL_COMPAT_TEST_IMAGE):local --build-arg ENVOY_VERSION=${ENVOY_VERSION} -f ./test/integration/consul-container/assets/Dockerfile-consul-envoy ./test/integration/consul-container/assets - -.PHONY: test-metrics-integ -test-metrics-integ: test-compat-integ-setup - @cd ./test/integration/consul-container && \ - go test -v -timeout=7m ./test/metrics \ - --target-image $(CONSUL_COMPAT_TEST_IMAGE) \ - --target-version local \ - --latest-image $(CONSUL_COMPAT_TEST_IMAGE) \ - --latest-version latest - -test-connect-ca-providers: - @echo "Running /agent/connect/ca tests in verbose mode" - @go test -v ./agent/connect/ca - @go test -v ./agent/consul -run Vault - @go test -v ./agent -run Vault +##@ Proto .PHONY: proto -proto: proto-tools proto-gen proto-mocks +proto: proto-tools proto-gen proto-mocks ## Protobuf setup command + +.PHONY: proto-tools +proto-tools: ## Install tools for protobuf + @$(SHELL) $(CURDIR)/build-support/scripts/devtools.sh -protobuf .PHONY: proto-gen -proto-gen: proto-tools +proto-gen: proto-tools ## Regenerates all Go files from protobuf definitions @$(SHELL) $(CURDIR)/build-support/scripts/protobuf.sh .PHONY: proto-mocks -proto-mocks: +proto-mocks: ## Proto mocks for dir in $(MOCKED_PB_DIRS) ; do \ cd proto-public && \ rm -f $$dir/mock*.go && \ @@ -515,11 +499,11 @@ proto-mocks: done .PHONY: proto-format -proto-format: proto-tools +proto-format: proto-tools ## Proto format @buf format -w .PHONY: proto-lint -proto-lint: proto-tools +proto-lint: proto-tools ## Proto lint @buf lint @for fn in $$(find proto -name '*.proto'); do \ if [[ "$$fn" = "proto/private/pbsubscribe/subscribe.proto" ]]; then \ @@ -534,21 +518,14 @@ proto-lint: proto-tools fi \ done -# utility to echo a makefile variable (i.e. 'make print-PROTOC_VERSION') -print-% : ; @echo $($*) - -.PHONY: module-versions -# Print a list of modules which can be updated. 
-# Columns are: module current_version date_of_current_version latest_version -module-versions: - @go list -m -u -f '{{if .Update}} {{printf "%-50v %-40s" .Path .Version}} {{with .Time}} {{ .Format "2006-01-02" -}} {{else}} {{printf "%9s" ""}} {{end}} {{ .Update.Version}} {{end}}' all +##@ Envoy .PHONY: envoy-library -envoy-library: +envoy-library: ## Ensures that all of the protobuf packages present in the github.com/envoyproxy/go-control-plane library are referenced in the consul codebase @$(SHELL) $(CURDIR)/build-support/scripts/envoy-library-references.sh .PHONY: envoy-regen -envoy-regen: +envoy-regen: ## Regenerating envoy golden files $(info regenerating envoy golden files) @for d in endpoints listeners routes clusters rbac; do \ if [[ -d "agent/xds/testdata/$${d}" ]]; then \ @@ -559,17 +536,18 @@ envoy-regen: @find "command/connect/envoy/testdata" -name '*.golden' -delete @go test -tags '$(GOTAGS)' ./command/connect/envoy -update -# Point your web browser to http://localhost:3000/consul to live render docs from ./website/ -.PHONY: docs -docs: - make -C website - +##@ Help + +# The help target prints out all targets with their descriptions organized +# beneath their categories. The categories are represented by '##@' and the +# target descriptions by '##'. The awk commands is responsible for reading the +# entire set of makefiles included in this invocation, looking for lines of the +# file as xyz: ## something, and then pretty-format the target and help. Then, +# if there's a line with ##@ something, that gets pretty-printed as a category. +# More info on the usage of ANSI control characters for terminal formatting: +# https://en.wikipedia.org/wiki/ANSI_escape_code#SGR_parameters +# More info on the awk command: +# http://linuxcommand.org/lc3_adv_awk.php .PHONY: help -help: - $(info available make targets) - $(info ----------------------) - @grep "^[a-z0-9-][a-z0-9.-]*:" GNUmakefile | cut -d':' -f1 | sort - -.PHONY: all bin dev dist cov test test-internal cover lint ui tools -.PHONY: docker-images go-build-image ui-build-image consul-docker ui-docker -.PHONY: version test-envoy-integ +help: ## Display this help. + @awk 'BEGIN {FS = ":.*##"; printf "\nUsage:\n make \033[36m\033[0m\n"} /^[a-zA-Z_0-9-]+:.*?##/ { printf " \033[36m%-15s\033[0m %s\n", $$1, $$2 } /^##@/ { printf "\n\033[1m%s\033[0m\n", substr($$0, 5) } ' $(MAKEFILE_LIST) diff --git a/build-support/scripts/envoy-library-references.sh b/build-support/scripts/envoy-library-references.sh index 7f5413bbf78f..bca15806c7e4 100644 --- a/build-support/scripts/envoy-library-references.sh +++ b/build-support/scripts/envoy-library-references.sh @@ -23,7 +23,7 @@ unset CDPATH cd "$(dirname "$0")" # build-support/scripts cd ../.. # -if [[ ! -f GNUmakefile ]] || [[ ! -f go.mod ]]; then +if [[ ! -f Makefile ]] || [[ ! -f go.mod ]]; then echo "not in root consul checkout: ${PWD}" >&2 exit 1 fi diff --git a/docs/README.md b/docs/README.md index 8bebb848c9b1..91d7b6d7d927 100644 --- a/docs/README.md +++ b/docs/README.md @@ -55,7 +55,7 @@ contain other important source related to Consul. * [.changelog] contains markdown files that are used by [hashicorp/go-changelog] to produce the [CHANGELOG.md]. * [build-support] contains bash functions and scripts used to automate. - development tasks. Generally these scripts are called from the [GNUmakefile]. + development tasks. Generally these scripts are called from the [Makefile]. * [grafana] contains the source for a [Grafana dashboard] that can be used to monitor Consul. 
@@ -66,7 +66,7 @@ contain other important source related to Consul. [hashicorp/go-changelog]: https://github.com/hashicorp/go-changelog [CHANGELOG.md]: https://github.com/hashicorp/consul/blob/main/CHANGELOG.md [build-support]: https://github.com/hashicorp/consul/tree/main/build-support -[GNUmakefile]: https://github.com/hashicorp/consul/tree/main/GNUmakefile +[Makefile]: https://github.com/hashicorp/consul/tree/main/Makefile [Grafana dashboard]: https://grafana.com/grafana/dashboards [grafana]: https://github.com/hashicorp/consul/tree/main/grafana From 62005369b54c63ccd7842c001de740ff3795d02c Mon Sep 17 00:00:00 2001 From: Nick Irvine <115657443+nfi-hashicorp@users.noreply.github.com> Date: Mon, 17 Jul 2023 15:15:22 -0700 Subject: [PATCH 42/43] =?UTF-8?q?Add=20`testing/deployer`=20(ne=C3=A9=20`c?= =?UTF-8?q?onsul-topology`)=20[NET-4610]=20(#17823)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: R.B. Boyer <4903+rboyer@users.noreply.github.com> Co-authored-by: R.B. Boyer Co-authored-by: Freddy --- .github/workflows/reusable-lint.yml | 1 + testing/deployer/.gitignore | 4 + testing/deployer/README.md | 179 ++++ testing/deployer/TODO.md | 9 + testing/deployer/go.mod | 44 + testing/deployer/go.sum | 241 ++++++ testing/deployer/sprawl/acl.go | 332 ++++++++ testing/deployer/sprawl/acl_rules.go | 160 ++++ testing/deployer/sprawl/boot.go | 520 ++++++++++++ testing/deployer/sprawl/catalog.go | 425 ++++++++++ testing/deployer/sprawl/configentries.go | 58 ++ testing/deployer/sprawl/consul.go | 98 +++ testing/deployer/sprawl/debug.go | 8 + testing/deployer/sprawl/details.go | 170 ++++ testing/deployer/sprawl/ent.go | 174 ++++ testing/deployer/sprawl/helpers.go | 11 + .../deployer/sprawl/internal/build/docker.go | 83 ++ .../deployer/sprawl/internal/runner/exec.go | 120 +++ .../deployer/sprawl/internal/secrets/store.go | 70 ++ .../deployer/sprawl/internal/tfgen/agent.go | 215 +++++ .../deployer/sprawl/internal/tfgen/digest.go | 45 + testing/deployer/sprawl/internal/tfgen/dns.go | 180 ++++ .../deployer/sprawl/internal/tfgen/docker.go | 39 + .../sprawl/internal/tfgen/docker_test.go | 15 + testing/deployer/sprawl/internal/tfgen/gen.go | 475 +++++++++++ testing/deployer/sprawl/internal/tfgen/io.go | 70 ++ .../deployer/sprawl/internal/tfgen/nodes.go | 249 ++++++ .../deployer/sprawl/internal/tfgen/prelude.go | 16 + .../deployer/sprawl/internal/tfgen/proxy.go | 87 ++ testing/deployer/sprawl/internal/tfgen/res.go | 95 +++ .../templates/container-app-dataplane.tf.tmpl | 29 + .../templates/container-app-sidecar.tf.tmpl | 31 + .../tfgen/templates/container-app.tf.tmpl | 25 + .../tfgen/templates/container-consul.tf.tmpl | 40 + .../tfgen/templates/container-coredns.tf.tmpl | 28 + .../tfgen/templates/container-mgw.tf.tmpl | 25 + .../tfgen/templates/container-pause.tf.tmpl | 38 + .../tfgen/templates/container-proxy.tf.tmpl | 33 + .../deployer/sprawl/internal/tfgen/tfgen.go | 15 + testing/deployer/sprawl/peering.go | 165 ++++ testing/deployer/sprawl/sprawl.go | 464 +++++++++++ .../deployer/sprawl/sprawltest/sprawltest.go | 202 +++++ .../deployer/sprawl/sprawltest/test_test.go | 180 ++++ testing/deployer/sprawl/tls.go | 114 +++ testing/deployer/topology/compile.go | 671 +++++++++++++++ testing/deployer/topology/default_cdp.go | 3 + testing/deployer/topology/default_consul.go | 4 + testing/deployer/topology/default_envoy.go | 3 + testing/deployer/topology/ids.go | 142 ++++ testing/deployer/topology/images.go | 123 +++ testing/deployer/topology/images_test.go | 
98 +++ testing/deployer/topology/topology.go | 787 ++++++++++++++++++ testing/deployer/topology/util.go | 17 + testing/deployer/topology/util_test.go | 11 + testing/deployer/util/consul.go | 63 ++ testing/deployer/util/files.go | 57 ++ .../deployer/util/internal/ipamutils/doc.go | 21 + .../deployer/util/internal/ipamutils/utils.go | 117 +++ .../util/internal/ipamutils/utils_test.go | 102 +++ testing/deployer/util/net.go | 17 + 60 files changed, 7818 insertions(+) create mode 100644 testing/deployer/.gitignore create mode 100644 testing/deployer/README.md create mode 100644 testing/deployer/TODO.md create mode 100644 testing/deployer/go.mod create mode 100644 testing/deployer/go.sum create mode 100644 testing/deployer/sprawl/acl.go create mode 100644 testing/deployer/sprawl/acl_rules.go create mode 100644 testing/deployer/sprawl/boot.go create mode 100644 testing/deployer/sprawl/catalog.go create mode 100644 testing/deployer/sprawl/configentries.go create mode 100644 testing/deployer/sprawl/consul.go create mode 100644 testing/deployer/sprawl/debug.go create mode 100644 testing/deployer/sprawl/details.go create mode 100644 testing/deployer/sprawl/ent.go create mode 100644 testing/deployer/sprawl/helpers.go create mode 100644 testing/deployer/sprawl/internal/build/docker.go create mode 100644 testing/deployer/sprawl/internal/runner/exec.go create mode 100644 testing/deployer/sprawl/internal/secrets/store.go create mode 100644 testing/deployer/sprawl/internal/tfgen/agent.go create mode 100644 testing/deployer/sprawl/internal/tfgen/digest.go create mode 100644 testing/deployer/sprawl/internal/tfgen/dns.go create mode 100644 testing/deployer/sprawl/internal/tfgen/docker.go create mode 100644 testing/deployer/sprawl/internal/tfgen/docker_test.go create mode 100644 testing/deployer/sprawl/internal/tfgen/gen.go create mode 100644 testing/deployer/sprawl/internal/tfgen/io.go create mode 100644 testing/deployer/sprawl/internal/tfgen/nodes.go create mode 100644 testing/deployer/sprawl/internal/tfgen/prelude.go create mode 100644 testing/deployer/sprawl/internal/tfgen/proxy.go create mode 100644 testing/deployer/sprawl/internal/tfgen/res.go create mode 100644 testing/deployer/sprawl/internal/tfgen/templates/container-app-dataplane.tf.tmpl create mode 100644 testing/deployer/sprawl/internal/tfgen/templates/container-app-sidecar.tf.tmpl create mode 100644 testing/deployer/sprawl/internal/tfgen/templates/container-app.tf.tmpl create mode 100644 testing/deployer/sprawl/internal/tfgen/templates/container-consul.tf.tmpl create mode 100644 testing/deployer/sprawl/internal/tfgen/templates/container-coredns.tf.tmpl create mode 100644 testing/deployer/sprawl/internal/tfgen/templates/container-mgw.tf.tmpl create mode 100644 testing/deployer/sprawl/internal/tfgen/templates/container-pause.tf.tmpl create mode 100644 testing/deployer/sprawl/internal/tfgen/templates/container-proxy.tf.tmpl create mode 100644 testing/deployer/sprawl/internal/tfgen/tfgen.go create mode 100644 testing/deployer/sprawl/peering.go create mode 100644 testing/deployer/sprawl/sprawl.go create mode 100644 testing/deployer/sprawl/sprawltest/sprawltest.go create mode 100644 testing/deployer/sprawl/sprawltest/test_test.go create mode 100644 testing/deployer/sprawl/tls.go create mode 100644 testing/deployer/topology/compile.go create mode 100644 testing/deployer/topology/default_cdp.go create mode 100644 testing/deployer/topology/default_consul.go create mode 100644 testing/deployer/topology/default_envoy.go create mode 100644 
testing/deployer/topology/ids.go create mode 100644 testing/deployer/topology/images.go create mode 100644 testing/deployer/topology/images_test.go create mode 100644 testing/deployer/topology/topology.go create mode 100644 testing/deployer/topology/util.go create mode 100644 testing/deployer/topology/util_test.go create mode 100644 testing/deployer/util/consul.go create mode 100644 testing/deployer/util/files.go create mode 100644 testing/deployer/util/internal/ipamutils/doc.go create mode 100644 testing/deployer/util/internal/ipamutils/utils.go create mode 100644 testing/deployer/util/internal/ipamutils/utils_test.go create mode 100644 testing/deployer/util/net.go diff --git a/.github/workflows/reusable-lint.yml b/.github/workflows/reusable-lint.yml index 9a9a26f0267e..0c87e3f03612 100644 --- a/.github/workflows/reusable-lint.yml +++ b/.github/workflows/reusable-lint.yml @@ -34,6 +34,7 @@ jobs: - "envoyextensions" - "troubleshoot" - "test/integration/consul-container" + - "testing/deployer" fail-fast: true name: lint ${{ matrix.directory }} steps: diff --git a/testing/deployer/.gitignore b/testing/deployer/.gitignore new file mode 100644 index 000000000000..5d18603464e6 --- /dev/null +++ b/testing/deployer/.gitignore @@ -0,0 +1,4 @@ +/terraform +/workdir +/sample-cli +workdir diff --git a/testing/deployer/README.md b/testing/deployer/README.md new file mode 100644 index 000000000000..604bbdb08781 --- /dev/null +++ b/testing/deployer/README.md @@ -0,0 +1,179 @@ +[![GoDoc](https://pkg.go.dev/badge/github.com/hashicorp/consul/testing/deployer)](https://pkg.go.dev/github.com/hashicorp/consul/testing/deployer) + +## Summary + +This is a Go library used to launch one or more Consul clusters that can be +peered using the cluster peering feature. Under the covers `terraform` is used +in conjunction with the +[`kreuzwerker/docker`](https://registry.terraform.io/providers/kreuzwerker/docker/latest) +provider to manage a fleet of local docker containers and networks. + +### Configuration + +The complete topology of Consul clusters is defined using a topology.Config +which allows you to define a set of networks and reference those networks when +assigning nodes and services to clusters. Both Consul clients and +`consul-dataplane` instances are supported. 
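+
+As a rough sketch (it reuses only the `topology` fields that also appear in the
+full two-cluster example below, and is not taken from any test in this patch),
+a minimal single-cluster config with one server node and one client node might
+look like:
+
+```
+cfg := &topology.Config{
+  // A single docker network shared by every node in the cluster.
+  Networks: []*topology.Network{
+    {Name: "dc1"},
+  },
+  Clusters: []*topology.Cluster{
+    {
+      Name: "dc1",
+      Nodes: []*topology.Node{
+        {
+          Kind: topology.NodeKindServer,
+          Name: "dc1-server1",
+          Addresses: []*topology.Address{
+            {Network: "dc1"},
+          },
+        },
+        {
+          // A client agent node; services could be attached here later.
+          Kind: topology.NodeKindClient,
+          Name: "dc1-client1",
+        },
+      },
+    },
+  },
+}
+```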
+ +Here is an example configuration with two peered clusters: + +``` +cfg := &topology.Config{ + Networks: []*topology.Network{ + {Name: "dc1"}, + {Name: "dc2"}, + {Name: "wan", Type: "wan"}, + }, + Clusters: []*topology.Cluster{ + { + Name: "dc1", + Nodes: []*topology.Node{ + { + Kind: topology.NodeKindServer, + Name: "dc1-server1", + Addresses: []*topology.Address{ + {Network: "dc1"}, + {Network: "wan"}, + }, + }, + { + Kind: topology.NodeKindClient, + Name: "dc1-client1", + Services: []*topology.Service{ + { + ID: topology.ServiceID{Name: "mesh-gateway"}, + Port: 8443, + EnvoyAdminPort: 19000, + IsMeshGateway: true, + }, + }, + }, + { + Kind: topology.NodeKindClient, + Name: "dc1-client2", + Services: []*topology.Service{ + { + ID: topology.ServiceID{Name: "ping"}, + Image: "rboyer/pingpong:latest", + Port: 8080, + EnvoyAdminPort: 19000, + Command: []string{ + "-bind", "0.0.0.0:8080", + "-dial", "127.0.0.1:9090", + "-pong-chaos", + "-dialfreq", "250ms", + "-name", "ping", + }, + Upstreams: []*topology.Upstream{{ + ID: topology.ServiceID{Name: "pong"}, + LocalPort: 9090, + Peer: "peer-dc2-default", + }}, + }, + }, + }, + }, + InitialConfigEntries: []api.ConfigEntry{ + &api.ExportedServicesConfigEntry{ + Name: "default", + Services: []api.ExportedService{{ + Name: "ping", + Consumers: []api.ServiceConsumer{{ + Peer: "peer-dc2-default", + }}, + }}, + }, + }, + }, + { + Name: "dc2", + Nodes: []*topology.Node{ + { + Kind: topology.NodeKindServer, + Name: "dc2-server1", + Addresses: []*topology.Address{ + {Network: "dc2"}, + {Network: "wan"}, + }, + }, + { + Kind: topology.NodeKindClient, + Name: "dc2-client1", + Services: []*topology.Service{ + { + ID: topology.ServiceID{Name: "mesh-gateway"}, + Port: 8443, + EnvoyAdminPort: 19000, + IsMeshGateway: true, + }, + }, + }, + { + Kind: topology.NodeKindDataplane, + Name: "dc2-client2", + Services: []*topology.Service{ + { + ID: topology.ServiceID{Name: "pong"}, + Image: "rboyer/pingpong:latest", + Port: 8080, + EnvoyAdminPort: 19000, + Command: []string{ + "-bind", "0.0.0.0:8080", + "-dial", "127.0.0.1:9090", + "-pong-chaos", + "-dialfreq", "250ms", + "-name", "pong", + }, + Upstreams: []*topology.Upstream{{ + ID: topology.ServiceID{Name: "ping"}, + LocalPort: 9090, + Peer: "peer-dc1-default", + }}, + }, + }, + }, + }, + InitialConfigEntries: []api.ConfigEntry{ + &api.ExportedServicesConfigEntry{ + Name: "default", + Services: []api.ExportedService{{ + Name: "pong", + Consumers: []api.ServiceConsumer{{ + Peer: "peer-dc1-default", + }}, + }}, + }, + }, + }, + }, + Peerings: []*topology.Peering{{ + Dialing: topology.PeerCluster{ + Name: "dc1", + }, + Accepting: topology.PeerCluster{ + Name: "dc2", + }, + }}, +} +``` + +Once you have a topology configuration, you simply call the appropriate +`Launch` function to validate and boot the cluster. + +You may also modify your original configuration (in some allowed ways) and call +`Relaunch` on an existing topology which will differentially adjust the running +infrastructure. This can be useful to do things like upgrade instances in place +or subtly reconfigure them. + +### For Testing + +It is meant to be consumed primarily by unit tests desiring a complex, +reasonably realistic Consul setup.
For that use case, use the `sprawl/sprawltest` wrapper: + +``` +func TestSomething(t *testing.T) { + cfg := &topology.Config{...} + sp := sprawltest.Launch(t, cfg) + // do stuff with 'sp' +} +``` \ No newline at end of file diff --git a/testing/deployer/TODO.md b/testing/deployer/TODO.md new file mode 100644 index 000000000000..2d651cd16295 --- /dev/null +++ b/testing/deployer/TODO.md @@ -0,0 +1,9 @@ +Missing things that should probably be added: + +- consul-dataplane support for running mesh gateways +- consul-dataplane health check updates (automatic; manual) +- ServerExternalAddresses in a peering; possibly rig up a DNS name for this. +- after creating a token, verify it exists on all servers before proceeding (rather than sleep looping on not-founds) +- investigate strange gRPC bug that is currently papered over +- allow services to override their mesh gateway modes +- remove some of the debug prints of various things diff --git a/testing/deployer/go.mod b/testing/deployer/go.mod new file mode 100644 index 000000000000..1f9fa9ffce66 --- /dev/null +++ b/testing/deployer/go.mod @@ -0,0 +1,44 @@ +module github.com/hashicorp/consul/testing/deployer + +go 1.20 + +require ( + github.com/google/go-cmp v0.5.9 + github.com/hashicorp/consul/api v1.20.0 + github.com/hashicorp/consul/sdk v0.13.1 + github.com/hashicorp/go-cleanhttp v0.5.2 + github.com/hashicorp/go-hclog v1.5.0 + github.com/hashicorp/go-multierror v1.1.1 + github.com/hashicorp/hcl/v2 v2.16.2 + github.com/mitchellh/copystructure v1.2.0 + github.com/rboyer/safeio v0.2.2 + github.com/stretchr/testify v1.8.2 + golang.org/x/crypto v0.7.0 +) + +require ( + github.com/agext/levenshtein v1.2.1 // indirect + github.com/apparentlymart/go-textseg/v13 v13.0.0 // indirect + github.com/armon/go-metrics v0.3.10 // indirect + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/fatih/color v1.13.0 // indirect + github.com/hashicorp/errwrap v1.1.0 // indirect + github.com/hashicorp/go-immutable-radix v1.3.1 // indirect + github.com/hashicorp/go-rootcerts v1.0.2 // indirect + github.com/hashicorp/go-uuid v1.0.3 // indirect + github.com/hashicorp/go-version v1.2.1 // indirect + github.com/hashicorp/golang-lru v0.5.4 // indirect + github.com/hashicorp/serf v0.10.1 // indirect + github.com/mattn/go-colorable v0.1.12 // indirect + github.com/mattn/go-isatty v0.0.14 // indirect + github.com/mitchellh/go-homedir v1.1.0 // indirect + github.com/mitchellh/go-wordwrap v1.0.0 // indirect + github.com/mitchellh/mapstructure v1.5.0 // indirect + github.com/mitchellh/reflectwalk v1.0.2 // indirect + github.com/pkg/errors v0.9.1 // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/zclconf/go-cty v1.12.1 // indirect + golang.org/x/sys v0.6.0 // indirect + golang.org/x/text v0.8.0 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect +) diff --git a/testing/deployer/go.sum b/testing/deployer/go.sum new file mode 100644 index 000000000000..f0997b372682 --- /dev/null +++ b/testing/deployer/go.sum @@ -0,0 +1,241 @@ +github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= +github.com/agext/levenshtein v1.2.1 h1:QmvMAjj2aEICytGiWzmxoE0x2KZvE0fvmqMOfy2tjT8= +github.com/agext/levenshtein v1.2.1/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558= +github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
+github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/apparentlymart/go-textseg/v13 v13.0.0 h1:Y+KvPE1NYz0xl601PVImeQfFyEy6iT90AvPUL1NNfNw= +github.com/apparentlymart/go-textseg/v13 v13.0.0/go.mod h1:ZK2fH7c4NqDTLtiYLvIkEghdlcqw7yxLeM89kiTRPUo= +github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= +github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= +github.com/armon/go-metrics v0.3.10 h1:FR+drcQStOe+32sYyJYyZ7FIdgoGGBnwLl+flodp8Uo= +github.com/armon/go-metrics v0.3.10/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc= +github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= +github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= +github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= +github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w= +github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= +github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= +github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= +github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/go-test/deep v1.0.3 h1:ZrJSEWsXzPOxaZnFteGEfooLba+ju3FYIbOrS+rQd68= +github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c h1:964Od4U6p2jUkFxvCydnIczKteheJEzHRToSGK3Bnlw= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= 
+github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/hashicorp/consul/api v1.20.0 h1:9IHTjNVSZ7MIwjlW3N3a7iGiykCMDpxZu8jsxFJh0yc= +github.com/hashicorp/consul/api v1.20.0/go.mod h1:nR64eD44KQ59Of/ECwt2vUmIK2DKsDzAwTmwmLl8Wpo= +github.com/hashicorp/consul/sdk v0.13.1 h1:EygWVWWMczTzXGpO93awkHFzfUka6hLYJ0qhETd+6lY= +github.com/hashicorp/consul/sdk v0.13.1/go.mod h1:SW/mM4LbKfqmMvcFu8v+eiQQ7oitXEFeiBe9StxERb0= +github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= +github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= +github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= +github.com/hashicorp/go-hclog v1.5.0 h1:bI2ocEMgcVlz55Oj1xZNBsVi900c7II+fWDyV9o+13c= +github.com/hashicorp/go-hclog v1.5.0/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= +github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-immutable-radix v1.3.1 h1:DKHmCUm2hRBK510BaiZlwvpD40f8bJFeZnpfm2KLowc= +github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-msgpack v0.5.3 h1:zKjpN5BK/P5lMYrLmBHdBULWbJ0XpYR+7NGzqkZzoD4= +github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= +github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= +github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA= +github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= +github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= +github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= +github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc= +github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= +github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= +github.com/hashicorp/go-sockaddr v1.0.2 h1:ztczhD1jLxIRjVejw8gFomI1BQZOe2WoVOu0SyteCQc= +github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= +github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-uuid v1.0.3 h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/Co8= +github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-version v1.2.1 h1:zEfKbn2+PDgroKdiOzqiE8rsmLqU2uwi5PB5pBJ3TkI= +github.com/hashicorp/go-version v1.2.1/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= 
+github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc= +github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= +github.com/hashicorp/hcl/v2 v2.16.2 h1:mpkHZh/Tv+xet3sy3F9Ld4FyI2tUpWe9x3XtPx9f1a0= +github.com/hashicorp/hcl/v2 v2.16.2/go.mod h1:JRmR89jycNkrrqnMmvPDMd56n1rQJ2Q6KocSLCMCXng= +github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= +github.com/hashicorp/mdns v1.0.4/go.mod h1:mtBihi+LeNXGtG8L9dX59gAEa12BDtBQSp4v/YAJqrc= +github.com/hashicorp/memberlist v0.5.0 h1:EtYPN8DpAURiapus508I4n9CzHs2W+8NZGbmmR/prTM= +github.com/hashicorp/memberlist v0.5.0/go.mod h1:yvyXLpo0QaGE59Y7hDTsTzDD25JYBZ4mHgHUZ8lrOI0= +github.com/hashicorp/serf v0.10.1 h1:Z1H2J60yRKvfDYAOZLd2MU0ND4AH/WDz7xYHDWQsIPY= +github.com/hashicorp/serf v0.10.1/go.mod h1:yL2t6BqATOLGc5HF7qbFkTfXoPIY0WZdWHfEvMqbG+4= +github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348 h1:MtvEpTB6LX3vkb4ax0b5D2DHbNAUsen0Gx5wZoq3lV4= +github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= +github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= +github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-colorable v0.1.12 h1:jF+Du6AlPIjs2BiUiQlKOX0rt3SujHxPnksPKZbaA40= +github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= +github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE= +github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= +github.com/mattn/go-isatty v0.0.14 h1:yVuAays6BHfxijgZPzw+3Zlu5yQgKGP2/hcQbHb7S9Y= +github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso= +github.com/miekg/dns v1.1.41 h1:WMszZWJG0XmzbK9FEmzH2TVcqYzFesusSIB41b8KHxY= +github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI= +github.com/mitchellh/cli v1.1.0/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI= +github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw= 
+github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= +github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= +github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-wordwrap v1.0.0 h1:6GlHJ/LTGMrIJbwgdqdl2eEH8o+Exx/0m8ir9Gns0u4= +github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= +github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= +github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= +github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= +github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY= +github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= +github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSgv7Sy7s/s= +github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= +github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= +github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.2/go.mod 
h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= +github.com/rboyer/safeio v0.2.2 h1:XhtqyUTRleMYGyBt3ni4j2BtEh669U2ry2INnnd+B4k= +github.com/rboyer/safeio v0.2.2/go.mod h1:pSnr2LFXyn/c/fotxotyOdYy7pP/XSh6MpBmzXPjiNc= +github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= +github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I= +github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= +github.com/sergi/go-diff v1.0.0 h1:Kpca3qRNrduNnOQeazBd0ysaKrUJiIuISHxogkT9RPQ= +github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8= +github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= +github.com/zclconf/go-cty v1.12.1 h1:PcupnljUm9EIvbgSHQnHhUr3fO6oFmkOrvs2BAFNXXY= +github.com/zclconf/go-cty v1.12.1/go.mod h1:s9IfD1LK5ccNMSWCVFCE2rJfHiZgi7JijgeWIMfhLvA= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY= +golang.org/x/crypto v0.7.0 h1:AvwMYaRytfdeVt3u6mLaxYtErKYjxA2OXjJ1HHq6t3A= +golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU= +golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210410081132-afb366fc7cd1/go.mod 
h1:9tjilg8BloeKEkVJvy7fQ90B1CfIiPueXVOjqfkSzI8= +golang.org/x/net v0.8.0 h1:Zrh2ngAOFYneWTAIAPethzeaQLuHwhuBkuV6ZiRnUaQ= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.6.0 h1:MVltZSvRTcU2ljQOhs94SXPftV6DCNnZViHeQps87pQ= +golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.8.0 h1:57P1ETyNKtuIjB4SRd15iJxuhj8Gc416Y78H3qgMh68= +golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod 
h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/testing/deployer/sprawl/acl.go b/testing/deployer/sprawl/acl.go new file mode 100644 index 000000000000..54f9c9a98a01 --- /dev/null +++ b/testing/deployer/sprawl/acl.go @@ -0,0 +1,332 @@ +package sprawl + +import ( + "fmt" + "strings" + "time" + + "github.com/hashicorp/consul/api" + + "github.com/hashicorp/consul/testing/deployer/sprawl/internal/secrets" + "github.com/hashicorp/consul/testing/deployer/topology" +) + +// TODO: fix this by checking that a token/policy works on ALL servers before +// returning from create. 
+func isACLNotFound(err error) bool { + if err == nil { + return false + } + return strings.Contains(err.Error(), `ACL not found`) +} + +func (s *Sprawl) bootstrapACLs(cluster string) error { + var ( + client = s.clients[cluster] + logger = s.logger.With("cluster", cluster) + mgmtToken = s.secrets.ReadGeneric(cluster, secrets.BootstrapToken) + ) + + ac := client.ACL() + + if mgmtToken != "" { + NOT_BOOTED: + ready, err := s.isACLBootstrapped(cluster, client) + if err != nil { + return fmt.Errorf("error checking if the acl system is bootstrapped: %w", err) + } else if !ready { + logger.Warn("ACL system is not ready yet") + time.Sleep(250 * time.Millisecond) + goto NOT_BOOTED + } + + TRYAGAIN: + // check to see if it works + _, _, err = ac.TokenReadSelf(&api.QueryOptions{Token: mgmtToken}) + if err != nil { + if isACLNotBootstrapped(err) { + logger.Warn("system is rebooting", "error", err) + time.Sleep(250 * time.Millisecond) + goto TRYAGAIN + } + + return fmt.Errorf("management token no longer works: %w", err) + } + + logger.Info("current management token", "token", mgmtToken) + return nil + } + +TRYAGAIN2: + logger.Info("bootstrapping ACLs") + tok, _, err := ac.Bootstrap() + if err != nil { + if isACLNotBootstrapped(err) { + logger.Warn("system is rebooting", "error", err) + time.Sleep(250 * time.Millisecond) + goto TRYAGAIN2 + } + return err + } + mgmtToken = tok.SecretID + s.secrets.SaveGeneric(cluster, secrets.BootstrapToken, mgmtToken) + + logger.Info("current management token", "token", mgmtToken) + + return nil + +} + +func isACLNotBootstrapped(err error) bool { + switch { + case strings.Contains(err.Error(), "ACL system must be bootstrapped before making any requests that require authorization"): + return true + case strings.Contains(err.Error(), "The ACL system is currently in legacy mode"): + return true + } + return false +} + +func (s *Sprawl) isACLBootstrapped(cluster string, client *api.Client) (bool, error) { + policy, _, err := client.ACL().PolicyReadByName("global-management", &api.QueryOptions{ + Token: s.secrets.ReadGeneric(cluster, secrets.BootstrapToken), + }) + if err != nil { + if strings.Contains(err.Error(), "Unexpected response code: 403 (ACL not found)") { + return false, nil + } else if isACLNotBootstrapped(err) { + return false, nil + } + return false, err + } + return policy != nil, nil +} + +func (s *Sprawl) createAnonymousToken(cluster *topology.Cluster) error { + var ( + client = s.clients[cluster.Name] + logger = s.logger.With("cluster", cluster.Name) + ) + + if err := s.createAnonymousPolicy(cluster); err != nil { + return err + } + + token, err := CreateOrUpdateToken(client, anonymousToken()) + if err != nil { + return err + } + + logger.Info("created anonymous token", + "token", token.SecretID, + ) + + return nil +} + +func (s *Sprawl) createAnonymousPolicy(cluster *topology.Cluster) error { + var ( + client = s.clients[cluster.Name] + logger = s.logger.With("cluster", cluster.Name) + ) + + op, err := CreateOrUpdatePolicy(client, anonymousPolicy(cluster.Enterprise)) + if err != nil { + return err + } + + logger.Info("created anonymous policy", + "policy-name", op.Name, + "policy-id", op.ID, + ) + + return nil +} + +func (s *Sprawl) createAgentTokens(cluster *topology.Cluster) error { + var ( + client = s.clients[cluster.Name] + logger = s.logger.With("cluster", cluster.Name) + ) + + for _, node := range cluster.Nodes { + // NOTE: always create tokens even for disabled nodes. 
+ if !node.IsAgent() { + continue + } + + if tok := s.secrets.ReadAgentToken(cluster.Name, node.ID()); tok == "" { + token, err := CreateOrUpdateToken(client, tokenForNode(node, cluster.Enterprise)) + if err != nil { + return err + } + + logger.Info("created agent token", + "node", node.ID(), + "token", token.SecretID, + ) + + s.secrets.SaveAgentToken(cluster.Name, node.ID(), token.SecretID) + } + } + + return nil +} + +// Create a policy to allow super permissive catalog reads across namespace +// boundaries. +func (s *Sprawl) createCrossNamespaceCatalogReadPolicies(cluster *topology.Cluster, partition string) error { + if !cluster.Enterprise { + return nil + } + + var ( + client = s.clients[cluster.Name] + logger = s.logger.With("cluster", cluster.Name) + ) + + op, err := CreateOrUpdatePolicy(client, policyForCrossNamespaceRead(partition)) + if err != nil { + return err + } + + logger.Info("created cross-ns-catalog-read policy", + "policy-name", op.Name, + "policy-id", op.ID, + "partition", partition, + ) + + return nil +} + +func (s *Sprawl) createAllServiceTokens() error { + for _, cluster := range s.topology.Clusters { + if err := s.createServiceTokens(cluster); err != nil { + return fmt.Errorf("createServiceTokens[%s]: %w", cluster.Name, err) + } + } + return nil +} + +func (s *Sprawl) createServiceTokens(cluster *topology.Cluster) error { + var ( + client = s.clients[cluster.Name] + logger = s.logger.With("cluster", cluster.Name) + ) + + sids := make(map[topology.ServiceID]struct{}) + for _, node := range cluster.Nodes { + if !node.RunsWorkloads() || len(node.Services) == 0 || node.Disabled { + continue + } + + for _, svc := range node.Services { + sid := svc.ID + + if _, done := sids[sid]; done { + continue + } + + var overridePolicy *api.ACLPolicy + if svc.IsMeshGateway { + var err error + overridePolicy, err = CreateOrUpdatePolicy(client, policyForMeshGateway(svc, cluster.Enterprise)) + if err != nil { + return fmt.Errorf("could not create policy: %w", err) + } + } + + token, err := CreateOrUpdateToken(client, tokenForService(svc, overridePolicy, cluster.Enterprise)) + if err != nil { + return fmt.Errorf("could not create token: %w", err) + } + + logger.Info("created service token", + "service", svc.ID.Name, + "namespace", svc.ID.Namespace, + "partition", svc.ID.Partition, + "token", token.SecretID, + ) + + s.secrets.SaveServiceToken(cluster.Name, sid, token.SecretID) + + sids[sid] = struct{}{} + } + } + + return nil +} + +func CreateOrUpdateToken(client *api.Client, t *api.ACLToken) (*api.ACLToken, error) { + ac := client.ACL() + + currentToken, err := getTokenByDescription(client, t.Description, &api.QueryOptions{ + Partition: t.Partition, + Namespace: t.Namespace, + }) + if err != nil { + return nil, err + } else if currentToken != nil { + t.AccessorID = currentToken.AccessorID + t.SecretID = currentToken.SecretID + } + + if t.AccessorID != "" { + t, _, err = ac.TokenUpdate(t, nil) + } else { + t, _, err = ac.TokenCreate(t, nil) + } + if err != nil { + return nil, err + } + return t, nil +} + +func getTokenByDescription(client *api.Client, description string, opts *api.QueryOptions) (*api.ACLToken, error) { + ac := client.ACL() + tokens, _, err := ac.TokenList(opts) + if err != nil { + return nil, err + } + + for _, tokenEntry := range tokens { + if tokenEntry.Description == description { + token, _, err := ac.TokenRead(tokenEntry.AccessorID, opts) + if err != nil { + return nil, err + } + + return token, nil + } + } + return nil, nil +} + +func CreateOrUpdatePolicy(client 
*api.Client, p *api.ACLPolicy) (*api.ACLPolicy, error) { + ac := client.ACL() + + currentPolicy, _, err := ac.PolicyReadByName(p.Name, &api.QueryOptions{ + Partition: p.Partition, + Namespace: p.Namespace, + }) + + // There is a quirk about Consul 1.14.x, where: if reading a policy yields + // an empty result, we return "ACL not found". It's safe to ignore this here, + // because if the Client's ACL token truly doesn't exist, then the create fails below. + if err != nil && !strings.Contains(err.Error(), "ACL not found") { + return nil, err + } else if currentPolicy != nil { + p.ID = currentPolicy.ID + } + + if p.ID != "" { + p, _, err = ac.PolicyUpdate(p, nil) + } else { + p, _, err = ac.PolicyCreate(p, nil) + } + + if err != nil { + return nil, err + } + return p, nil +} diff --git a/testing/deployer/sprawl/acl_rules.go b/testing/deployer/sprawl/acl_rules.go new file mode 100644 index 000000000000..b024ceab539e --- /dev/null +++ b/testing/deployer/sprawl/acl_rules.go @@ -0,0 +1,160 @@ +package sprawl + +import ( + "fmt" + + "github.com/hashicorp/consul/api" + + "github.com/hashicorp/consul/testing/deployer/topology" +) + +func policyForCrossNamespaceRead(partition string) *api.ACLPolicy { + return &api.ACLPolicy{ + Name: "cross-ns-catalog-read", + Description: "cross-ns-catalog-read", + Partition: partition, + Rules: fmt.Sprintf(` +partition %[1]q { + namespace_prefix "" { + node_prefix "" { policy = "read" } + service_prefix "" { policy = "read" } + } +} +`, partition), + } +} + +const anonymousTokenAccessorID = "00000000-0000-0000-0000-000000000002" + +func anonymousToken() *api.ACLToken { + return &api.ACLToken{ + AccessorID: anonymousTokenAccessorID, + // SecretID: "anonymous", + Description: "anonymous", + Local: false, + Policies: []*api.ACLTokenPolicyLink{ + { + Name: "anonymous", + }, + }, + } +} + +func anonymousPolicy(enterprise bool) *api.ACLPolicy { + p := &api.ACLPolicy{ + Name: "anonymous", + Description: "anonymous", + } + if enterprise { + p.Rules = ` +partition_prefix "" { + namespace_prefix "" { + node_prefix "" { policy = "read" } + service_prefix "" { policy = "read" } + } +} +` + } else { + p.Rules = ` +node_prefix "" { policy = "read" } +service_prefix "" { policy = "read" } +` + } + return p +} + +func tokenForNode(node *topology.Node, enterprise bool) *api.ACLToken { + nid := node.ID() + + tokenName := "agent--" + nid.ACLString() + + token := &api.ACLToken{ + Description: tokenName, + Local: false, + NodeIdentities: []*api.ACLNodeIdentity{{ + NodeName: node.PodName(), + Datacenter: node.Datacenter, + }}, + } + if enterprise { + token.Partition = node.Partition + token.Namespace = "default" + } + return token +} + +func tokenForService(svc *topology.Service, overridePolicy *api.ACLPolicy, enterprise bool) *api.ACLToken { + token := &api.ACLToken{ + Description: "service--" + svc.ID.ACLString(), + Local: false, + } + if overridePolicy != nil { + token.Policies = []*api.ACLTokenPolicyLink{{ID: overridePolicy.ID}} + } else { + token.ServiceIdentities = []*api.ACLServiceIdentity{{ + ServiceName: svc.ID.Name, + }} + } + + if enterprise { + token.Namespace = svc.ID.Namespace + token.Partition = svc.ID.Partition + } + + return token +} + +func policyForMeshGateway(svc *topology.Service, enterprise bool) *api.ACLPolicy { + policyName := "mesh-gateway--" + svc.ID.ACLString() + + policy := &api.ACLPolicy{ + Name: policyName, + Description: policyName, + } + if enterprise { + policy.Partition = svc.ID.Partition + policy.Namespace = "default" + } + + if enterprise { + 
policy.Rules = ` +namespace_prefix "" { + service "mesh-gateway" { + policy = "write" + } + service_prefix "" { + policy = "read" + } + node_prefix "" { + policy = "read" + } +} +agent_prefix "" { + policy = "read" +} +# for peering +mesh = "write" +peering = "read" +` + } else { + policy.Rules = ` +service "mesh-gateway" { + policy = "write" +} +service_prefix "" { + policy = "read" +} +node_prefix "" { + policy = "read" +} +agent_prefix "" { + policy = "read" +} +# for peering +mesh = "write" +peering = "read" +` + } + + return policy +} diff --git a/testing/deployer/sprawl/boot.go b/testing/deployer/sprawl/boot.go new file mode 100644 index 000000000000..415788726307 --- /dev/null +++ b/testing/deployer/sprawl/boot.go @@ -0,0 +1,520 @@ +package sprawl + +import ( + "context" + "crypto/rand" + "encoding/base64" + "errors" + "fmt" + "strings" + "time" + + "github.com/hashicorp/consul/api" + "github.com/hashicorp/go-multierror" + + "github.com/hashicorp/consul/testing/deployer/sprawl/internal/build" + "github.com/hashicorp/consul/testing/deployer/sprawl/internal/secrets" + "github.com/hashicorp/consul/testing/deployer/sprawl/internal/tfgen" + "github.com/hashicorp/consul/testing/deployer/topology" + "github.com/hashicorp/consul/testing/deployer/util" +) + +const ( + sharedBootstrapToken = "root" + // sharedBootstrapToken = "ec59aa56-1996-4ff1-911a-f5d782552a13" + + sharedAgentRecoveryToken = "22082b05-05c9-4a0a-b3da-b9685ac1d688" +) + +func (s *Sprawl) launch() error { + return s.launchType(true) +} +func (s *Sprawl) relaunch() error { + return s.launchType(false) +} +func (s *Sprawl) launchType(firstTime bool) (launchErr error) { + if err := build.DockerImages(s.logger, s.runner, s.topology); err != nil { + return fmt.Errorf("build.DockerImages: %w", err) + } + + if firstTime { + // Initialize secrets the easy way for now (same in all clusters). + gossipKey, err := newGossipKey() + if err != nil { + return fmt.Errorf("newGossipKey: %w", err) + } + for _, cluster := range s.topology.Clusters { + s.secrets.SaveGeneric(cluster.Name, secrets.BootstrapToken, sharedBootstrapToken) + s.secrets.SaveGeneric(cluster.Name, secrets.AgentRecovery, sharedAgentRecoveryToken) + s.secrets.SaveGeneric(cluster.Name, secrets.GossipKey, gossipKey) + + // Give servers a copy of the bootstrap token for use as their agent tokens + // to avoid complicating the chicken/egg situation for startup. + for _, node := range cluster.Nodes { + if node.IsServer() { // include disabled + s.secrets.SaveAgentToken(cluster.Name, node.ID(), sharedBootstrapToken) + } + } + } + } + + var cleanupFuncs []func() + defer func() { + for i := len(cleanupFuncs) - 1; i >= 0; i-- { + cleanupFuncs[i]() + } + }() + + if firstTime { + var err error + s.generator, err = tfgen.NewGenerator( + s.logger.Named("tfgen"), + s.runner, + s.topology, + &s.secrets, + s.workdir, + s.license, + ) + if err != nil { + return err + } + } else { + s.generator.SetTopology(s.topology) + } + cleanupFuncs = append(cleanupFuncs, func() { + // Log the error before the cleanup so you don't have to wait to see + // the cause. + if launchErr != nil { + s.logger.Error("fatal error during launch", "error", launchErr) + } + + _ = s.generator.DestroyAllQuietly() + }) + + if firstTime { + // The networking phase is special. We have to pick a random subnet and + // hope. Once we have this established once it is immutable for future + // runs. 
+ if err := s.initNetworkingAndVolumes(); err != nil { + return fmt.Errorf("initNetworkingAndVolumes: %w", err) + } + } + + if err := s.assignIPAddresses(); err != nil { + return fmt.Errorf("assignIPAddresses: %w", err) + } + + // The previous terraform run should have made the special volume for us. + if err := s.initTLS(context.TODO()); err != nil { + return fmt.Errorf("initTLS: %w", err) + } + + if firstTime { + if err := s.createFirstTime(); err != nil { + return err + } + + s.generator.MarkLaunched() + } else { + if err := s.updateExisting(); err != nil { + return err + } + } + + if err := s.waitForPeeringEstablishment(); err != nil { + return fmt.Errorf("waitForPeeringEstablishment: %w", err) + } + + cleanupFuncs = nil // reset + + return nil +} + +func (s *Sprawl) Stop() error { + var merr error + if s.generator != nil { + if err := s.generator.DestroyAllQuietly(); err != nil { + merr = multierror.Append(merr, err) + } + } + return merr +} + +const dockerOutOfNetworksErrorMessage = `Unable to create network: Error response from daemon: Pool overlaps with other one on this address space` + +var ErrDockerNetworkCollision = errors.New("could not create one or more docker networks for use due to subnet collision") + +func (s *Sprawl) initNetworkingAndVolumes() error { + var lastErr error + for attempts := 0; attempts < 5; attempts++ { + err := s.generator.Generate(tfgen.StepNetworks) + if err != nil && strings.Contains(err.Error(), dockerOutOfNetworksErrorMessage) { + lastErr = ErrDockerNetworkCollision + s.logger.Warn(ErrDockerNetworkCollision.Error()+"; retrying", "attempt", attempts+1) + time.Sleep(1 * time.Second) + continue + } else if err != nil { + return fmt.Errorf("generator[networks]: %w", err) + } + return nil + } + + return lastErr +} + +func (s *Sprawl) assignIPAddresses() error { + // assign ips now that we have network ips known to us + for _, net := range s.topology.Networks { + if len(net.IPPool) == 0 { + return fmt.Errorf("network %q does not have any ip assignments", net.Name) + } + } + for _, cluster := range s.topology.Clusters { + for _, node := range cluster.Nodes { + for _, addr := range node.Addresses { + net, ok := s.topology.Networks[addr.Network] + if !ok { + return fmt.Errorf("unknown network %q", addr.Network) + } + addr.IPAddress = net.IPByIndex(node.Index) + } + } + } + + return nil +} + +func (s *Sprawl) initConsulServers() error { + if err := s.generator.Generate(tfgen.StepServers); err != nil { + return fmt.Errorf("generator[servers]: %w", err) + } + + // s.logger.Info("ALL", "t", jd(s.topology)) // TODO + + // Create token-less api clients first. + for _, cluster := range s.topology.Clusters { + node := cluster.FirstServer() + + var err error + s.clients[cluster.Name], err = util.ProxyAPIClient( + node.LocalProxyPort(), + node.LocalAddress(), + 8500, + "", /*no token yet*/ + ) + if err != nil { + return fmt.Errorf("error creating initial bootstrap client for cluster=%s: %w", cluster.Name, err) + } + } + + if err := s.rejoinAllConsulServers(); err != nil { + return err + } + + for _, cluster := range s.topology.Clusters { + err := s.bootstrapACLs(cluster.Name) + if err != nil { + return fmt.Errorf("bootstrap[%s]: %w", cluster.Name, err) + } + + mgmtToken := s.secrets.ReadGeneric(cluster.Name, secrets.BootstrapToken) + + // Reconfigure the clients to use a management token. 
+ node := cluster.FirstServer() + s.clients[cluster.Name], err = util.ProxyAPIClient( + node.LocalProxyPort(), + node.LocalAddress(), + 8500, + mgmtToken, + ) + if err != nil { + return fmt.Errorf("error creating final client for cluster=%s: %v", cluster.Name, err) + } + + // For some reason the grpc resolver stuff for partitions takes some + // time to get ready. + s.waitForLocalWrites(cluster, mgmtToken) + + // Create tenancies so that the ACL tokens and clients have somewhere to go. + if cluster.Enterprise { + if err := s.initTenancies(cluster); err != nil { + return fmt.Errorf("initTenancies[%s]: %w", cluster.Name, err) + } + } + + if err := s.populateInitialConfigEntries(cluster); err != nil { + return fmt.Errorf("populateInitialConfigEntries[%s]: %w", cluster.Name, err) + } + + if err := s.createAnonymousToken(cluster); err != nil { + return fmt.Errorf("createAnonymousToken[%s]: %w", cluster.Name, err) + } + + // Create tokens for all of the agents to use for anti-entropy. + // + // NOTE: this will cause the servers to roll to pick up the change to + // the acl{tokens{agent=XXX}}} section. + if err := s.createAgentTokens(cluster); err != nil { + return fmt.Errorf("createAgentTokens[%s]: %w", cluster.Name, err) + } + } + + return nil +} + +func (s *Sprawl) createFirstTime() error { + if err := s.initConsulServers(); err != nil { + return fmt.Errorf("initConsulServers: %w", err) + } + + if err := s.generator.Generate(tfgen.StepAgents); err != nil { + return fmt.Errorf("generator[agents]: %w", err) + } + for _, cluster := range s.topology.Clusters { + if err := s.waitForClientAntiEntropyOnce(cluster); err != nil { + return fmt.Errorf("waitForClientAntiEntropyOnce[%s]: %w", cluster.Name, err) + } + } + + // Ideally we start services WITH a token initially, so we pre-create them + // before running terraform for them. + if err := s.createAllServiceTokens(); err != nil { + return fmt.Errorf("createAllServiceTokens: %w", err) + } + + if err := s.registerAllServicesForDataplaneInstances(); err != nil { + return fmt.Errorf("registerAllServicesForDataplaneInstances: %w", err) + } + + // We can do this ahead, because we've incrementally run terraform as + // we went. + if err := s.registerAllServicesToAgents(); err != nil { + return fmt.Errorf("registerAllServicesToAgents: %w", err) + } + + // NOTE: start services WITH token initially + if err := s.generator.Generate(tfgen.StepServices); err != nil { + return fmt.Errorf("generator[services]: %w", err) + } + + if err := s.initPeerings(); err != nil { + return fmt.Errorf("initPeerings: %w", err) + } + return nil +} + +func (s *Sprawl) updateExisting() error { + if err := s.preRegenTasks(); err != nil { + return fmt.Errorf("preRegenTasks: %w", err) + } + + // We save all of the terraform to the end. Some of the containers will + // be a little broken until we can do stuff like register services to + // new agents, which we cannot do until they come up. + if err := s.generator.Generate(tfgen.StepRelaunch); err != nil { + return fmt.Errorf("generator[relaunch]: %w", err) + } + + if err := s.postRegenTasks(); err != nil { + return fmt.Errorf("postRegenTasks: %w", err) + } + + // TODO: enforce that peering relationships cannot change + // TODO: include a fixup version of new peerings? + + return nil +} + +func (s *Sprawl) preRegenTasks() error { + for _, cluster := range s.topology.Clusters { + // Create tenancies so that the ACL tokens and clients have somewhere to go. 
+ if cluster.Enterprise { + if err := s.initTenancies(cluster); err != nil { + return fmt.Errorf("initTenancies[%s]: %w", cluster.Name, err) + } + } + + if err := s.populateInitialConfigEntries(cluster); err != nil { + return fmt.Errorf("populateInitialConfigEntries[%s]: %w", cluster.Name, err) + } + + // Create tokens for all of the agents to use for anti-entropy. + if err := s.createAgentTokens(cluster); err != nil { + return fmt.Errorf("createAgentTokens[%s]: %w", cluster.Name, err) + } + } + + // Ideally we start services WITH a token initially, so we pre-create them + // before running terraform for them. + if err := s.createAllServiceTokens(); err != nil { + return fmt.Errorf("createAllServiceTokens: %w", err) + } + + if err := s.registerAllServicesForDataplaneInstances(); err != nil { + return fmt.Errorf("registerAllServicesForDataplaneInstances: %w", err) + } + + return nil +} + +func (s *Sprawl) postRegenTasks() error { + if err := s.rejoinAllConsulServers(); err != nil { + return err + } + + for _, cluster := range s.topology.Clusters { + var err error + + mgmtToken := s.secrets.ReadGeneric(cluster.Name, secrets.BootstrapToken) + + // Reconfigure the clients to use a management token. + node := cluster.FirstServer() + s.clients[cluster.Name], err = util.ProxyAPIClient( + node.LocalProxyPort(), + node.LocalAddress(), + 8500, + mgmtToken, + ) + if err != nil { + return fmt.Errorf("error creating final client for cluster=%s: %v", cluster.Name, err) + } + + s.waitForLeader(cluster) + + // For some reason the grpc resolver stuff for partitions takes some + // time to get ready. + s.waitForLocalWrites(cluster, mgmtToken) + } + + for _, cluster := range s.topology.Clusters { + if err := s.waitForClientAntiEntropyOnce(cluster); err != nil { + return fmt.Errorf("waitForClientAntiEntropyOnce[%s]: %w", cluster.Name, err) + } + } + + if err := s.registerAllServicesToAgents(); err != nil { + return fmt.Errorf("registerAllServicesToAgents: %w", err) + } + + return nil +} + +func (s *Sprawl) waitForLocalWrites(cluster *topology.Cluster, token string) { + var ( + client = s.clients[cluster.Name] + logger = s.logger.With("cluster", cluster.Name) + ) + tryKV := func() error { + _, err := client.KV().Put(&api.KVPair{ + Key: "local-test", + Value: []byte("payload-for-local-test-in-" + cluster.Name), + }, nil) + return err + } + tryAP := func() error { + if !cluster.Enterprise { + return nil + } + _, _, err := client.Partitions().Create(context.Background(), &api.Partition{ + Name: "placeholder", + }, &api.WriteOptions{Token: token}) + return err + } + + start := time.Now() + for attempts := 0; ; attempts++ { + if err := tryKV(); err != nil { + logger.Warn("local kv write failed; something is not ready yet", "error", err) + time.Sleep(500 * time.Millisecond) + continue + } else { + dur := time.Since(start) + logger.Info("local kv write success", "elapsed", dur, "retries", attempts) + } + + break + } + + if cluster.Enterprise { + start = time.Now() + for attempts := 0; ; attempts++ { + if err := tryAP(); err != nil { + logger.Warn("local partition write failed; something is not ready yet", "error", err) + time.Sleep(500 * time.Millisecond) + continue + } else { + dur := time.Since(start) + logger.Info("local partition write success", "elapsed", dur, "retries", attempts) + } + + break + } + } +} + +func (s *Sprawl) waitForClientAntiEntropyOnce(cluster *topology.Cluster) error { + var ( + client = s.clients[cluster.Name] + logger = s.logger.With("cluster", cluster.Name) + ) + + var ( + queryOptionList = 
cluster.PartitionQueryOptionsList() + start = time.Now() + cc = client.Catalog() + ) + for { + // Enumerate all of the nodes that are currently in the catalog. This + // will overmatch including things like fake nodes for agentless but + // that's ok. + current := make(map[topology.NodeID]*api.Node) + for _, queryOpts := range queryOptionList { + nodes, _, err := cc.Nodes(queryOpts) + if err != nil { + return err + } + for _, node := range nodes { + nid := topology.NewNodeID(node.Node, node.Partition) + current[nid] = node + } + } + + // See if we have them all. + var stragglers []topology.NodeID + for _, node := range cluster.Nodes { + if !node.IsAgent() || node.Disabled { + continue + } + nid := node.CatalogID() + + got, ok := current[nid] + if ok && len(got.TaggedAddresses) > 0 { + // this is a field that is not updated just due to serf reconcile + continue + } + + stragglers = append(stragglers, nid) + } + + if len(stragglers) == 0 { + dur := time.Since(start) + logger.Info("all nodes have posted node updates, so first anti-entropy has happened", "elapsed", dur) + return nil + } + logger.Info("not all client nodes have posted node updates yet", "nodes", stragglers) + + time.Sleep(1 * time.Second) + } +} + +func newGossipKey() (string, error) { + key := make([]byte, 16) + n, err := rand.Reader.Read(key) + if err != nil { + return "", fmt.Errorf("Error reading random data: %s", err) + } + if n != 16 { + return "", fmt.Errorf("Couldn't read enough entropy. Generate more entropy!") + } + return base64.StdEncoding.EncodeToString(key), nil +} diff --git a/testing/deployer/sprawl/catalog.go b/testing/deployer/sprawl/catalog.go new file mode 100644 index 000000000000..5da32cc51318 --- /dev/null +++ b/testing/deployer/sprawl/catalog.go @@ -0,0 +1,425 @@ +package sprawl + +import ( + "fmt" + "net/http" + "time" + + "github.com/hashicorp/consul/api" + + "github.com/hashicorp/consul/testing/deployer/topology" + "github.com/hashicorp/consul/testing/deployer/util" +) + +func (s *Sprawl) registerAllServicesToAgents() error { + for _, cluster := range s.topology.Clusters { + if err := s.registerServicesToAgents(cluster); err != nil { + return fmt.Errorf("registerServicesToAgents[%s]: %w", cluster.Name, err) + } + } + return nil +} + +func (s *Sprawl) registerAllServicesForDataplaneInstances() error { + for _, cluster := range s.topology.Clusters { + if err := s.registerServicesForDataplaneInstances(cluster); err != nil { + return fmt.Errorf("registerServicesForDataplaneInstances[%s]: %w", cluster.Name, err) + } + } + return nil +} + +func (s *Sprawl) registerServicesToAgents(cluster *topology.Cluster) error { + for _, node := range cluster.Nodes { + if !node.RunsWorkloads() || len(node.Services) == 0 || node.Disabled { + continue + } + + if !node.IsAgent() { + continue + } + + agentClient, err := util.ProxyAPIClient( + node.LocalProxyPort(), + node.LocalAddress(), + 8500, + "", /*token will be in request*/ + ) + if err != nil { + return err + } + + for _, svc := range node.Services { + if err := s.registerAgentService(agentClient, cluster, node, svc); err != nil { + return err + } + } + } + + return nil +} + +func (s *Sprawl) registerAgentService( + agentClient *api.Client, + cluster *topology.Cluster, + node *topology.Node, + svc *topology.Service, +) error { + if !node.IsAgent() { + panic("called wrong method type") + } + + if svc.IsMeshGateway { + return nil // handled at startup time for agent-full, but won't be for agent-less + } + + var ( + logger = s.logger.With("cluster", cluster.Name) + ) + + 
reg := &api.AgentServiceRegistration{ + ID: svc.ID.Name, + Name: svc.ID.Name, + Port: svc.Port, + Meta: svc.Meta, + } + if cluster.Enterprise { + reg.Namespace = svc.ID.Namespace + reg.Partition = svc.ID.Partition + } + + if !svc.DisableServiceMesh { + var upstreams []api.Upstream + for _, u := range svc.Upstreams { + uAPI := api.Upstream{ + DestinationPeer: u.Peer, + DestinationName: u.ID.Name, + LocalBindAddress: u.LocalAddress, + LocalBindPort: u.LocalPort, + // Config map[string]interface{} `json:",omitempty" bexpr:"-"` + // MeshGateway MeshGatewayConfig `json:",omitempty"` + } + if cluster.Enterprise { + uAPI.DestinationNamespace = u.ID.Namespace + if u.Peer == "" { + uAPI.DestinationPartition = u.ID.Partition + } + } + upstreams = append(upstreams, uAPI) + } + reg.Connect = &api.AgentServiceConnect{ + SidecarService: &api.AgentServiceRegistration{ + Proxy: &api.AgentServiceConnectProxyConfig{ + Upstreams: upstreams, + }, + }, + } + } + + switch { + case svc.CheckTCP != "": + chk := &api.AgentServiceCheck{ + Name: "up", + TCP: svc.CheckTCP, + Interval: "5s", + Timeout: "1s", + } + reg.Checks = append(reg.Checks, chk) + case svc.CheckHTTP != "": + chk := &api.AgentServiceCheck{ + Name: "up", + HTTP: svc.CheckHTTP, + Method: "GET", + Interval: "5s", + Timeout: "1s", + } + reg.Checks = append(reg.Checks, chk) + } + + // Switch token for every request. + hdr := make(http.Header) + hdr.Set("X-Consul-Token", s.secrets.ReadServiceToken(cluster.Name, svc.ID)) + agentClient.SetHeaders(hdr) + +RETRY: + if err := agentClient.Agent().ServiceRegister(reg); err != nil { + if isACLNotFound(err) { + time.Sleep(50 * time.Millisecond) + goto RETRY + } + return fmt.Errorf("failed to register service %q to node %q: %w", svc.ID, node.ID(), err) + } + + logger.Info("registered service to client agent", + "service", svc.ID.Name, + "node", node.Name, + "namespace", svc.ID.Namespace, + "partition", svc.ID.Partition, + ) + + return nil +} + +func (s *Sprawl) registerServicesForDataplaneInstances(cluster *topology.Cluster) error { + for _, node := range cluster.Nodes { + if !node.RunsWorkloads() || len(node.Services) == 0 || node.Disabled { + continue + } + + if !node.IsDataplane() { + continue + } + + if err := s.registerCatalogNode(cluster, node); err != nil { + return fmt.Errorf("error registering virtual node: %w", err) + } + + for _, svc := range node.Services { + if err := s.registerCatalogService(cluster, node, svc); err != nil { + return fmt.Errorf("error registering service: %w", err) + } + if !svc.DisableServiceMesh { + if err := s.registerCatalogSidecarService(cluster, node, svc); err != nil { + return fmt.Errorf("error registering sidecar service: %w", err) + } + } + } + } + + return nil +} + +func (s *Sprawl) registerCatalogNode( + cluster *topology.Cluster, + node *topology.Node, +) error { + if !node.IsDataplane() { + panic("called wrong method type") + } + + var ( + client = s.clients[cluster.Name] + logger = s.logger.With("cluster", cluster.Name) + ) + + reg := &api.CatalogRegistration{ + Node: node.PodName(), + Address: node.LocalAddress(), + NodeMeta: map[string]string{ + "dataplane-faux": "1", + }, + } + if cluster.Enterprise { + reg.Partition = node.Partition + } + + // register synthetic node +RETRY: + if _, err := client.Catalog().Register(reg, nil); err != nil { + if isACLNotFound(err) { + time.Sleep(50 * time.Millisecond) + goto RETRY + } + return fmt.Errorf("error registering virtual node %s: %w", node.ID(), err) + } + + logger.Info("virtual node created", + "node", node.ID(), + ) + + 
return nil +} + +func (s *Sprawl) registerCatalogService( + cluster *topology.Cluster, + node *topology.Node, + svc *topology.Service, +) error { + if !node.IsDataplane() { + panic("called wrong method type") + } + + var ( + client = s.clients[cluster.Name] + logger = s.logger.With("cluster", cluster.Name) + ) + + reg := serviceToCatalogRegistration(cluster, node, svc) + +RETRY: + if _, err := client.Catalog().Register(reg, nil); err != nil { + if isACLNotFound(err) { + time.Sleep(50 * time.Millisecond) + goto RETRY + } + return fmt.Errorf("error registering service %s to node %s: %w", svc.ID, node.ID(), err) + } + + logger.Info("dataplane service created", + "service", svc.ID, + "node", node.ID(), + ) + + return nil +} + +func (s *Sprawl) registerCatalogSidecarService( + cluster *topology.Cluster, + node *topology.Node, + svc *topology.Service, +) error { + if !node.IsDataplane() { + panic("called wrong method type") + } + if svc.DisableServiceMesh { + panic("not valid") + } + + var ( + client = s.clients[cluster.Name] + logger = s.logger.With("cluster", cluster.Name) + ) + + pid, reg := serviceToSidecarCatalogRegistration(cluster, node, svc) +RETRY: + if _, err := client.Catalog().Register(reg, nil); err != nil { + if isACLNotFound(err) { + time.Sleep(50 * time.Millisecond) + goto RETRY + } + return fmt.Errorf("error registering service %s to node %s: %w", svc.ID, node.ID(), err) + } + + logger.Info("dataplane sidecar service created", + "service", pid, + "node", node.ID(), + ) + + return nil +} + +func serviceToCatalogRegistration( + cluster *topology.Cluster, + node *topology.Node, + svc *topology.Service, +) *api.CatalogRegistration { + reg := &api.CatalogRegistration{ + Node: node.PodName(), + SkipNodeUpdate: true, + Service: &api.AgentService{ + Kind: api.ServiceKindTypical, + ID: svc.ID.Name, + Service: svc.ID.Name, + Meta: svc.Meta, + Port: svc.Port, + Address: node.LocalAddress(), + }, + } + if node.HasPublicAddress() { + reg.TaggedAddresses = map[string]string{ + "lan": node.LocalAddress(), + "lan_ipv4": node.LocalAddress(), + "wan": node.PublicAddress(), + "wan_ipv4": node.PublicAddress(), + } + } + if cluster.Enterprise { + reg.Partition = svc.ID.Partition + reg.Service.Namespace = svc.ID.Namespace + reg.Service.Partition = svc.ID.Partition + } + + if svc.HasCheck() { + chk := &api.HealthCheck{ + Name: "external sync", + // Type: "external-sync", + Status: "passing", // TODO + ServiceID: svc.ID.Name, + ServiceName: svc.ID.Name, + Output: "", + } + if cluster.Enterprise { + chk.Namespace = svc.ID.Namespace + chk.Partition = svc.ID.Partition + } + switch { + case svc.CheckTCP != "": + chk.Definition.TCP = svc.CheckTCP + case svc.CheckHTTP != "": + chk.Definition.HTTP = svc.CheckHTTP + chk.Definition.Method = "GET" + } + reg.Checks = append(reg.Checks, chk) + } + return reg +} + +func serviceToSidecarCatalogRegistration( + cluster *topology.Cluster, + node *topology.Node, + svc *topology.Service, +) (topology.ServiceID, *api.CatalogRegistration) { + pid := svc.ID + pid.Name += "-sidecar-proxy" + reg := &api.CatalogRegistration{ + Node: node.PodName(), + SkipNodeUpdate: true, + Service: &api.AgentService{ + Kind: api.ServiceKindConnectProxy, + ID: pid.Name, + Service: pid.Name, + Meta: svc.Meta, + Port: svc.EnvoyPublicListenerPort, + Address: node.LocalAddress(), + Proxy: &api.AgentServiceConnectProxyConfig{ + DestinationServiceName: svc.ID.Name, + DestinationServiceID: svc.ID.Name, + LocalServicePort: svc.Port, + }, + }, + Checks: []*api.HealthCheck{{ + Name: "external sync", + 
// Type: "external-sync", + Status: "passing", // TODO + ServiceID: pid.Name, + ServiceName: pid.Name, + Definition: api.HealthCheckDefinition{ + TCP: fmt.Sprintf("%s:%d", node.LocalAddress(), svc.EnvoyPublicListenerPort), + }, + Output: "", + }}, + } + if node.HasPublicAddress() { + reg.TaggedAddresses = map[string]string{ + "lan": node.LocalAddress(), + "lan_ipv4": node.LocalAddress(), + "wan": node.PublicAddress(), + "wan_ipv4": node.PublicAddress(), + } + } + if cluster.Enterprise { + reg.Partition = pid.Partition + reg.Service.Namespace = pid.Namespace + reg.Service.Partition = pid.Partition + reg.Checks[0].Namespace = pid.Namespace + reg.Checks[0].Partition = pid.Partition + } + + for _, u := range svc.Upstreams { + pu := api.Upstream{ + DestinationName: u.ID.Name, + DestinationPeer: u.Peer, + LocalBindAddress: u.LocalAddress, + LocalBindPort: u.LocalPort, + } + if cluster.Enterprise { + pu.DestinationNamespace = u.ID.Namespace + if u.Peer == "" { + pu.DestinationPartition = u.ID.Partition + } + } + reg.Service.Proxy.Upstreams = append(reg.Service.Proxy.Upstreams, pu) + } + + return pid, reg +} diff --git a/testing/deployer/sprawl/configentries.go b/testing/deployer/sprawl/configentries.go new file mode 100644 index 000000000000..ff84f0eb1a45 --- /dev/null +++ b/testing/deployer/sprawl/configentries.go @@ -0,0 +1,58 @@ +package sprawl + +import ( + "fmt" + "strings" + "time" + + "github.com/hashicorp/consul/api" + + "github.com/hashicorp/consul/testing/deployer/topology" +) + +func (s *Sprawl) populateInitialConfigEntries(cluster *topology.Cluster) error { + if len(cluster.InitialConfigEntries) == 0 { + return nil + } + + var ( + client = s.clients[cluster.Name] + logger = s.logger.With("cluster", cluster.Name) + ) + + for _, ce := range cluster.InitialConfigEntries { + _, _, err := client.ConfigEntries().Set(ce, nil) + if err != nil { + if ce.GetKind() == api.ServiceIntentions && strings.Contains(err.Error(), intentionsMigrationError) { + logger.Warn("known error writing initial config entry; trying again", + "kind", ce.GetKind(), + "name", ce.GetName(), + "namespace", ce.GetNamespace(), + "partition", ce.GetPartition(), + "error", err, + ) + + time.Sleep(500 * time.Millisecond) + continue + } + return fmt.Errorf( + "error persisting config entry kind=%q name=%q namespace=%q partition=%q: %w", + ce.GetKind(), + ce.GetName(), + ce.GetNamespace(), + ce.GetPartition(), + err, + ) + } + logger.Info("wrote initial config entry", + "kind", ce.GetKind(), + "name", ce.GetName(), + "namespace", ce.GetNamespace(), + "partition", ce.GetPartition(), + ) + } + + return nil +} + +const intentionsMigrationError = `Intentions are read only while being upgraded to config entries` diff --git a/testing/deployer/sprawl/consul.go b/testing/deployer/sprawl/consul.go new file mode 100644 index 000000000000..5abb68ac8cc1 --- /dev/null +++ b/testing/deployer/sprawl/consul.go @@ -0,0 +1,98 @@ +package sprawl + +import ( + "errors" + "fmt" + "time" + + "github.com/hashicorp/consul/api" + + "github.com/hashicorp/consul/testing/deployer/sprawl/internal/secrets" + "github.com/hashicorp/consul/testing/deployer/topology" + "github.com/hashicorp/consul/testing/deployer/util" +) + +func getLeader(client *api.Client) (string, error) { + leaderAdd, err := client.Status().Leader() + if err != nil { + return "", fmt.Errorf("could not query leader: %w", err) + } + if leaderAdd == "" { + return "", errors.New("no leader available") + } + return leaderAdd, nil +} + +func (s *Sprawl) waitForLeader(cluster *topology.Cluster) 
{ + var ( + client = s.clients[cluster.Name] + logger = s.logger.With("cluster", cluster.Name) + ) + for { + leader, err := client.Status().Leader() + if leader != "" && err == nil { + logger.Info("cluster has leader", "leader_addr", leader) + return + } + logger.Info("cluster has no leader yet", "error", err) + time.Sleep(500 * time.Millisecond) + } +} + +func (s *Sprawl) rejoinAllConsulServers() error { + // Join the servers together. + for _, cluster := range s.topology.Clusters { + if err := s.rejoinServers(cluster); err != nil { + return fmt.Errorf("rejoinServers[%s]: %w", cluster.Name, err) + } + s.waitForLeader(cluster) + } + return nil +} + +func (s *Sprawl) rejoinServers(cluster *topology.Cluster) error { + var ( + // client = s.clients[cluster.Name] + logger = s.logger.With("cluster", cluster.Name) + ) + + servers := cluster.ServerNodes() + + recoveryToken := s.secrets.ReadGeneric(cluster.Name, secrets.AgentRecovery) + + node0, rest := servers[0], servers[1:] + client, err := util.ProxyNotPooledAPIClient( + node0.LocalProxyPort(), + node0.LocalAddress(), + 8500, + recoveryToken, + ) + if err != nil { + return fmt.Errorf("could not get client for %q: %w", node0.ID(), err) + } + + logger.Info("joining servers together", + "from", node0.ID(), + "rest", nodeSliceToNodeIDSlice(rest), + ) + for _, node := range rest { + for { + err = client.Agent().Join(node.LocalAddress(), false) + if err == nil { + break + } + logger.Warn("could not join", "from", node0.ID(), "to", node.ID(), "error", err) + time.Sleep(500 * time.Millisecond) + } + } + + return nil +} + +func nodeSliceToNodeIDSlice(nodes []*topology.Node) []topology.NodeID { + var out []topology.NodeID + for _, node := range nodes { + out = append(out, node.ID()) + } + return out +} diff --git a/testing/deployer/sprawl/debug.go b/testing/deployer/sprawl/debug.go new file mode 100644 index 000000000000..e02c3eefc95f --- /dev/null +++ b/testing/deployer/sprawl/debug.go @@ -0,0 +1,8 @@ +package sprawl + +import "encoding/json" + +func jd(v any) string { + b, _ := json.MarshalIndent(v, "", " ") + return string(b) +} diff --git a/testing/deployer/sprawl/details.go b/testing/deployer/sprawl/details.go new file mode 100644 index 000000000000..401cc3b9d75c --- /dev/null +++ b/testing/deployer/sprawl/details.go @@ -0,0 +1,170 @@ +package sprawl + +import ( + "bytes" + "fmt" + "sort" + "strconv" + "strings" + "text/tabwriter" +) + +// PrintDetails will dump relevant addressing and naming data to the logger for +// human interaction purposes. 
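+// The output is a tab-aligned table that lists, per cluster, the current
+// leader plus every server, mesh gateway, and app container along with its
+// addresses and exposed host ports.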
+func (s *Sprawl) PrintDetails() error { + det := logDetails{ + TopologyID: s.topology.ID, + } + + for _, cluster := range s.topology.Clusters { + client := s.clients[cluster.Name] + + cfg, err := client.Operator().RaftGetConfiguration(nil) + if err != nil { + return fmt.Errorf("could not get raft config for cluster %q: %w", cluster.Name, err) + } + + var leaderNode string + for _, svr := range cfg.Servers { + if svr.Leader { + leaderNode = strings.TrimSuffix(svr.Node, "-pod") + } + } + + cd := clusterDetails{ + Name: cluster.Name, + Leader: leaderNode, + } + + for _, node := range cluster.Nodes { + if node.Disabled { + continue + } + + var addrs []string + for _, addr := range node.Addresses { + addrs = append(addrs, addr.Network+"="+addr.IPAddress) + } + sort.Strings(addrs) + + if node.IsServer() { + cd.Apps = append(cd.Apps, appDetail{ + Type: "server", + Container: node.DockerName(), + Addresses: addrs, + ExposedPort: node.ExposedPort(8500), + }) + } + + for _, svc := range node.Services { + if svc.IsMeshGateway { + cd.Apps = append(cd.Apps, appDetail{ + Type: "mesh-gateway", + Container: node.DockerName(), + ExposedPort: node.ExposedPort(svc.Port), + ExposedEnvoyAdminPort: node.ExposedPort(svc.EnvoyAdminPort), + Addresses: addrs, + Service: svc.ID.String(), + }) + } else { + cd.Apps = append(cd.Apps, appDetail{ + Type: "app", + Container: node.DockerName(), + ExposedPort: node.ExposedPort(svc.Port), + ExposedEnvoyAdminPort: node.ExposedPort(svc.EnvoyAdminPort), + Addresses: addrs, + Service: svc.ID.String(), + }) + } + } + } + + det.Clusters = append(det.Clusters, cd) + } + + var buf bytes.Buffer + w := tabwriter.NewWriter(&buf, 0, 0, 3, ' ', tabwriter.Debug) + + score := map[string]int{ + "server": 0, + "mesh-gateway": 1, + "app": 2, + } + + for _, cluster := range det.Clusters { + fmt.Fprintf(w, "CLUSTER\tTYPE\tCONTAINER\tNAME\tADDRS\tPORTS\t\n") + sort.Slice(cluster.Apps, func(i, j int) bool { + a := cluster.Apps[i] + b := cluster.Apps[j] + + asc := score[a.Type] + bsc := score[b.Type] + + if asc < bsc { + return true + } else if asc > bsc { + return false + } + + if a.Container < b.Container { + return true + } else if a.Container > b.Container { + return false + } + + if a.Service < b.Service { + return true + } else if a.Service > b.Service { + return false + } + + return a.ExposedPort < b.ExposedPort + }) + for _, d := range cluster.Apps { + if d.Type == "server" && d.Container == cluster.Leader { + d.Type = "leader" + } + portStr := "app=" + strconv.Itoa(d.ExposedPort) + if d.ExposedEnvoyAdminPort > 0 { + portStr += " envoy=" + strconv.Itoa(d.ExposedEnvoyAdminPort) + } + fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s\t%s\t\n", + cluster.Name, + d.Type, + d.Container, + d.Service, + strings.Join(d.Addresses, ", "), + portStr, + ) + } + fmt.Fprintf(w, "\t\t\t\t\t\n") + } + + w.Flush() + + s.logger.Info("CURRENT SPRAWL DETAILS", "details", buf.String()) + + return nil +} + +type logDetails struct { + TopologyID string + Clusters []clusterDetails +} + +type clusterDetails struct { + Name string + + Leader string + Apps []appDetail +} + +type appDetail struct { + Type string // server|mesh-gateway|app + Container string + Addresses []string + ExposedPort int `json:",omitempty"` + ExposedEnvoyAdminPort int `json:",omitempty"` + // just services + Service string `json:",omitempty"` +} diff --git a/testing/deployer/sprawl/ent.go b/testing/deployer/sprawl/ent.go new file mode 100644 index 000000000000..f6d73e0e0eae --- /dev/null +++ b/testing/deployer/sprawl/ent.go @@ -0,0 +1,174 @@ +package 
sprawl + +import ( + "bytes" + "context" + "fmt" + "os" + "strings" + "time" + + "github.com/hashicorp/consul/api" + + "github.com/hashicorp/consul/testing/deployer/topology" +) + +func (s *Sprawl) ensureLicense() error { + if s.license != "" { + return nil + } + v, err := readLicense() + if err != nil { + return err + } + s.license = v + return nil +} + +func readLicense() (string, error) { + if license := os.Getenv("CONSUL_LICENSE"); license != "" { + return license, nil + } + + licensePath := os.Getenv("CONSUL_LICENSE_PATH") + if licensePath == "" { + return "", nil + } + + licenseBytes, err := os.ReadFile(licensePath) + if err != nil { + return "", err + } + return strings.TrimSpace(string(licenseBytes)), nil +} + +func (s *Sprawl) initTenancies(cluster *topology.Cluster) error { + var ( + client = s.clients[cluster.Name] + logger = s.logger.With("cluster", cluster.Name) + ) + + // TODO: change this to UPSERT + + var ( + partClient = client.Partitions() + nsClient = client.Namespaces() + + partitionNameList []string + ) + for _, ap := range cluster.Partitions { + if ap.Name != "default" { + old, _, err := partClient.Read(context.Background(), ap.Name, nil) + if err != nil { + return fmt.Errorf("error reading partition %q: %w", ap.Name, err) + } + if old == nil { + obj := &api.Partition{ + Name: ap.Name, + } + + _, _, err := partClient.Create(context.Background(), obj, nil) + if err != nil { + return fmt.Errorf("error creating partition %q: %w", ap.Name, err) + } + logger.Info("created partition", "partition", ap.Name) + } + + partitionNameList = append(partitionNameList, ap.Name) + } + + if err := s.createCrossNamespaceCatalogReadPolicies(cluster, ap.Name); err != nil { + return fmt.Errorf("createCrossNamespaceCatalogReadPolicies[%s]: %w", ap.Name, err) + } + + for _, ns := range ap.Namespaces { + old, _, err := nsClient.Read(ns, &api.QueryOptions{Partition: ap.Name}) + if err != nil { + return err + } + + if old == nil { + obj := &api.Namespace{ + Partition: ap.Name, + Name: ns, + ACLs: &api.NamespaceACLConfig{ + PolicyDefaults: []api.ACLLink{ + {Name: "cross-ns-catalog-read"}, + }, + }, + } + if ns == "default" { + _, _, err := nsClient.Update(obj, nil) + if err != nil { + return err + } + logger.Info("updated namespace", "namespace", ns, "partition", ap.Name) + } else { + _, _, err := nsClient.Create(obj, nil) + if err != nil { + return err + } + logger.Info("created namespace", "namespace", ns, "partition", ap.Name) + } + } + } + } + + if err := s.waitUntilPartitionedSerfIsReady(context.TODO(), cluster, partitionNameList); err != nil { + return fmt.Errorf("waitUntilPartitionedSerfIsReady: %w", err) + } + + return nil +} + +func (s *Sprawl) waitUntilPartitionedSerfIsReady(ctx context.Context, cluster *topology.Cluster, partitions []string) error { + var ( + logger = s.logger.With("cluster", cluster.Name) + ) + + readyLogs := make(map[string]string) + for _, partition := range partitions { + readyLogs[partition] = `agent.server: Added serf partition to gossip network: partition=` + partition + } + + start := time.Now() + logger.Info("waiting for partitioned serf to be ready on all servers", "partitions", partitions) + for _, node := range cluster.Nodes { + if !node.IsServer() || node.Disabled { + continue + } + + var buf bytes.Buffer + for { + buf.Reset() + + err := s.runner.DockerExec(ctx, []string{ + "logs", node.DockerName(), + }, &buf, nil) + if err != nil { + return fmt.Errorf("could not fetch docker logs from node %q: %w", node.ID(), err) + } + + var ( + body = buf.String() + 
found []string + ) + + for partition, readyLog := range readyLogs { + if strings.Contains(body, readyLog) { + found = append(found, partition) + } + } + + if len(found) == len(readyLogs) { + break + } + } + + time.Sleep(500 * time.Millisecond) + } + + logger.Info("partitioned serf is ready on all servers", "partitions", partitions, "elapsed", time.Since(start)) + + return nil +} diff --git a/testing/deployer/sprawl/helpers.go b/testing/deployer/sprawl/helpers.go new file mode 100644 index 000000000000..ce546afed623 --- /dev/null +++ b/testing/deployer/sprawl/helpers.go @@ -0,0 +1,11 @@ +package sprawl + +// Deprecated: remove +func TruncateSquidError(err error) error { + return err +} + +// Deprecated: remove +func IsSquid503(err error) bool { + return false +} diff --git a/testing/deployer/sprawl/internal/build/docker.go b/testing/deployer/sprawl/internal/build/docker.go new file mode 100644 index 000000000000..88e763061e93 --- /dev/null +++ b/testing/deployer/sprawl/internal/build/docker.go @@ -0,0 +1,83 @@ +package build + +import ( + "context" + "strings" + + "github.com/hashicorp/go-hclog" + + "github.com/hashicorp/consul/testing/deployer/sprawl/internal/runner" + "github.com/hashicorp/consul/testing/deployer/topology" +) + +const dockerfileEnvoy = ` +ARG CONSUL_IMAGE +ARG ENVOY_IMAGE +FROM ${CONSUL_IMAGE} +FROM ${ENVOY_IMAGE} +COPY --from=0 /bin/consul /bin/consul +` + +// FROM hashicorp/consul-dataplane:latest +// COPY --from=busybox:uclibc /bin/sh /bin/sh +const dockerfileDataplane = ` +ARG DATAPLANE_IMAGE +FROM busybox:latest +FROM ${DATAPLANE_IMAGE} +COPY --from=0 /bin/busybox /bin/busybox +USER 0:0 +RUN ["busybox", "--install", "/bin", "-s"] +USER 100:0 +ENTRYPOINT [] +` + +func DockerImages( + logger hclog.Logger, + run *runner.Runner, + t *topology.Topology, +) error { + logw := logger.Named("docker").StandardWriter(&hclog.StandardLoggerOptions{ForceLevel: hclog.Info}) + + built := make(map[string]struct{}) + for _, c := range t.Clusters { + for _, n := range c.Nodes { + joint := n.Images.EnvoyConsulImage() + if _, ok := built[joint]; joint != "" && !ok { + logger.Info("building image", "image", joint) + err := run.DockerExec(context.TODO(), []string{ + "build", + "--build-arg", + "CONSUL_IMAGE=" + n.Images.Consul, + "--build-arg", + "ENVOY_IMAGE=" + n.Images.Envoy, + "-t", joint, + "-", + }, logw, strings.NewReader(dockerfileEnvoy)) + if err != nil { + return err + } + + built[joint] = struct{}{} + } + + cdp := n.Images.LocalDataplaneImage() + if _, ok := built[cdp]; cdp != "" && !ok { + logger.Info("building image", "image", cdp) + err := run.DockerExec(context.TODO(), []string{ + "build", + "--build-arg", + "DATAPLANE_IMAGE=" + n.Images.Dataplane, + "-t", cdp, + "-", + }, logw, strings.NewReader(dockerfileDataplane)) + if err != nil { + return err + } + + built[cdp] = struct{}{} + } + } + } + + return nil +} diff --git a/testing/deployer/sprawl/internal/runner/exec.go b/testing/deployer/sprawl/internal/runner/exec.go new file mode 100644 index 000000000000..896d8f0d79b5 --- /dev/null +++ b/testing/deployer/sprawl/internal/runner/exec.go @@ -0,0 +1,120 @@ +package runner + +import ( + "bytes" + "context" + "errors" + "fmt" + "io" + "os" + "os/exec" + + "github.com/hashicorp/go-hclog" +) + +type Runner struct { + logger hclog.Logger + + tfBin string + dockerBin string +} + +func Load(logger hclog.Logger) (*Runner, error) { + r := &Runner{ + logger: logger, + } + + type item struct { + name string + dest *string + warn string // optional + } + lookup := []item{ + {"docker", 
&r.dockerBin, ""}, + {"terraform", &r.tfBin, ""}, + } + + var ( + bins []string + err error + ) + for _, i := range lookup { + *i.dest, err = exec.LookPath(i.name) + if err != nil { + if errors.Is(err, exec.ErrNotFound) { + if i.warn != "" { + return nil, fmt.Errorf("Could not find %q on path (%s): %w", i.name, i.warn, err) + } else { + return nil, fmt.Errorf("Could not find %q on path: %w", i.name, err) + } + } + return nil, fmt.Errorf("Unexpected failure looking for %q on path: %w", i.name, err) + } + bins = append(bins, *i.dest) + } + r.logger.Trace("using binaries", "paths", bins) + + return r, nil +} + +func (r *Runner) DockerExec(ctx context.Context, args []string, stdout io.Writer, stdin io.Reader) error { + return cmdExec(ctx, "docker", r.dockerBin, args, stdout, nil, stdin, "") +} + +func (r *Runner) DockerExecWithStderr(ctx context.Context, args []string, stdout, stderr io.Writer, stdin io.Reader) error { + return cmdExec(ctx, "docker", r.dockerBin, args, stdout, stderr, stdin, "") +} + +func (r *Runner) TerraformExec(ctx context.Context, args []string, stdout io.Writer, workdir string) error { + return cmdExec(ctx, "terraform", r.tfBin, args, stdout, nil, nil, workdir) +} + +func cmdExec(ctx context.Context, name, binary string, args []string, stdout, stderr io.Writer, stdin io.Reader, dir string) error { + if binary == "" { + panic("binary named " + name + " was not detected") + } + var errWriter bytes.Buffer + + if stdout == nil { + stdout = os.Stdout // TODO: wrap logs + } + + cmd := exec.CommandContext(ctx, binary, args...) + if dir != "" { + cmd.Dir = dir + } + cmd.Stdout = stdout + cmd.Stderr = &errWriter + if stderr != nil { + cmd.Stderr = io.MultiWriter(stderr, cmd.Stderr) + } + cmd.Stdin = stdin + if err := cmd.Run(); err != nil { + return &ExecError{ + BinaryName: name, + Err: err, + ErrorOutput: errWriter.String(), + } + } + + return nil +} + +type ExecError struct { + BinaryName string + ErrorOutput string + Err error +} + +func (e *ExecError) Unwrap() error { + return e.Err +} + +func (e *ExecError) Error() string { + return fmt.Sprintf( + "could not invoke %q: %v : %s", + e.BinaryName, + e.Err, + e.ErrorOutput, + ) +} diff --git a/testing/deployer/sprawl/internal/secrets/store.go b/testing/deployer/sprawl/internal/secrets/store.go new file mode 100644 index 000000000000..4430686cb28a --- /dev/null +++ b/testing/deployer/sprawl/internal/secrets/store.go @@ -0,0 +1,70 @@ +package secrets + +import ( + "net/url" + "strings" + + "github.com/hashicorp/consul/testing/deployer/topology" +) + +type Store struct { + m map[string]string +} + +const ( + GossipKey = "gossip" + BootstrapToken = "bootstrap-token" + AgentRecovery = "agent-recovery" +) + +func (s *Store) SaveGeneric(cluster, name, value string) { + s.save(encode(cluster, "generic", name), value) +} + +func (s *Store) ReadGeneric(cluster, name string) string { + return s.read(encode(cluster, "generic", name)) +} + +func (s *Store) SaveAgentToken(cluster string, nid topology.NodeID, value string) { + s.save(encode(cluster, "agent", nid.String()), value) +} + +func (s *Store) ReadAgentToken(cluster string, nid topology.NodeID) string { + return s.read(encode(cluster, "agent", nid.String())) +} + +func (s *Store) SaveServiceToken(cluster string, sid topology.ServiceID, value string) { + s.save(encode(cluster, "service", sid.String()), value) +} + +func (s *Store) ReadServiceToken(cluster string, sid topology.ServiceID) string { + return s.read(encode(cluster, "service", sid.String())) +} + +func (s *Store) save(key, 
value string) { + if s.m == nil { + s.m = make(map[string]string) + } + + s.m[key] = value +} + +func (s *Store) read(key string) string { + if s.m == nil { + return "" + } + + v, ok := s.m[key] + if !ok { + return "" + } + return v +} + +func encode(parts ...string) string { + var out []string + for _, p := range parts { + out = append(out, url.QueryEscape(p)) + } + return strings.Join(out, "/") +} diff --git a/testing/deployer/sprawl/internal/tfgen/agent.go b/testing/deployer/sprawl/internal/tfgen/agent.go new file mode 100644 index 000000000000..43e1fe1db17d --- /dev/null +++ b/testing/deployer/sprawl/internal/tfgen/agent.go @@ -0,0 +1,215 @@ +package tfgen + +import ( + "fmt" + "strings" + + "github.com/hashicorp/hcl/v2/hclwrite" + + "github.com/hashicorp/consul/testing/deployer/sprawl/internal/secrets" + "github.com/hashicorp/consul/testing/deployer/topology" +) + +func (g *Generator) generateAgentHCL(node *topology.Node) (string, error) { + if !node.IsAgent() { + return "", fmt.Errorf("not an agent") + } + + cluster, ok := g.topology.Clusters[node.Cluster] + if !ok { + return "", fmt.Errorf("no such cluster: %s", node.Cluster) + } + + var b HCLBuilder + + b.add("server", node.IsServer()) + b.add("bind_addr", "0.0.0.0") + b.add("client_addr", "0.0.0.0") + b.add("advertise_addr", `{{ GetInterfaceIP "eth0" }}`) + b.add("datacenter", node.Datacenter) + b.add("disable_update_check", true) + b.add("log_level", "trace") + b.add("enable_debug", true) + b.add("use_streaming_backend", true) + + // speed up leaves + b.addBlock("performance", func() { + b.add("leave_drain_time", "50ms") + }) + + b.add("primary_datacenter", node.Datacenter) + + // Using retry_join here is bad because changing server membership will + // destroy and recreate all of the servers + // if !node.IsServer() { + b.addSlice("retry_join", []string{"server." 
+ node.Cluster + "-consulcluster.lan"}) + b.add("retry_interval", "1s") + // } + + if node.IsServer() { + b.addBlock("peering", func() { + b.add("enabled", true) + }) + } + + b.addBlock("ui_config", func() { + b.add("enabled", true) + }) + + b.addBlock("telemetry", func() { + b.add("disable_hostname", true) + b.add("prometheus_retention_time", "168h") + }) + + b.add("encrypt", g.sec.ReadGeneric(node.Cluster, secrets.GossipKey)) + + { + var ( + root = "/consul/config/certs" + caFile = root + "/consul-agent-ca.pem" + certFile = root + "/" + node.TLSCertPrefix + ".pem" + certKey = root + "/" + node.TLSCertPrefix + "-key.pem" + ) + + b.addBlock("tls", func() { + b.addBlock("internal_rpc", func() { + b.add("ca_file", caFile) + b.add("cert_file", certFile) + b.add("key_file", certKey) + b.add("verify_incoming", true) + b.add("verify_server_hostname", true) + b.add("verify_outgoing", true) + }) + // if cfg.EncryptionTLSAPI { + // b.addBlock("https", func() { + // b.add("ca_file", caFile) + // b.add("cert_file", certFile) + // b.add("key_file", certKey) + // // b.add("verify_incoming", true) + // }) + // } + if node.IsServer() { + b.addBlock("grpc", func() { + b.add("ca_file", caFile) + b.add("cert_file", certFile) + b.add("key_file", certKey) + // b.add("verify_incoming", true) + }) + } + }) + } + + b.addBlock("ports", func() { + if node.IsServer() { + b.add("grpc_tls", 8503) + b.add("grpc", -1) + } else { + b.add("grpc", 8502) + b.add("grpc_tls", -1) + } + b.add("http", 8500) + b.add("dns", 8600) + }) + + b.addSlice("recursors", []string{"8.8.8.8"}) + + b.addBlock("acl", func() { + b.add("enabled", true) + b.add("default_policy", "deny") + b.add("down_policy", "extend-cache") + b.add("enable_token_persistence", true) + b.addBlock("tokens", func() { + if node.IsServer() { + b.add("initial_management", g.sec.ReadGeneric(node.Cluster, secrets.BootstrapToken)) + } + b.add("agent_recovery", g.sec.ReadGeneric(node.Cluster, secrets.AgentRecovery)) + b.add("agent", g.sec.ReadAgentToken(node.Cluster, node.ID())) + }) + }) + + if node.IsServer() { + b.add("bootstrap_expect", len(cluster.ServerNodes())) + // b.add("translate_wan_addrs", true) + b.addBlock("rpc", func() { + b.add("enable_streaming", true) + }) + if node.HasPublicAddress() { + b.add("advertise_addr_wan", `{{ GetInterfaceIP "eth1" }}`) // note: can't use 'node.PublicAddress()' b/c we don't know that yet + } + + // Exercise config entry bootstrap + // b.addBlock("config_entries", func() { + // b.addBlock("bootstrap", func() { + // b.add("kind", "service-defaults") + // b.add("name", "placeholder") + // b.add("protocol", "grpc") + // }) + // b.addBlock("bootstrap", func() { + // b.add("kind", "service-intentions") + // b.add("name", "placeholder") + // b.addBlock("sources", func() { + // b.add("name", "placeholder-client") + // b.add("action", "allow") + // }) + // }) + // }) + + b.addBlock("connect", func() { + b.add("enabled", true) + }) + + } else { + if cluster.Enterprise { + b.add("partition", node.Partition) + } + } + + return b.String(), nil +} + +type HCLBuilder struct { + parts []string +} + +func (b *HCLBuilder) format(s string, a ...any) { + if len(a) == 0 { + b.parts = append(b.parts, s) + } else { + b.parts = append(b.parts, fmt.Sprintf(s, a...)) + } +} + +func (b *HCLBuilder) add(k string, v any) { + switch x := v.(type) { + case string: + if x != "" { + b.format("%s = %q", k, x) + } + case int: + b.format("%s = %d", k, x) + case bool: + b.format("%s = %v", k, x) + default: + panic(fmt.Sprintf("unexpected type %T", v)) + } +} + 
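+// addBlock emits a nested HCL block: the block name and opening brace, any
+// attributes that fn appends through the builder, and the closing brace.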
+func (b *HCLBuilder) addBlock(block string, fn func()) { + b.format(block + "{") + fn() + b.format("}") +} + +func (b *HCLBuilder) addSlice(name string, vals []string) { + b.format(name + " = [") + for _, v := range vals { + b.format("%q,", v) + } + b.format("]") +} + +func (b *HCLBuilder) String() string { + joined := strings.Join(b.parts, "\n") + // Ensure it looks tidy + return string(hclwrite.Format([]byte(joined))) +} diff --git a/testing/deployer/sprawl/internal/tfgen/digest.go b/testing/deployer/sprawl/internal/tfgen/digest.go new file mode 100644 index 000000000000..28e364417bce --- /dev/null +++ b/testing/deployer/sprawl/internal/tfgen/digest.go @@ -0,0 +1,45 @@ +package tfgen + +import ( + "fmt" +) + +// digestOutputs takes the data extracted from terraform output variables and +// updates various fields on the topology.Topology with that data. +func (g *Generator) digestOutputs(out *Outputs) error { + for clusterName, nodeMap := range out.Nodes { + cluster, ok := g.topology.Clusters[clusterName] + if !ok { + return fmt.Errorf("found output cluster that does not exist: %s", clusterName) + } + for nid, nodeOut := range nodeMap { + node := cluster.NodeByID(nid) + if node == nil { + return fmt.Errorf("found output node that does not exist in cluster %q: %s", nid, clusterName) + } + + if node.DigestExposedPorts(nodeOut.Ports) { + g.logger.Info("discovered exposed port mappings", + "cluster", clusterName, + "node", nid.String(), + "ports", nodeOut.Ports, + ) + } + } + } + + for netName, proxyPort := range out.ProxyPorts { + changed, err := g.topology.DigestExposedProxyPort(netName, proxyPort) + if err != nil { + return err + } + if changed { + g.logger.Info("discovered exposed forward proxy port", + "network", netName, + "port", proxyPort, + ) + } + } + + return nil +} diff --git a/testing/deployer/sprawl/internal/tfgen/dns.go b/testing/deployer/sprawl/internal/tfgen/dns.go new file mode 100644 index 000000000000..c60a19bb0c28 --- /dev/null +++ b/testing/deployer/sprawl/internal/tfgen/dns.go @@ -0,0 +1,180 @@ +package tfgen + +import ( + "bytes" + "fmt" + "os" + "path/filepath" + "strings" + "text/template" + + "github.com/hashicorp/consul/testing/deployer/topology" + "github.com/hashicorp/consul/testing/deployer/util" +) + +func (g *Generator) getCoreDNSContainer( + net *topology.Network, + ipAddress string, + hashes []string, +) Resource { + var env []string + for i, hv := range hashes { + env = append(env, fmt.Sprintf("HASH_FILE_%d_VALUE=%s", i, hv)) + } + coredns := struct { + Name string + DockerNetworkName string + IPAddress string + HashValues string + Env []string + }{ + Name: net.Name, + DockerNetworkName: net.DockerName, + IPAddress: ipAddress, + Env: env, + } + return Eval(tfCorednsT, &coredns) +} + +func (g *Generator) writeCoreDNSFiles(net *topology.Network, dnsIPAddress string) (bool, []string, error) { + if net.IsPublic() { + return false, nil, fmt.Errorf("coredns only runs on local networks") + } + + rootdir := filepath.Join(g.workdir, "terraform", "coredns-config-"+net.Name) + if err := os.MkdirAll(rootdir, 0755); err != nil { + return false, nil, err + } + + for _, cluster := range g.topology.Clusters { + if cluster.NetworkName != net.Name { + continue + } + var addrs []string + for _, node := range cluster.SortedNodes() { + if node.Kind != topology.NodeKindServer || node.Disabled { + continue + } + addr := node.AddressByNetwork(net.Name) + if addr.IPAddress != "" { + addrs = append(addrs, addr.IPAddress) + } + } + + var ( + clusterDNSName = cluster.Name + 
"-consulcluster.lan" + ) + + corefilePath := filepath.Join(rootdir, "Corefile") + zonefilePath := filepath.Join(rootdir, "servers") + + _, err := UpdateFileIfDifferent( + g.logger, + generateCoreDNSConfigFile( + clusterDNSName, + addrs, + ), + corefilePath, + 0644, + ) + if err != nil { + return false, nil, fmt.Errorf("error writing %q: %w", corefilePath, err) + } + corefileHash, err := util.HashFile(corefilePath) + if err != nil { + return false, nil, fmt.Errorf("error hashing %q: %w", corefilePath, err) + } + + _, err = UpdateFileIfDifferent( + g.logger, + generateCoreDNSZoneFile( + dnsIPAddress, + clusterDNSName, + addrs, + ), + zonefilePath, + 0644, + ) + if err != nil { + return false, nil, fmt.Errorf("error writing %q: %w", zonefilePath, err) + } + zonefileHash, err := util.HashFile(zonefilePath) + if err != nil { + return false, nil, fmt.Errorf("error hashing %q: %w", zonefilePath, err) + } + + return true, []string{corefileHash, zonefileHash}, nil + } + + return false, nil, nil +} + +func generateCoreDNSConfigFile( + clusterDNSName string, + addrs []string, +) []byte { + serverPart := "" + if len(addrs) > 0 { + var servers []string + for _, addr := range addrs { + servers = append(servers, addr+":8600") + } + serverPart = fmt.Sprintf(` +consul:53 { + forward . %s + log + errors + whoami +} +`, strings.Join(servers, " ")) + } + + return []byte(fmt.Sprintf(` +%[1]s:53 { + file /config/servers %[1]s + log + errors + whoami +} + +%[2]s + +.:53 { + forward . 8.8.8.8:53 + log + errors + whoami +} +`, clusterDNSName, serverPart)) +} + +func generateCoreDNSZoneFile( + dnsIPAddress string, + clusterDNSName string, + addrs []string, +) []byte { + var buf bytes.Buffer + buf.WriteString(fmt.Sprintf(` +$TTL 60 +$ORIGIN %[1]s. +@ IN SOA ns.%[1]s. webmaster.%[1]s. ( + 2017042745 ; serial + 7200 ; refresh (2 hours) + 3600 ; retry (1 hour) + 1209600 ; expire (2 weeks) + 3600 ; minimum (1 hour) + ) +@ IN NS ns.%[1]s. 
; Name server +ns IN A %[2]s ; self +`, clusterDNSName, dnsIPAddress)) + + for _, addr := range addrs { + buf.WriteString(fmt.Sprintf(` +server IN A %s ; Consul server +`, addr)) + } + + return buf.Bytes() +} + +var tfCorednsT = template.Must(template.ParseFS(content, "templates/container-coredns.tf.tmpl")) diff --git a/testing/deployer/sprawl/internal/tfgen/docker.go b/testing/deployer/sprawl/internal/tfgen/docker.go new file mode 100644 index 000000000000..f2a655feccd0 --- /dev/null +++ b/testing/deployer/sprawl/internal/tfgen/docker.go @@ -0,0 +1,39 @@ +package tfgen + +import ( + "fmt" + "regexp" +) + +var invalidResourceName = regexp.MustCompile(`[^a-z0-9-]+`) + +func DockerImageResourceName(image string) string { + return invalidResourceName.ReplaceAllLiteralString(image, "-") +} + +func DockerNetwork(name, subnet string) Resource { + return Text(fmt.Sprintf(` +resource "docker_network" %[1]q { + name = %[1]q + attachable = true + ipam_config { + subnet = %[2]q + } +} +`, name, subnet)) +} + +func DockerVolume(name string) Resource { + return Text(fmt.Sprintf(` +resource "docker_volume" %[1]q { + name = %[1]q +}`, name)) +} + +func DockerImage(name, image string) Resource { + return Text(fmt.Sprintf(` +resource "docker_image" %[1]q { + name = %[2]q + keep_locally = true +}`, name, image)) +} diff --git a/testing/deployer/sprawl/internal/tfgen/docker_test.go b/testing/deployer/sprawl/internal/tfgen/docker_test.go new file mode 100644 index 000000000000..97f38bc530cd --- /dev/null +++ b/testing/deployer/sprawl/internal/tfgen/docker_test.go @@ -0,0 +1,15 @@ +package tfgen + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestDockerImageResourceName(t *testing.T) { + fn := DockerImageResourceName + + assert.Equal(t, "", fn("")) + assert.Equal(t, "abcdefghijklmnopqrstuvwxyz0123456789-", fn("abcdefghijklmnopqrstuvwxyz0123456789-")) + assert.Equal(t, "hashicorp-consul-1-15-0", fn("hashicorp/consul:1.15.0")) +} diff --git a/testing/deployer/sprawl/internal/tfgen/gen.go b/testing/deployer/sprawl/internal/tfgen/gen.go new file mode 100644 index 000000000000..9e34edacdd1c --- /dev/null +++ b/testing/deployer/sprawl/internal/tfgen/gen.go @@ -0,0 +1,475 @@ +package tfgen + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io" + "os" + "path/filepath" + "strconv" + "strings" + + "github.com/hashicorp/go-hclog" + + "github.com/hashicorp/consul/testing/deployer/sprawl/internal/runner" + "github.com/hashicorp/consul/testing/deployer/sprawl/internal/secrets" + "github.com/hashicorp/consul/testing/deployer/topology" + "github.com/hashicorp/consul/testing/deployer/util" +) + +type Generator struct { + logger hclog.Logger + runner *runner.Runner + topology *topology.Topology + sec *secrets.Store + workdir string + license string + + tfLogger io.Writer + + // set during network phase + remainingSubnets map[string]struct{} + + launched bool +} + +func NewGenerator( + logger hclog.Logger, + runner *runner.Runner, + topo *topology.Topology, + sec *secrets.Store, + workdir string, + license string, +) (*Generator, error) { + if logger == nil { + panic("logger is required") + } + if runner == nil { + panic("runner is required") + } + if topo == nil { + panic("topology is required") + } + if sec == nil { + panic("secrets store is required") + } + if workdir == "" { + panic("workdir is required") + } + + g := &Generator{ + logger: logger, + runner: runner, + sec: sec, + workdir: workdir, + license: license, + + tfLogger: 
logger.Named("terraform").StandardWriter(&hclog.StandardLoggerOptions{ + ForceLevel: hclog.Info, + }), + } + g.SetTopology(topo) + + _ = g.terraformDestroy(context.Background(), true) // cleanup prior run + + return g, nil +} + +func (g *Generator) MarkLaunched() { + g.launched = true +} + +func (g *Generator) SetTopology(topo *topology.Topology) { + if topo == nil { + panic("topology is required") + } + g.topology = topo +} + +type Step int + +const ( + StepAll Step = 0 + StepNetworks Step = 1 + StepServers Step = 2 + StepAgents Step = 3 + StepServices Step = 4 + // StepPeering Step = XXX5 + StepRelaunch Step = 5 +) + +func (s Step) String() string { + switch s { + case StepAll: + return "all" + case StepNetworks: + return "networks" + case StepServers: + return "servers" + case StepAgents: + return "agents" + case StepServices: + return "services" + case StepRelaunch: + return "relaunch" + // case StepPeering: + // return "peering" + default: + return "UNKNOWN--" + strconv.Itoa(int(s)) + } +} + +func (s Step) StartServers() bool { return s >= StepServers } +func (s Step) StartAgents() bool { return s >= StepAgents } +func (s Step) StartServices() bool { return s >= StepServices } + +// func (s Step) InitiatePeering() bool { return s >= StepPeering } + +func (g *Generator) Regenerate() error { + return g.Generate(StepRelaunch) +} + +func (g *Generator) Generate(step Step) error { + if g.launched && step != StepRelaunch { + return fmt.Errorf("cannot use step %q after successful launch; see Regenerate()", step) + } + + g.logger.Info("generating and creating resources", "step", step.String()) + var ( + networks []Resource + volumes []Resource + images []Resource + containers []Resource + + imageNames = make(map[string]string) + ) + + addVolume := func(name string) { + volumes = append(volumes, DockerVolume(name)) + } + addImage := func(name, image string) { + if image == "" { + return + } + if _, ok := imageNames[image]; ok { + return + } + + if name == "" { + name = DockerImageResourceName(image) + } + + imageNames[image] = name + + g.logger.Info("registering image", "resource", name, "image", image) + + images = append(images, DockerImage(name, image)) + } + + if g.remainingSubnets == nil { + g.remainingSubnets = util.GetPossibleDockerNetworkSubnets() + } + if len(g.remainingSubnets) == 0 { + return fmt.Errorf("exhausted all docker networks") + } + + addImage("nginx", "nginx:latest") + addImage("coredns", "coredns/coredns:latest") + for _, net := range g.topology.SortedNetworks() { + if net.Subnet == "" { + // Because this harness runs on a linux or macos host, we can't + // directly invoke the moby libnetwork calls to check for free + // subnets as it would have to cross into the docker desktop vm on + // mac. + // + // Instead rely on map iteration order being random to avoid + // collisions, but detect the terraform failure and retry until + // success. + + var ipnet string + for ipnet = range g.remainingSubnets { + } + if ipnet == "" { + return fmt.Errorf("could not get a free docker network") + } + delete(g.remainingSubnets, ipnet) + + if _, err := net.SetSubnet(ipnet); err != nil { + return fmt.Errorf("assigned subnet is invalid %q: %w", ipnet, err) + } + } + networks = append(networks, DockerNetwork(net.DockerName, net.Subnet)) + + var ( + // We always ask for a /24, so just blindly pick x.x.x.252 as our + // proxy address. There's an offset of 2 in the list of available + // addresses here because we removed x.x.x.0 and x.x.x.1 from the + // pool. 
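+			// With that offset, IPByIndex(250) resolves to x.x.x.252 and
+			// IPByIndex(251) to x.x.x.253 below.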
+ proxyIPAddress = net.IPByIndex(250) + // Grab x.x.x.253 for the dns server + dnsIPAddress = net.IPByIndex(251) + ) + + { + // wrote, hashes, err := g.write + } + + { // nginx forward proxy + _, hash, err := g.writeNginxConfig(net) + if err != nil { + return fmt.Errorf("writeNginxConfig[%s]: %w", net.Name, err) + } + + containers = append(containers, g.getForwardProxyContainer(net, proxyIPAddress, hash)) + + } + + net.ProxyAddress = proxyIPAddress + net.DNSAddress = "" + + if net.IsLocal() { + wrote, hashes, err := g.writeCoreDNSFiles(net, dnsIPAddress) + if err != nil { + return fmt.Errorf("writeCoreDNSFiles[%s]: %w", net.Name, err) + } + if wrote { + net.DNSAddress = dnsIPAddress + containers = append(containers, g.getCoreDNSContainer(net, dnsIPAddress, hashes)) + } + } + } + + for _, c := range g.topology.SortedClusters() { + if c.TLSVolumeName == "" { + c.TLSVolumeName = c.Name + "-tls-material-" + g.topology.ID + } + addVolume(c.TLSVolumeName) + } + + addImage("pause", "registry.k8s.io/pause:3.3") + + if step.StartServers() { + for _, c := range g.topology.SortedClusters() { + for _, node := range c.SortedNodes() { + if node.Disabled { + continue + } + addImage("", node.Images.Consul) + addImage("", node.Images.EnvoyConsulImage()) + addImage("", node.Images.LocalDataplaneImage()) + + if node.IsAgent() { + addVolume(node.DockerName()) + } + + for _, svc := range node.Services { + addImage("", svc.Image) + } + + myContainers, err := g.generateNodeContainers(step, c, node) + if err != nil { + return err + } + + containers = append(containers, myContainers...) + } + } + } + + tfpath := func(p string) string { + return filepath.Join(g.workdir, "terraform", p) + } + + if _, err := WriteHCLResourceFile(g.logger, []Resource{Text(terraformPrelude)}, tfpath("init.tf"), 0644); err != nil { + return err + } + if netResult, err := WriteHCLResourceFile(g.logger, networks, tfpath("networks.tf"), 0644); err != nil { + return err + } else if netResult == UpdateResultModified { + if step != StepNetworks { + return fmt.Errorf("cannot change networking details after they are established") + } + } + if _, err := WriteHCLResourceFile(g.logger, volumes, tfpath("volumes.tf"), 0644); err != nil { + return err + } + if _, err := WriteHCLResourceFile(g.logger, images, tfpath("images.tf"), 0644); err != nil { + return err + } + if _, err := WriteHCLResourceFile(g.logger, containers, tfpath("containers.tf"), 0644); err != nil { + return err + } + + if err := g.terraformApply(context.TODO()); err != nil { + return err + } + + out, err := g.terraformOutputs(context.TODO()) + if err != nil { + return err + } + + return g.digestOutputs(out) +} + +func (g *Generator) DestroyAll() error { + return g.terraformDestroy(context.TODO(), false) +} + +func (g *Generator) DestroyAllQuietly() error { + return g.terraformDestroy(context.TODO(), true) +} + +func (g *Generator) terraformApply(ctx context.Context) error { + tfdir := filepath.Join(g.workdir, "terraform") + + if _, err := os.Stat(filepath.Join(tfdir, ".terraform")); err != nil { + if !os.IsNotExist(err) { + return err + } + + // On the fly init + g.logger.Info("Running 'terraform init'...") + if err := g.runner.TerraformExec(ctx, []string{"init", "-input=false"}, g.tfLogger, tfdir); err != nil { + return err + } + } + + g.logger.Info("Running 'terraform apply'...") + return g.runner.TerraformExec(ctx, []string{"apply", "-input=false", "-auto-approve"}, g.tfLogger, tfdir) +} + +func (g *Generator) terraformDestroy(ctx context.Context, quiet bool) error { + 
g.logger.Info("Running 'terraform destroy'...") + + var out io.Writer + if quiet { + out = io.Discard + } else { + out = g.tfLogger + } + + tfdir := filepath.Join(g.workdir, "terraform") + return g.runner.TerraformExec(ctx, []string{ + "destroy", "-input=false", "-auto-approve", "-refresh=false", + }, out, tfdir) +} + +func (g *Generator) terraformOutputs(ctx context.Context) (*Outputs, error) { + tfdir := filepath.Join(g.workdir, "terraform") + + var buf bytes.Buffer + err := g.runner.TerraformExec(ctx, []string{ + "output", "-json", + }, &buf, tfdir) + if err != nil { + return nil, err + } + + type outputVar struct { + // may be map[string]any + Value any `json:"value"` + } + + raw := make(map[string]*outputVar) + dec := json.NewDecoder(&buf) + if err := dec.Decode(&raw); err != nil { + return nil, err + } + + out := &Outputs{} + + for key, rv := range raw { + switch { + case strings.HasPrefix(key, "ports_"): + cluster, nid, ok := extractNodeOutputKey("ports_", key) + if !ok { + return nil, fmt.Errorf("unexpected output var: %s", key) + } + + ports := make(map[int]int) + for k, v := range rv.Value.(map[string]any) { + ki, err := strconv.Atoi(k) + if err != nil { + return nil, fmt.Errorf("unexpected port value %q: %w", k, err) + } + ports[ki] = int(v.(float64)) + } + out.SetNodePorts(cluster, nid, ports) + case strings.HasPrefix(key, "forwardproxyport_"): + netname := strings.TrimPrefix(key, "forwardproxyport_") + + found := rv.Value.(map[string]any) + if len(found) != 1 { + return nil, fmt.Errorf("found unexpected ports: %v", found) + } + got, ok := found[strconv.Itoa(proxyInternalPort)] + if !ok { + return nil, fmt.Errorf("found unexpected ports: %v", found) + } + + out.SetProxyPort(netname, int(got.(float64))) + } + } + + return out, nil +} + +func extractNodeOutputKey(prefix, key string) (string, topology.NodeID, bool) { + clusterNode := strings.TrimPrefix(key, prefix) + + cluster, nodeid, ok := strings.Cut(clusterNode, "_") + if !ok { + return "", topology.NodeID{}, false + } + + partition, node, ok := strings.Cut(nodeid, "_") + if !ok { + return "", topology.NodeID{}, false + } + + nid := topology.NewNodeID(node, partition) + return cluster, nid, true +} + +type Outputs struct { + ProxyPorts map[string]int // net -> exposed port + Nodes map[string]map[topology.NodeID]*NodeOutput // clusterID -> node -> stuff +} + +func (o *Outputs) SetNodePorts(cluster string, nid topology.NodeID, ports map[int]int) { + nodeOut := o.getNode(cluster, nid) + nodeOut.Ports = ports +} + +func (o *Outputs) SetProxyPort(net string, port int) { + if o.ProxyPorts == nil { + o.ProxyPorts = make(map[string]int) + } + o.ProxyPorts[net] = port +} + +func (o *Outputs) getNode(cluster string, nid topology.NodeID) *NodeOutput { + if o.Nodes == nil { + o.Nodes = make(map[string]map[topology.NodeID]*NodeOutput) + } + cnodes, ok := o.Nodes[cluster] + if !ok { + cnodes = make(map[topology.NodeID]*NodeOutput) + o.Nodes[cluster] = cnodes + } + + nodeOut, ok := cnodes[nid] + if !ok { + nodeOut = &NodeOutput{} + cnodes[nid] = nodeOut + } + + return nodeOut +} + +type NodeOutput struct { + Ports map[int]int `json:",omitempty"` +} diff --git a/testing/deployer/sprawl/internal/tfgen/io.go b/testing/deployer/sprawl/internal/tfgen/io.go new file mode 100644 index 000000000000..cd622536455b --- /dev/null +++ b/testing/deployer/sprawl/internal/tfgen/io.go @@ -0,0 +1,70 @@ +package tfgen + +import ( + "bytes" + "os" + "strings" + + "github.com/hashicorp/go-hclog" + "github.com/hashicorp/hcl/v2/hclwrite" + 
"github.com/rboyer/safeio" +) + +func WriteHCLResourceFile( + logger hclog.Logger, + res []Resource, + path string, + perm os.FileMode, +) (UpdateResult, error) { + var text []string + for _, r := range res { + val, err := r.Render() + if err != nil { + return UpdateResultNone, err + } + text = append(text, strings.TrimSpace(val)) + } + + body := strings.Join(text, "\n\n") + + // Ensure it looks tidy + out := hclwrite.Format(bytes.TrimSpace([]byte(body))) + + return UpdateFileIfDifferent(logger, out, path, perm) +} + +type UpdateResult int + +const ( + UpdateResultNone UpdateResult = iota + UpdateResultCreated + UpdateResultModified +) + +func UpdateFileIfDifferent( + logger hclog.Logger, + body []byte, + path string, + perm os.FileMode, +) (UpdateResult, error) { + prev, err := os.ReadFile(path) + + result := UpdateResultNone + if err != nil { + if !os.IsNotExist(err) { + return result, err + } + logger.Info("writing new file", "path", path) + result = UpdateResultCreated + } else { + // loaded + if bytes.Equal(body, prev) { + return result, nil + } + logger.Info("file has changed", "path", path) + result = UpdateResultModified + } + + _, err = safeio.WriteToFile(bytes.NewReader(body), path, perm) + return result, err +} diff --git a/testing/deployer/sprawl/internal/tfgen/nodes.go b/testing/deployer/sprawl/internal/tfgen/nodes.go new file mode 100644 index 000000000000..1c521f21c25b --- /dev/null +++ b/testing/deployer/sprawl/internal/tfgen/nodes.go @@ -0,0 +1,249 @@ +package tfgen + +import ( + "fmt" + "sort" + "strconv" + "text/template" + + "github.com/hashicorp/consul/testing/deployer/topology" +) + +type terraformPod struct { + PodName string + Node *topology.Node + Ports []int + Labels map[string]string + TLSVolumeName string + DNSAddress string + DockerNetworkName string +} + +type terraformConsulAgent struct { + terraformPod + ImageResource string + HCL string + EnterpriseLicense string + Env []string +} + +type terraformMeshGatewayService struct { + terraformPod + EnvoyImageResource string + Service *topology.Service + Command []string +} + +type terraformService struct { + terraformPod + AppImageResource string + EnvoyImageResource string // agentful + DataplaneImageResource string // agentless + Service *topology.Service + Env []string + Command []string + EnvoyCommand []string // agentful +} + +func (g *Generator) generateNodeContainers( + step Step, + cluster *topology.Cluster, + node *topology.Node, +) ([]Resource, error) { + if node.Disabled { + return nil, fmt.Errorf("cannot generate containers for a disabled node") + } + + pod := terraformPod{ + PodName: node.PodName(), + Node: node, + Labels: map[string]string{ + "consulcluster-topology-id": g.topology.ID, + "consulcluster-cluster-name": node.Cluster, + }, + TLSVolumeName: cluster.TLSVolumeName, + DNSAddress: "8.8.8.8", + } + + cluster, ok := g.topology.Clusters[node.Cluster] + if !ok { + return nil, fmt.Errorf("no such cluster: %s", node.Cluster) + } + + net, ok := g.topology.Networks[cluster.NetworkName] + if !ok { + return nil, fmt.Errorf("no local network: %s", cluster.NetworkName) + } + if net.DNSAddress != "" { + pod.DNSAddress = net.DNSAddress + } + pod.DockerNetworkName = net.DockerName + + var ( + containers []Resource + ) + + if node.IsAgent() { + agentHCL, err := g.generateAgentHCL(node) + if err != nil { + return nil, err + } + + agent := terraformConsulAgent{ + terraformPod: pod, + ImageResource: DockerImageResourceName(node.Images.Consul), + HCL: agentHCL, + EnterpriseLicense: g.license, + Env: 
node.AgentEnv, + } + + switch { + case node.IsServer() && step.StartServers(), + !node.IsServer() && step.StartAgents(): + containers = append(containers, Eval(tfConsulT, &agent)) + } + } + + for _, svc := range node.SortedServices() { + if svc.IsMeshGateway { + if node.Kind == topology.NodeKindDataplane { + panic("NOT READY YET") + } + gw := terraformMeshGatewayService{ + terraformPod: pod, + EnvoyImageResource: DockerImageResourceName(node.Images.EnvoyConsulImage()), + Service: svc, + Command: []string{ + "consul", "connect", "envoy", + "-register", + "-mesh-gateway", + }, + } + if token := g.sec.ReadServiceToken(node.Cluster, svc.ID); token != "" { + gw.Command = append(gw.Command, "-token", token) + } + if cluster.Enterprise { + gw.Command = append(gw.Command, + "-partition", + svc.ID.Partition, + ) + } + gw.Command = append(gw.Command, + "-address", + `{{ GetInterfaceIP \"eth0\" }}:`+strconv.Itoa(svc.Port), + "-wan-address", + `{{ GetInterfaceIP \"eth1\" }}:`+strconv.Itoa(svc.Port), + ) + gw.Command = append(gw.Command, + "-grpc-addr", "http://127.0.0.1:8502", + "-admin-bind", + // for demo purposes + "0.0.0.0:"+strconv.Itoa(svc.EnvoyAdminPort), + "--", + "-l", + "trace", + ) + if step.StartServices() { + containers = append(containers, Eval(tfMeshGatewayT, &gw)) + } + } else { + tfsvc := terraformService{ + terraformPod: pod, + AppImageResource: DockerImageResourceName(svc.Image), + Service: svc, + Command: svc.Command, + } + tfsvc.Env = append(tfsvc.Env, svc.Env...) + if step.StartServices() { + containers = append(containers, Eval(tfAppT, &tfsvc)) + } + + setenv := func(k, v string) { + tfsvc.Env = append(tfsvc.Env, k+"="+v) + } + + if !svc.DisableServiceMesh { + if node.IsDataplane() { + tfsvc.DataplaneImageResource = DockerImageResourceName(node.Images.LocalDataplaneImage()) + tfsvc.EnvoyImageResource = "" + tfsvc.EnvoyCommand = nil + // --- REQUIRED --- + setenv("DP_CONSUL_ADDRESSES", "server."+node.Cluster+"-consulcluster.lan") + setenv("DP_SERVICE_NODE_NAME", node.PodName()) + setenv("DP_PROXY_SERVICE_ID", svc.ID.Name+"-sidecar-proxy") + } else { + tfsvc.DataplaneImageResource = "" + tfsvc.EnvoyImageResource = DockerImageResourceName(node.Images.EnvoyConsulImage()) + tfsvc.EnvoyCommand = []string{ + "consul", "connect", "envoy", + "-sidecar-for", svc.ID.Name, + } + } + if cluster.Enterprise { + if node.IsDataplane() { + setenv("DP_SERVICE_NAMESPACE", svc.ID.Namespace) + setenv("DP_SERVICE_PARTITION", svc.ID.Partition) + } else { + tfsvc.EnvoyCommand = append(tfsvc.EnvoyCommand, + "-partition", + svc.ID.Partition, + "-namespace", + svc.ID.Namespace, + ) + } + } + if token := g.sec.ReadServiceToken(node.Cluster, svc.ID); token != "" { + if node.IsDataplane() { + setenv("DP_CREDENTIAL_TYPE", "static") + setenv("DP_CREDENTIAL_STATIC_TOKEN", token) + } else { + tfsvc.EnvoyCommand = append(tfsvc.EnvoyCommand, "-token", token) + } + } + if node.IsDataplane() { + setenv("DP_ENVOY_ADMIN_BIND_ADDRESS", "0.0.0.0") // for demo purposes + setenv("DP_ENVOY_ADMIN_BIND_PORT", "19000") + setenv("DP_LOG_LEVEL", "trace") + + setenv("DP_CA_CERTS", "/consul/config/certs/consul-agent-ca.pem") + setenv("DP_CONSUL_GRPC_PORT", "8503") + setenv("DP_TLS_SERVER_NAME", "server."+node.Datacenter+".consul") + } else { + tfsvc.EnvoyCommand = append(tfsvc.EnvoyCommand, + "-grpc-addr", "http://127.0.0.1:8502", + "-admin-bind", + // for demo purposes + "0.0.0.0:"+strconv.Itoa(svc.EnvoyAdminPort), + "--", + "-l", + "trace", + ) + } + if step.StartServices() { + sort.Strings(tfsvc.Env) + + if node.IsDataplane() { 
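+ // Dataplane services render the consul-dataplane container template below; agentful services get an Envoy sidecar container instead.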
+ containers = append(containers, Eval(tfAppDataplaneT, &tfsvc)) + } else { + containers = append(containers, Eval(tfAppSidecarT, &tfsvc)) + } + } + } + } + } + + // Wait until the very end to render the pod so we know all of the ports. + pod.Ports = node.SortedPorts() + + // pod placeholder container + containers = append(containers, Eval(tfPauseT, &pod)) + + return containers, nil +} + +var tfPauseT = template.Must(template.ParseFS(content, "templates/container-pause.tf.tmpl")) +var tfConsulT = template.Must(template.ParseFS(content, "templates/container-consul.tf.tmpl")) +var tfMeshGatewayT = template.Must(template.ParseFS(content, "templates/container-mgw.tf.tmpl")) +var tfAppT = template.Must(template.ParseFS(content, "templates/container-app.tf.tmpl")) +var tfAppSidecarT = template.Must(template.ParseFS(content, "templates/container-app-sidecar.tf.tmpl")) +var tfAppDataplaneT = template.Must(template.ParseFS(content, "templates/container-app-dataplane.tf.tmpl")) diff --git a/testing/deployer/sprawl/internal/tfgen/prelude.go b/testing/deployer/sprawl/internal/tfgen/prelude.go new file mode 100644 index 000000000000..7a10c8c5da25 --- /dev/null +++ b/testing/deployer/sprawl/internal/tfgen/prelude.go @@ -0,0 +1,16 @@ +package tfgen + +const terraformPrelude = `provider "docker" { + host = "unix:///var/run/docker.sock" +} + +terraform { + required_providers { + docker = { + source = "kreuzwerker/docker" + version = "~> 2.0" + } + } + required_version = ">= 0.13" +} +` diff --git a/testing/deployer/sprawl/internal/tfgen/proxy.go b/testing/deployer/sprawl/internal/tfgen/proxy.go new file mode 100644 index 000000000000..9c4c6bb4575a --- /dev/null +++ b/testing/deployer/sprawl/internal/tfgen/proxy.go @@ -0,0 +1,87 @@ +package tfgen + +import ( + "fmt" + "os" + "path/filepath" + "text/template" + + "github.com/hashicorp/consul/testing/deployer/topology" + "github.com/hashicorp/consul/testing/deployer/util" +) + +const proxyInternalPort = 80 + +func (g *Generator) writeNginxConfig(net *topology.Network) (bool, string, error) { + rootdir := filepath.Join(g.workdir, "terraform", "nginx-config-"+net.Name) + if err := os.MkdirAll(rootdir, 0755); err != nil { + return false, "", err + } + + configFile := filepath.Join(rootdir, "nginx.conf") + + body := fmt.Sprintf(` +server { + listen %d; + + location / { + resolver 8.8.8.8; + ############## + # Relevant config knobs are here: https://nginx.org/en/docs/http/ngx_http_proxy_module.html + ############## + proxy_pass http://$http_host$uri$is_args$args; + proxy_cache off; + proxy_http_version 1.1; + proxy_set_header Upgrade $http_upgrade; + proxy_set_header Connection "upgrade"; + proxy_connect_timeout 5s; + proxy_read_timeout 5s; + proxy_send_timeout 5s; + proxy_request_buffering off; + proxy_buffering off; + } +} +`, proxyInternalPort) + + _, err := UpdateFileIfDifferent( + g.logger, + []byte(body), + configFile, + 0644, + ) + if err != nil { + return false, "", fmt.Errorf("error writing %q: %w", configFile, err) + } + + hash, err := util.HashFile(configFile) + if err != nil { + return false, "", fmt.Errorf("error hashing %q: %w", configFile, err) + } + + return true, hash, err +} + +func (g *Generator) getForwardProxyContainer( + net *topology.Network, + ipAddress string, + hash string, +) Resource { + env := []string{"HASH_FILE_VALUE=" + hash} + proxy := struct { + Name string + DockerNetworkName string + InternalPort int + IPAddress string + Env []string + }{ + Name: net.Name, + DockerNetworkName: net.DockerName, + InternalPort: proxyInternalPort, + 
IPAddress: ipAddress, + Env: env, + } + + return Eval(tfForwardProxyT, &proxy) +} + +var tfForwardProxyT = template.Must(template.ParseFS(content, "templates/container-proxy.tf.tmpl")) diff --git a/testing/deployer/sprawl/internal/tfgen/res.go b/testing/deployer/sprawl/internal/tfgen/res.go new file mode 100644 index 000000000000..c48cd7d8f216 --- /dev/null +++ b/testing/deployer/sprawl/internal/tfgen/res.go @@ -0,0 +1,95 @@ +package tfgen + +import ( + "bytes" + "text/template" + + "github.com/hashicorp/go-hclog" + "github.com/hashicorp/hcl/v2/hclwrite" +) + +type FileResource struct { + name string + res Resource +} + +func (r *FileResource) Name() string { return r.name } + +func (r *FileResource) Commit(logger hclog.Logger) error { + val, err := r.res.Render() + if err != nil { + return err + } + _, err = UpdateFileIfDifferent(logger, []byte(val), r.name, 0644) + return err +} + +func File(name string, res Resource) *FileResource { + return &FileResource{name: name, res: res} +} + +func Text(s string) Resource { + return &textResource{text: s} +} + +func Embed(name string) Resource { + return &embedResource{name: name} +} + +func Eval(t *template.Template, data any) Resource { + return &evalResource{template: t, data: data, hcl: false} +} + +func HCL(t *template.Template, data any) Resource { + return &evalResource{template: t, data: data, hcl: true} +} + +type Resource interface { + Render() (string, error) +} + +type embedResource struct { + name string +} + +func (r *embedResource) Render() (string, error) { + val, err := content.ReadFile(r.name) + if err != nil { + return "", err + } + return string(val), nil +} + +type textResource struct { + text string +} + +func (r *textResource) Render() (string, error) { + return r.text, nil +} + +type evalResource struct { + template *template.Template + data any + hcl bool +} + +func (r *evalResource) Render() (string, error) { + out, err := StringTemplate(r.template, r.data) + if err != nil { + return "", err + } + + if r.hcl { + return string(hclwrite.Format([]byte(out))), nil + } + return out, nil +} + +func StringTemplate(t *template.Template, data any) (string, error) { + var res bytes.Buffer + if err := t.Execute(&res, data); err != nil { + return "", err + } + return res.String(), nil +} diff --git a/testing/deployer/sprawl/internal/tfgen/templates/container-app-dataplane.tf.tmpl b/testing/deployer/sprawl/internal/tfgen/templates/container-app-dataplane.tf.tmpl new file mode 100644 index 000000000000..bfb0705e6df8 --- /dev/null +++ b/testing/deployer/sprawl/internal/tfgen/templates/container-app-dataplane.tf.tmpl @@ -0,0 +1,29 @@ +resource "docker_container" "{{.Node.DockerName}}-{{.Service.ID.TFString}}-sidecar" { + name = "{{.Node.DockerName}}-{{.Service.ID.TFString}}-sidecar" + network_mode = "container:${docker_container.{{.PodName}}.id}" + image = docker_image.{{.DataplaneImageResource}}.latest + restart = "on-failure" + +{{- range $k, $v := .Labels }} + labels { + label = "{{ $k }}" + value = "{{ $v }}" + } +{{- end }} + + volumes { + volume_name = "{{.TLSVolumeName}}" + container_path = "/consul/config/certs" + read_only = true + } + + env = [ +{{- range .Env }} + "{{.}}", +{{- end}} + ] + + command = [ + "/usr/local/bin/consul-dataplane", + ] +} diff --git a/testing/deployer/sprawl/internal/tfgen/templates/container-app-sidecar.tf.tmpl b/testing/deployer/sprawl/internal/tfgen/templates/container-app-sidecar.tf.tmpl new file mode 100644 index 000000000000..6abb397c2534 --- /dev/null +++ 
b/testing/deployer/sprawl/internal/tfgen/templates/container-app-sidecar.tf.tmpl @@ -0,0 +1,31 @@ +resource "docker_container" "{{.Node.DockerName}}-{{.Service.ID.TFString}}-sidecar" { + name = "{{.Node.DockerName}}-{{.Service.ID.TFString}}-sidecar" + network_mode = "container:${docker_container.{{.PodName}}.id}" + image = docker_image.{{.EnvoyImageResource}}.latest + restart = "on-failure" + +{{- range $k, $v := .Labels }} + labels { + label = "{{ $k }}" + value = "{{ $v }}" + } +{{- end }} + + volumes { + volume_name = "{{.TLSVolumeName}}" + container_path = "/consul/config/certs" + read_only = true + } + + env = [ +{{- range .Env }} + "{{.}}", +{{- end}} + ] + + command = [ +{{- range .EnvoyCommand }} + "{{.}}", +{{- end }} + ] +} diff --git a/testing/deployer/sprawl/internal/tfgen/templates/container-app.tf.tmpl b/testing/deployer/sprawl/internal/tfgen/templates/container-app.tf.tmpl new file mode 100644 index 000000000000..b1b390f0f16c --- /dev/null +++ b/testing/deployer/sprawl/internal/tfgen/templates/container-app.tf.tmpl @@ -0,0 +1,25 @@ +resource "docker_container" "{{.Node.DockerName}}-{{.Service.ID.TFString}}" { + name = "{{.Node.DockerName}}-{{.Service.ID.TFString}}" + network_mode = "container:${docker_container.{{.PodName}}.id}" + image = docker_image.{{.AppImageResource}}.latest + restart = "on-failure" + +{{- range $k, $v := .Labels }} + labels { + label = "{{ $k }}" + value = "{{ $v }}" + } +{{- end }} + + env = [ +{{- range .Env }} + "{{.}}", +{{- end}} + ] + + command = [ +{{- range .Command }} + "{{.}}", +{{- end }} + ] +} diff --git a/testing/deployer/sprawl/internal/tfgen/templates/container-consul.tf.tmpl b/testing/deployer/sprawl/internal/tfgen/templates/container-consul.tf.tmpl new file mode 100644 index 000000000000..01f7f3fb4d7d --- /dev/null +++ b/testing/deployer/sprawl/internal/tfgen/templates/container-consul.tf.tmpl @@ -0,0 +1,40 @@ +resource "docker_container" "{{.Node.DockerName}}" { + name = "{{.Node.DockerName}}" + network_mode = "container:${docker_container.{{.PodName}}.id}" + image = docker_image.{{.ImageResource}}.latest + restart = "always" + + env = [ + "CONSUL_UID=0", + "CONSUL_GID=0", + "CONSUL_LICENSE={{.EnterpriseLicense}}", +{{- range .Env }} + "{{.}}", +{{- end}} + ] + +{{- range $k, $v := .Labels }} + labels { + label = "{{ $k }}" + value = "{{ $v }}" + } +{{- end }} + + command = [ + "agent", + "-hcl", + <<-EOT +{{ .HCL }} +EOT + ] + + volumes { + volume_name = "{{.Node.DockerName}}" + container_path = "/consul/data" + } + + volumes { + volume_name = "{{.TLSVolumeName}}" + container_path = "/consul/config/certs" + } +} diff --git a/testing/deployer/sprawl/internal/tfgen/templates/container-coredns.tf.tmpl b/testing/deployer/sprawl/internal/tfgen/templates/container-coredns.tf.tmpl new file mode 100644 index 000000000000..7789376a98f1 --- /dev/null +++ b/testing/deployer/sprawl/internal/tfgen/templates/container-coredns.tf.tmpl @@ -0,0 +1,28 @@ +resource "docker_container" "{{.DockerNetworkName}}-coredns" { + name = "{{.DockerNetworkName}}-coredns" + image = docker_image.coredns.latest + restart = "always" + dns = ["8.8.8.8"] + + networks_advanced { + name = docker_network.{{.DockerNetworkName}}.name + ipv4_address = "{{.IPAddress}}" + } + + env = [ +{{- range .Env }} + "{{.}}", +{{- end}} + ] + + volumes { + host_path = abspath("coredns-config-{{.Name}}") + container_path = "/config" + read_only = true + } + + command = [ + "-conf", + "/config/Corefile", + ] +} diff --git 
a/testing/deployer/sprawl/internal/tfgen/templates/container-mgw.tf.tmpl b/testing/deployer/sprawl/internal/tfgen/templates/container-mgw.tf.tmpl new file mode 100644 index 000000000000..ec25665f3ed8 --- /dev/null +++ b/testing/deployer/sprawl/internal/tfgen/templates/container-mgw.tf.tmpl @@ -0,0 +1,25 @@ +resource "docker_container" "{{.Node.DockerName}}-{{.Service.ID.TFString}}" { + name = "{{.Node.DockerName}}-{{.Service.ID.TFString}}" + network_mode = "container:${docker_container.{{.PodName}}.id}" + image = docker_image.{{.EnvoyImageResource}}.latest + restart = "on-failure" + +{{- range $k, $v := .Labels }} + labels { + label = "{{ $k }}" + value = "{{ $v }}" + } +{{- end }} + + volumes { + volume_name = "{{.TLSVolumeName}}" + container_path = "/consul/config/certs" + read_only = true + } + + command = [ +{{- range .Command }} + "{{.}}", +{{- end }} + ] +} diff --git a/testing/deployer/sprawl/internal/tfgen/templates/container-pause.tf.tmpl b/testing/deployer/sprawl/internal/tfgen/templates/container-pause.tf.tmpl new file mode 100644 index 000000000000..1f1627b0719b --- /dev/null +++ b/testing/deployer/sprawl/internal/tfgen/templates/container-pause.tf.tmpl @@ -0,0 +1,38 @@ +resource "docker_container" "{{.PodName}}" { + name = "{{.PodName}}" + image = docker_image.pause.latest + hostname = "{{.PodName}}" + restart = "always" + dns = ["{{.DNSAddress}}"] + +{{- range $k, $v := .Labels }} + labels { + label = "{{ $k }}" + value = "{{ $v }}" + } +{{- end }} + +depends_on = [ + docker_container.{{.DockerNetworkName}}-coredns, + docker_container.{{.DockerNetworkName}}-forwardproxy, +] + +{{- range .Ports }} +ports { + internal = {{.}} +} +{{- end }} + +{{- range .Node.Addresses }} +networks_advanced { + name = docker_network.{{.DockerNetworkName}}.name + ipv4_address = "{{.IPAddress}}" +} +{{- end }} +} + +output "ports_{{.Node.Cluster}}_{{.Node.Partition}}_{{.Node.Name}}" { + value = { + for port in docker_container.{{.PodName}}.ports : port.internal => port.external + } +} diff --git a/testing/deployer/sprawl/internal/tfgen/templates/container-proxy.tf.tmpl b/testing/deployer/sprawl/internal/tfgen/templates/container-proxy.tf.tmpl new file mode 100644 index 000000000000..ed44d8343fe8 --- /dev/null +++ b/testing/deployer/sprawl/internal/tfgen/templates/container-proxy.tf.tmpl @@ -0,0 +1,33 @@ +resource "docker_container" "{{.DockerNetworkName}}-forwardproxy" { + name = "{{.DockerNetworkName}}-forwardproxy" + image = docker_image.nginx.latest + restart = "always" + dns = ["8.8.8.8"] + + ports { + internal = {{.InternalPort}} + } + + networks_advanced { + name = docker_network.{{.DockerNetworkName}}.name + ipv4_address = "{{.IPAddress}}" + } + + env = [ +{{- range .Env }} + "{{.}}", +{{- end}} + ] + + volumes { + host_path = abspath("nginx-config-{{.Name}}/nginx.conf") + container_path = "/etc/nginx/conf.d/default.conf" + read_only = true + } +} + +output "forwardproxyport_{{.Name}}" { + value = { + for port in docker_container.{{.DockerNetworkName}}-forwardproxy.ports : port.internal => port.external + } +} diff --git a/testing/deployer/sprawl/internal/tfgen/tfgen.go b/testing/deployer/sprawl/internal/tfgen/tfgen.go new file mode 100644 index 000000000000..7eeb84b16789 --- /dev/null +++ b/testing/deployer/sprawl/internal/tfgen/tfgen.go @@ -0,0 +1,15 @@ +package tfgen + +import ( + "embed" +) + +//go:embed templates/container-app-dataplane.tf.tmpl +//go:embed templates/container-app-sidecar.tf.tmpl +//go:embed templates/container-app.tf.tmpl +//go:embed templates/container-consul.tf.tmpl 
+//go:embed templates/container-mgw.tf.tmpl +//go:embed templates/container-pause.tf.tmpl +//go:embed templates/container-proxy.tf.tmpl +//go:embed templates/container-coredns.tf.tmpl +var content embed.FS diff --git a/testing/deployer/sprawl/peering.go b/testing/deployer/sprawl/peering.go new file mode 100644 index 000000000000..e88786a1b0d4 --- /dev/null +++ b/testing/deployer/sprawl/peering.go @@ -0,0 +1,165 @@ +package sprawl + +import ( + "context" + "fmt" + "strings" + "time" + + "github.com/hashicorp/consul/api" + "github.com/hashicorp/go-hclog" + + "github.com/hashicorp/consul/testing/deployer/topology" +) + +// TODO: this is definitely a grpc resolver/balancer issue to look into +const grpcWeirdError = `transport: Error while dialing failed to find Consul server for global address` + +func isWeirdGRPCError(err error) bool { + if err == nil { + return false + } + return strings.Contains(err.Error(), grpcWeirdError) +} + +func (s *Sprawl) initPeerings() error { + // TODO: wait until services are healthy? wait until mesh gateways work? + // if err := s.generator.Generate(tfgen.StepPeering); err != nil { + // return fmt.Errorf("generator[peering]: %w", err) + // } + + var ( + logger = s.logger.Named("peering") + _ = logger + ) + + for _, peering := range s.topology.Peerings { + dialingCluster, ok := s.topology.Clusters[peering.Dialing.Name] + if !ok { + return fmt.Errorf("peering references dialing cluster that does not exist: %s", peering.String()) + } + acceptingCluster, ok := s.topology.Clusters[peering.Accepting.Name] + if !ok { + return fmt.Errorf("peering references accepting cluster that does not exist: %s", peering.String()) + } + + var ( + dialingClient = s.clients[dialingCluster.Name] + acceptingClient = s.clients[acceptingCluster.Name] + ) + + // TODO: allow for use of ServerExternalAddresses + + req1 := api.PeeringGenerateTokenRequest{ + PeerName: peering.Accepting.PeerName, + } + if acceptingCluster.Enterprise { + req1.Partition = peering.Accepting.Partition + } + + GENTOKEN: + resp, _, err := acceptingClient.Peerings().GenerateToken(context.Background(), req1, nil) + if err != nil { + if isWeirdGRPCError(err) { + time.Sleep(50 * time.Millisecond) + goto GENTOKEN + } + return fmt.Errorf("error generating peering token for %q: %w", peering.String(), err) + } + + peeringToken := resp.PeeringToken + logger.Info("generated peering token", "peering", peering.String()) + + req2 := api.PeeringEstablishRequest{ + PeerName: peering.Dialing.PeerName, + PeeringToken: peeringToken, + } + if dialingCluster.Enterprise { + req2.Partition = peering.Dialing.Partition + } + + logger.Info("establishing peering with token", "peering", peering.String()) + ESTABLISH: + _, _, err = dialingClient.Peerings().Establish(context.Background(), req2, nil) + if err != nil { + if isWeirdGRPCError(err) { + time.Sleep(50 * time.Millisecond) + goto ESTABLISH + } + return fmt.Errorf("error establishing peering with token for %q: %w", peering.String(), err) + } + + logger.Info("peering established", "peering", peering.String()) + } + + return nil +} + +func (s *Sprawl) waitForPeeringEstablishment() error { + var ( + logger = s.logger.Named("peering") + ) + + for _, peering := range s.topology.Peerings { + dialingCluster, ok := s.topology.Clusters[peering.Dialing.Name] + if !ok { + return fmt.Errorf("peering references dialing cluster that does not exist: %s", peering.String()) + } + acceptingCluster, ok := s.topology.Clusters[peering.Accepting.Name] + if !ok { + return fmt.Errorf("peering references 
accepting cluster that does not exist: %s", peering.String()) + } + + var ( + dialingClient = s.clients[dialingCluster.Name] + acceptingClient = s.clients[acceptingCluster.Name] + + dialingLogger = logger.With( + "cluster", dialingCluster.Name, + "peering", peering.String(), + ) + acceptingLogger = logger.With( + "cluster", acceptingCluster.Name, + "peering", peering.String(), + ) + ) + + s.checkPeeringDirection(dialingLogger, dialingClient, peering.Dialing, dialingCluster.Enterprise) + s.checkPeeringDirection(acceptingLogger, acceptingClient, peering.Accepting, acceptingCluster.Enterprise) + } + return nil +} + +func (s *Sprawl) checkPeeringDirection(logger hclog.Logger, client *api.Client, pc topology.PeerCluster, enterprise bool) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + for { + opts := &api.QueryOptions{} + if enterprise { + opts.Partition = pc.Partition + } + res, _, err := client.Peerings().Read(ctx, pc.PeerName, opts) + if isWeirdGRPCError(err) { + time.Sleep(50 * time.Millisecond) + continue + } + if err != nil { + logger.Info("error looking up peering", "error", err) + time.Sleep(100 * time.Millisecond) + continue + } + if res == nil { + logger.Info("peering not found") + time.Sleep(100 * time.Millisecond) + continue + } + + if res.State == api.PeeringStateActive { + logger.Info("peering is active") + return + } + logger.Info("peering not active yet", "state", res.State) + time.Sleep(500 * time.Millisecond) + } +} diff --git a/testing/deployer/sprawl/sprawl.go b/testing/deployer/sprawl/sprawl.go new file mode 100644 index 000000000000..7a3335bc0a1f --- /dev/null +++ b/testing/deployer/sprawl/sprawl.go @@ -0,0 +1,464 @@ +package sprawl + +import ( + "bufio" + "bytes" + "context" + "fmt" + "io" + "net/http" + "os" + "path/filepath" + "strings" + "time" + + "github.com/hashicorp/consul/api" + "github.com/hashicorp/go-hclog" + "github.com/hashicorp/go-multierror" + "github.com/mitchellh/copystructure" + + "github.com/hashicorp/consul/testing/deployer/sprawl/internal/runner" + "github.com/hashicorp/consul/testing/deployer/sprawl/internal/secrets" + "github.com/hashicorp/consul/testing/deployer/sprawl/internal/tfgen" + "github.com/hashicorp/consul/testing/deployer/topology" + "github.com/hashicorp/consul/testing/deployer/util" +) + +// TODO: manage workdir externally without chdir + +// Sprawl is the definition of a complete running Consul deployment topology. +type Sprawl struct { + logger hclog.Logger + runner *runner.Runner + license string + secrets secrets.Store + + workdir string + + // set during Run + config *topology.Config + topology *topology.Topology + generator *tfgen.Generator + + clients map[string]*api.Client // one per cluster +} + +// Topology allows access to the topology that defines the resources. Do not +// write to any of these fields. 
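+// (Config, by contrast, returns a deep copy that is safe to modify.)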
+func (s *Sprawl) Topology() *topology.Topology { + return s.topology +} + +func (s *Sprawl) Config() *topology.Config { + c2, err := copyConfig(s.config) + if err != nil { + panic(err) + } + return c2 +} + +func (s *Sprawl) HTTPClientForCluster(clusterName string) (*http.Client, error) { + cluster, ok := s.topology.Clusters[clusterName] + if !ok { + return nil, fmt.Errorf("no such cluster: %s", clusterName) + } + + // grab the local network for the cluster + network, ok := s.topology.Networks[cluster.NetworkName] + if !ok { + return nil, fmt.Errorf("no such network: %s", cluster.NetworkName) + } + + transport, err := util.ProxyHTTPTransport(network.ProxyPort) + if err != nil { + return nil, err + } + + return &http.Client{Transport: transport}, nil +} + +// APIClientForNode gets a pooled api.Client connected to the agent running on +// the provided node. +// +// Passing an empty token will assume the bootstrap token. If you want to +// actually use the anonymous token say "-". +func (s *Sprawl) APIClientForNode(clusterName string, nid topology.NodeID, token string) (*api.Client, error) { + cluster, ok := s.topology.Clusters[clusterName] + if !ok { + return nil, fmt.Errorf("no such cluster: %s", clusterName) + } + + nid.Normalize() + + node := cluster.NodeByID(nid) + if !node.IsAgent() { + return nil, fmt.Errorf("node is not an agent") + } + + switch token { + case "": + token = s.secrets.ReadGeneric(clusterName, secrets.BootstrapToken) + case "-": + token = "" + } + + return util.ProxyAPIClient( + node.LocalProxyPort(), + node.LocalAddress(), + 8500, + token, + ) +} + +func copyConfig(cfg *topology.Config) (*topology.Config, error) { + dup, err := copystructure.Copy(cfg) + if err != nil { + return nil, err + } + return dup.(*topology.Config), nil +} + +// Launch will create the topology defined by the provided configuration and +// bring up all of the relevant clusters. Once created the Stop method must be +// called to destroy everything. +func Launch( + logger hclog.Logger, + workdir string, + cfg *topology.Config, +) (*Sprawl, error) { + if logger == nil { + panic("logger is required") + } + if workdir == "" { + panic("workdir is required") + } + + if err := os.MkdirAll(filepath.Join(workdir, "terraform"), 0755); err != nil { + return nil, err + } + + runner, err := runner.Load(logger) + if err != nil { + return nil, err + } + + // Copy this to avoid leakage. + cfg, err = copyConfig(cfg) + if err != nil { + return nil, err + } + + s := &Sprawl{ + logger: logger, + runner: runner, + workdir: workdir, + clients: make(map[string]*api.Client), + } + + if err := s.ensureLicense(); err != nil { + return nil, err + } + + // Copy this AGAIN, BEFORE compiling so we capture the original definition, without denorms. + s.config, err = copyConfig(cfg) + if err != nil { + return nil, err + } + + s.topology, err = topology.Compile(logger.Named("compile"), cfg) + if err != nil { + return nil, fmt.Errorf("topology.Compile: %w", err) + } + + s.logger.Info("compiled topology", "ct", jd(s.topology)) // TODO + + start := time.Now() + if err := s.launch(); err != nil { + return nil, err + } + s.logger.Info("topology is ready for use", "elapsed", time.Since(start)) + + if err := s.PrintDetails(); err != nil { + return nil, fmt.Errorf("error gathering diagnostic details: %w", err) + } + + return s, nil +} + +func (s *Sprawl) Relaunch( + cfg *topology.Config, +) error { + // Copy this BEFORE compiling so we capture the original definition, without denorms. 
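+ // Recompile reuses the previous topology ID, so docker network, container, and volume names stay stable across relaunches.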
+ var err error + s.config, err = copyConfig(cfg) + if err != nil { + return err + } + + newTopology, err := topology.Recompile(s.logger.Named("recompile"), cfg, s.topology) + if err != nil { + return fmt.Errorf("topology.Compile: %w", err) + } + + s.topology = newTopology + + s.logger.Info("compiled replacement topology", "ct", jd(s.topology)) // TODO + + start := time.Now() + if err := s.relaunch(); err != nil { + return err + } + s.logger.Info("topology is ready for use", "elapsed", time.Since(start)) + + if err := s.PrintDetails(); err != nil { + return fmt.Errorf("error gathering diagnostic details: %w", err) + } + + return nil +} + +// Leader returns the cluster leader agent, or an error if no leader is +// available. +func (s *Sprawl) Leader(clusterName string) (*topology.Node, error) { + cluster, ok := s.topology.Clusters[clusterName] + if !ok { + return nil, fmt.Errorf("no such cluster: %s", clusterName) + } + + var ( + client = s.clients[cluster.Name] + // logger = s.logger.With("cluster", cluster.Name) + ) + + leaderAddr, err := getLeader(client) + if err != nil { + return nil, err + } + + for _, node := range cluster.Nodes { + if !node.IsServer() || node.Disabled { + continue + } + if strings.HasPrefix(leaderAddr, node.LocalAddress()+":") { + return node, nil + } + } + + return nil, fmt.Errorf("leader not found") +} + +// Followers returns the cluster following servers. +func (s *Sprawl) Followers(clusterName string) ([]*topology.Node, error) { + cluster, ok := s.topology.Clusters[clusterName] + if !ok { + return nil, fmt.Errorf("no such cluster: %s", clusterName) + } + + leaderNode, err := s.Leader(clusterName) + if err != nil { + return nil, fmt.Errorf("could not determine leader: %w", err) + } + + var followers []*topology.Node + + for _, node := range cluster.Nodes { + if !node.IsServer() || node.Disabled { + continue + } + if node.ID() != leaderNode.ID() { + followers = append(followers, node) + } + } + + return followers, nil +} + +func (s *Sprawl) DisabledServers(clusterName string) ([]*topology.Node, error) { + cluster, ok := s.topology.Clusters[clusterName] + if !ok { + return nil, fmt.Errorf("no such cluster: %s", clusterName) + } + + var servers []*topology.Node + + for _, node := range cluster.Nodes { + if !node.IsServer() || !node.Disabled { + continue + } + servers = append(servers, node) + } + + return servers, nil +} + +func (s *Sprawl) StopContainer(ctx context.Context, containerName string) error { + return s.runner.DockerExec(ctx, []string{"stop", containerName}, nil, nil) +} + +func (s *Sprawl) SnapshotEnvoy(ctx context.Context) error { + snapDir := filepath.Join(s.workdir, "envoy-snapshots") + if err := os.MkdirAll(snapDir, 0755); err != nil { + return fmt.Errorf("could not create envoy snapshot output dir %s: %w", snapDir, err) + } + + targets := map[string]string{ + "config_dump.json": "config_dump", + "clusters.json": "clusters?format=json", + "stats.txt": "stats", + "stats_prometheus.txt": "stats/prometheus", + } + + var merr error + for _, c := range s.topology.Clusters { + client, err := s.HTTPClientForCluster(c.Name) + if err != nil { + return fmt.Errorf("could not get http client for cluster %q: %w", c.Name, err) + } + + for _, n := range c.Nodes { + if n.Disabled { + continue + } + for _, s := range n.Services { + if s.Disabled || s.EnvoyAdminPort <= 0 { + continue + } + prefix := fmt.Sprintf("http://%s:%d", n.LocalAddress(), s.EnvoyAdminPort) + + for fn, target := range targets { + u := prefix + "/" + target + + body, err := scrapeURL(client, u) + 
if err != nil { + merr = multierror.Append(merr, fmt.Errorf("could not scrape %q for %s on %s: %w", + target, s.ID.String(), n.ID().String(), err, + )) + continue + } + + outFn := filepath.Join(snapDir, n.DockerName()+"--"+s.ID.TFString()+"."+fn) + + if err := os.WriteFile(outFn+".tmp", body, 0644); err != nil { + merr = multierror.Append(merr, fmt.Errorf("could not write output %q for %s on %s: %w", + target, s.ID.String(), n.ID().String(), err, + )) + continue + } + + if err := os.Rename(outFn+".tmp", outFn); err != nil { + merr = multierror.Append(merr, fmt.Errorf("could not write output %q for %s on %s: %w", + target, s.ID.String(), n.ID().String(), err, + )) + continue + } + } + } + } + } + return merr +} + +func scrapeURL(client *http.Client, url string) ([]byte, error) { + res, err := client.Get(url) + if err != nil { + return nil, err + } + defer res.Body.Close() + + body, err := io.ReadAll(res.Body) + if err != nil { + return nil, err + } + return body, nil +} + +func (s *Sprawl) CaptureLogs(ctx context.Context) error { + logDir := filepath.Join(s.workdir, "logs") + if err := os.MkdirAll(logDir, 0755); err != nil { + return fmt.Errorf("could not create log output dir %s: %w", logDir, err) + } + + containers, err := s.listContainers(ctx) + if err != nil { + return err + } + + s.logger.Info("Capturing logs") + + var merr error + for _, container := range containers { + if err := s.dumpContainerLogs(ctx, container, logDir); err != nil { + merr = multierror.Append(merr, fmt.Errorf("could not dump logs for container %s: %w", container, err)) + } + } + + return merr +} + +// Dump known containers out of terraform state file. +func (s *Sprawl) listContainers(ctx context.Context) ([]string, error) { + tfdir := filepath.Join(s.workdir, "terraform") + + var buf bytes.Buffer + if err := s.runner.TerraformExec(ctx, []string{"state", "list"}, &buf, tfdir); err != nil { + return nil, fmt.Errorf("error listing containers in terraform state file: %w", err) + } + + var ( + scan = bufio.NewScanner(&buf) + containers []string + ) + for scan.Scan() { + line := strings.TrimSpace(scan.Text()) + + name := strings.TrimPrefix(line, "docker_container.") + if name != line { + containers = append(containers, name) + continue + } + } + if err := scan.Err(); err != nil { + return nil, err + } + + return containers, nil +} + +func (s *Sprawl) dumpContainerLogs(ctx context.Context, containerName, outputRoot string) error { + path := filepath.Join(outputRoot, containerName+".log") + + f, err := os.Create(path + ".tmp") + if err != nil { + return err + } + keep := false + defer func() { + _ = f.Close() + if !keep { + _ = os.Remove(path + ".tmp") + _ = os.Remove(path) + } + }() + + err = s.runner.DockerExecWithStderr( + ctx, + []string{"logs", containerName}, + f, + f, + nil, + ) + if err != nil { + return err + } + + if err := f.Close(); err != nil { + return err + } + + if err := os.Rename(path+".tmp", path); err != nil { + return err + } + + keep = true + return nil +} diff --git a/testing/deployer/sprawl/sprawltest/sprawltest.go b/testing/deployer/sprawl/sprawltest/sprawltest.go new file mode 100644 index 000000000000..23ff44779b2c --- /dev/null +++ b/testing/deployer/sprawl/sprawltest/sprawltest.go @@ -0,0 +1,202 @@ +package sprawltest + +import ( + "context" + "fmt" + "io" + "os" + "os/exec" + "path/filepath" + "sync" + "testing" + + "github.com/hashicorp/consul/sdk/testutil" + "github.com/hashicorp/go-hclog" + "github.com/hashicorp/go-multierror" + "github.com/stretchr/testify/require" + + 
"github.com/hashicorp/consul/testing/deployer/sprawl" + "github.com/hashicorp/consul/testing/deployer/sprawl/internal/runner" + "github.com/hashicorp/consul/testing/deployer/topology" +) + +// TODO(rb): move comments to doc.go + +var ( + // set SPRAWL_WORKDIR_ROOT in the environment to have the test output + // coalesced in here. By default it uses a directory called "workdir" in + // each package. + workdirRoot string + + // set SPRAWL_KEEP_WORKDIR=1 in the environment to keep the workdir output + // intact. Files are all destroyed by default. + keepWorkdirOnFail bool + + // set SPRAWL_KEEP_RUNNING=1 in the environment to keep the workdir output + // intact and also refrain from tearing anything down. Things are all + // destroyed by default. + // + // SPRAWL_KEEP_RUNNING=1 implies SPRAWL_KEEP_WORKDIR=1 + keepRunningOnFail bool + + // set SPRAWL_SKIP_OLD_CLEANUP to prevent the library from tearing down and + // removing anything found in the working directory at init time. The + // default behavior is to do this. + skipOldCleanup bool +) + +var cleanupPriorRunOnce sync.Once + +func init() { + if root := os.Getenv("SPRAWL_WORKDIR_ROOT"); root != "" { + fmt.Fprintf(os.Stdout, "INFO: sprawltest: SPRAWL_WORKDIR_ROOT set; using %q as output root\n", root) + workdirRoot = root + } else { + workdirRoot = "workdir" + } + + if os.Getenv("SPRAWL_KEEP_WORKDIR") == "1" { + keepWorkdirOnFail = true + fmt.Fprintf(os.Stdout, "INFO: sprawltest: SPRAWL_KEEP_WORKDIR set; not destroying workdir on failure\n") + } + + if os.Getenv("SPRAWL_KEEP_RUNNING") == "1" { + keepRunningOnFail = true + keepWorkdirOnFail = true + fmt.Fprintf(os.Stdout, "INFO: sprawltest: SPRAWL_KEEP_RUNNING set; not tearing down resources on failure\n") + } + + if os.Getenv("SPRAWL_SKIP_OLD_CLEANUP") == "1" { + skipOldCleanup = true + fmt.Fprintf(os.Stdout, "INFO: sprawltest: SPRAWL_SKIP_OLD_CLEANUP set; not cleaning up anything found in %q\n", workdirRoot) + } + + if !skipOldCleanup { + cleanupPriorRunOnce.Do(func() { + fmt.Fprintf(os.Stdout, "INFO: sprawltest: triggering cleanup of any prior test runs\n") + CleanupWorkingDirectories() + }) + } +} + +// Launch will create the topology defined by the provided configuration and +// bring up all of the relevant clusters. +// +// - Logs will be routed to (*testing.T).Logf. +// +// - By default everything will be stopped and removed via +// (*testing.T).Cleanup. For failed tests, this can be skipped by setting the +// environment variable SKIP_TEARDOWN=1. 
+func Launch(t *testing.T, cfg *topology.Config) *sprawl.Sprawl { + SkipIfTerraformNotPresent(t) + sp, err := sprawl.Launch( + testutil.Logger(t), + initWorkingDirectory(t), + cfg, + ) + require.NoError(t, err) + stopOnCleanup(t, sp) + return sp +} + +func initWorkingDirectory(t *testing.T) string { + // TODO(rb): figure out how to get the calling package which we can put in + // the middle here, which is likely 2 call frames away so maybe + // runtime.Callers can help + scratchDir := filepath.Join(workdirRoot, t.Name()) + _ = os.RemoveAll(scratchDir) // cleanup prior runs + if err := os.MkdirAll(scratchDir, 0755); err != nil { + t.Fatalf("error: %v", err) + } + + t.Cleanup(func() { + if t.Failed() && keepWorkdirOnFail { + t.Logf("test failed; leaving sprawl terraform definitions in: %s", scratchDir) + } else { + _ = os.RemoveAll(scratchDir) + } + }) + + return scratchDir +} + +func stopOnCleanup(t *testing.T, sp *sprawl.Sprawl) { + t.Cleanup(func() { + if t.Failed() && keepWorkdirOnFail { + // It's only worth it to capture the logs if we aren't going to + // immediately discard them. + if err := sp.CaptureLogs(context.Background()); err != nil { + t.Logf("log capture encountered failures: %v", err) + } + if err := sp.SnapshotEnvoy(context.Background()); err != nil { + t.Logf("envoy snapshot capture encountered failures: %v", err) + } + } + + if t.Failed() && keepRunningOnFail { + t.Log("test failed; leaving sprawl running") + } else { + //nolint:errcheck + sp.Stop() + } + }) +} + +// CleanupWorkingDirectories is meant to run in an init() once at the start of +// any tests. +func CleanupWorkingDirectories() { + fi, err := os.ReadDir(workdirRoot) + if os.IsNotExist(err) { + return + } else if err != nil { + fmt.Fprintf(os.Stderr, "WARN: sprawltest: unable to scan 'workdir' for prior runs to cleanup\n") + return + } else if len(fi) == 0 { + fmt.Fprintf(os.Stdout, "INFO: sprawltest: no prior tests to clean up\n") + return + } + + r, err := runner.Load(hclog.NewNullLogger()) + if err != nil { + fmt.Fprintf(os.Stderr, "WARN: sprawltest: unable to look for 'terraform' and 'docker' binaries\n") + return + } + + ctx := context.Background() + + for _, d := range fi { + if !d.IsDir() { + continue + } + path := filepath.Join(workdirRoot, d.Name(), "terraform") + + fmt.Fprintf(os.Stdout, "INFO: sprawltest: cleaning up failed prior run in: %s\n", path) + + err := r.TerraformExec(ctx, []string{ + "init", "-input=false", + }, io.Discard, path) + + err2 := r.TerraformExec(ctx, []string{ + "destroy", "-input=false", "-auto-approve", "-refresh=false", + }, io.Discard, path) + + if err2 != nil { + err = multierror.Append(err, err2) + } + + if err != nil { + fmt.Fprintf(os.Stderr, "WARN: sprawltest: could not clean up failed prior run in: %s: %v\n", path, err) + } else { + _ = os.RemoveAll(path) + } + } +} + +func SkipIfTerraformNotPresent(t *testing.T) { + const terraformBinaryName = "terraform" + + path, err := exec.LookPath(terraformBinaryName) + if err != nil || path == "" { + t.Skipf("%q not found on $PATH - download and install to run this test", terraformBinaryName) + } +} diff --git a/testing/deployer/sprawl/sprawltest/test_test.go b/testing/deployer/sprawl/sprawltest/test_test.go new file mode 100644 index 000000000000..cdbeb4be52e5 --- /dev/null +++ b/testing/deployer/sprawl/sprawltest/test_test.go @@ -0,0 +1,180 @@ +package sprawltest_test + +import ( + "strconv" + "testing" + + "github.com/hashicorp/consul/api" + "github.com/stretchr/testify/require" + + 
"github.com/hashicorp/consul/testing/deployer/sprawl/sprawltest" + "github.com/hashicorp/consul/testing/deployer/topology" +) + +func TestSprawl(t *testing.T) { + serversDC1 := newTopologyServerSet("dc1-server", 3, []string{"dc1", "wan"}, nil) + serversDC2 := newTopologyServerSet("dc2-server", 3, []string{"dc2", "wan"}, nil) + + cfg := &topology.Config{ + Networks: []*topology.Network{ + {Name: "dc1"}, + {Name: "dc2"}, + {Name: "wan", Type: "wan"}, + }, + Clusters: []*topology.Cluster{ + { + Name: "dc1", + Nodes: topology.MergeSlices(serversDC1, []*topology.Node{ + { + Kind: topology.NodeKindClient, + Name: "dc1-client1", + Services: []*topology.Service{ + { + ID: topology.ServiceID{Name: "mesh-gateway"}, + Port: 8443, + EnvoyAdminPort: 19000, + IsMeshGateway: true, + }, + }, + }, + { + Kind: topology.NodeKindClient, + Name: "dc1-client2", + Services: []*topology.Service{ + { + ID: topology.ServiceID{Name: "ping"}, + Image: "rboyer/pingpong:latest", + Port: 8080, + EnvoyAdminPort: 19000, + Command: []string{ + "-bind", "0.0.0.0:8080", + "-dial", "127.0.0.1:9090", + "-pong-chaos", + "-dialfreq", "250ms", + "-name", "ping", + }, + Upstreams: []*topology.Upstream{{ + ID: topology.ServiceID{Name: "pong"}, + LocalPort: 9090, + Peer: "peer-dc2-default", + }}, + }, + }, + }, + }), + InitialConfigEntries: []api.ConfigEntry{ + &api.ExportedServicesConfigEntry{ + Name: "default", + Services: []api.ExportedService{{ + Name: "ping", + Consumers: []api.ServiceConsumer{{ + Peer: "peer-dc2-default", + }}, + }}, + }, + }, + }, + { + Name: "dc2", + Nodes: topology.MergeSlices(serversDC2, []*topology.Node{ + { + Kind: topology.NodeKindClient, + Name: "dc2-client1", + Services: []*topology.Service{ + { + ID: topology.ServiceID{Name: "mesh-gateway"}, + Port: 8443, + EnvoyAdminPort: 19000, + IsMeshGateway: true, + }, + }, + }, + { + Kind: topology.NodeKindDataplane, + Name: "dc2-client2", + Services: []*topology.Service{ + { + ID: topology.ServiceID{Name: "pong"}, + Image: "rboyer/pingpong:latest", + Port: 8080, + EnvoyAdminPort: 19000, + Command: []string{ + "-bind", "0.0.0.0:8080", + "-dial", "127.0.0.1:9090", + "-pong-chaos", + "-dialfreq", "250ms", + "-name", "pong", + }, + Upstreams: []*topology.Upstream{{ + ID: topology.ServiceID{Name: "ping"}, + LocalPort: 9090, + Peer: "peer-dc1-default", + }}, + }, + }, + }, + }), + InitialConfigEntries: []api.ConfigEntry{ + &api.ExportedServicesConfigEntry{ + Name: "default", + Services: []api.ExportedService{{ + Name: "ping", + Consumers: []api.ServiceConsumer{{ + Peer: "peer-dc2-default", + }}, + }}, + }, + }, + }, + }, + Peerings: []*topology.Peering{{ + Dialing: topology.PeerCluster{ + Name: "dc1", + }, + Accepting: topology.PeerCluster{ + Name: "dc2", + }, + }}, + } + + sp := sprawltest.Launch(t, cfg) + + for _, cluster := range sp.Topology().Clusters { + leader, err := sp.Leader(cluster.Name) + require.NoError(t, err) + t.Logf("%s: leader = %s", cluster.Name, leader.ID()) + + followers, err := sp.Followers(cluster.Name) + require.NoError(t, err) + for _, f := range followers { + t.Logf("%s: follower = %s", cluster.Name, f.ID()) + } + } +} + +func newTopologyServerSet( + namePrefix string, + num int, + networks []string, + mutateFn func(i int, node *topology.Node), +) []*topology.Node { + var out []*topology.Node + for i := 1; i <= num; i++ { + name := namePrefix + strconv.Itoa(i) + + node := &topology.Node{ + Kind: topology.NodeKindServer, + Name: name, + } + for _, net := range networks { + node.Addresses = append(node.Addresses, &topology.Address{Network: 
net}) + } + + if mutateFn != nil { + mutateFn(i, node) + } + + out = append(out, node) + } + return out +} diff --git a/testing/deployer/sprawl/tls.go b/testing/deployer/sprawl/tls.go new file mode 100644 index 000000000000..748e85dd6b6f --- /dev/null +++ b/testing/deployer/sprawl/tls.go @@ -0,0 +1,114 @@ +package sprawl + +import ( + "bytes" + "context" + "fmt" + "io" + + "github.com/hashicorp/consul/testing/deployer/topology" +) + +const ( + consulUID = "100" + consulGID = "1000" + consulUserArg = consulUID + ":" + consulGID +) + +func tlsPrefixFromNode(node *topology.Node) string { + switch node.Kind { + case topology.NodeKindServer: + return node.Partition + "." + node.Name + ".server" + case topology.NodeKindClient: + return node.Partition + "." + node.Name + ".client" + default: + return "" + } +} + +func tlsCertCreateCommand(node *topology.Node) string { + if node.IsServer() { + return fmt.Sprintf(`consul tls cert create -server -dc=%s -node=%s`, node.Datacenter, node.PodName()) + } else { + return fmt.Sprintf(`consul tls cert create -client -dc=%s`, node.Datacenter) + } +} + +func (s *Sprawl) initTLS(ctx context.Context) error { + for _, cluster := range s.topology.Clusters { + + var buf bytes.Buffer + + // Create the CA if not already done, and proceed to do all of the + // consul CLI calls inside of a throwaway temp directory. + buf.WriteString(` +if [[ ! -f consul-agent-ca-key.pem || ! -f consul-agent-ca.pem ]]; then + consul tls ca create +fi +rm -rf tmp +mkdir -p tmp +cp -a consul-agent-ca-key.pem consul-agent-ca.pem tmp +cd tmp +`) + + for _, node := range cluster.Nodes { + if !node.IsAgent() || node.Disabled { + continue + } + + node.TLSCertPrefix = tlsPrefixFromNode(node) + if node.TLSCertPrefix == "" { + continue + } + + expectPrefix := cluster.Datacenter + "-" + string(node.Kind) + "-consul-0" + + // Conditionally generate these in isolation and rename them to + // not rely upon the numerical indexing. + buf.WriteString(fmt.Sprintf(` +if [[ ! -f %[1]s || ! -f %[2]s ]]; then + rm -f %[3]s %[4]s + %[5]s + mv -f %[3]s %[1]s + mv -f %[4]s %[2]s +fi +`, + "../"+node.TLSCertPrefix+"-key.pem", "../"+node.TLSCertPrefix+".pem", + expectPrefix+"-key.pem", expectPrefix+".pem", + tlsCertCreateCommand(node), + )) + } + + err := s.runner.DockerExec(ctx, []string{ + "run", + "--rm", + "-i", + "--net=none", + "-v", cluster.TLSVolumeName + ":/data", + "busybox:latest", + "sh", "-c", + // Need this so the permissions stick; docker seems to treat unused volumes differently. 
+ `touch /data/VOLUME_PLACEHOLDER && chown -R ` + consulUserArg + ` /data`, + }, io.Discard, nil) + if err != nil { + return fmt.Errorf("could not initialize docker volume for cert data %q: %w", cluster.TLSVolumeName, err) + } + + err = s.runner.DockerExec(ctx, []string{"run", + "--rm", + "-i", + "--net=none", + "-u", consulUserArg, + "-v", cluster.TLSVolumeName + ":/data", + "-w", "/data", + "--entrypoint", "", + cluster.Images.Consul, + "/bin/sh", "-ec", buf.String(), + }, io.Discard, nil) + if err != nil { + return fmt.Errorf("could not create all necessary TLS certificates in docker volume: %v", err) + } + } + + return nil +} diff --git a/testing/deployer/topology/compile.go b/testing/deployer/topology/compile.go new file mode 100644 index 000000000000..2bdf9ad2c2bc --- /dev/null +++ b/testing/deployer/topology/compile.go @@ -0,0 +1,671 @@ +package topology + +import ( + crand "crypto/rand" + "encoding/hex" + "errors" + "fmt" + "reflect" + "regexp" + "sort" + + "github.com/google/go-cmp/cmp" + "github.com/hashicorp/go-hclog" +) + +const DockerPrefix = "consulcluster" + +func Compile(logger hclog.Logger, raw *Config) (*Topology, error) { + return compile(logger, raw, nil) +} + +func Recompile(logger hclog.Logger, raw *Config, prev *Topology) (*Topology, error) { + if prev == nil { + return nil, errors.New("missing previous topology") + } + return compile(logger, raw, prev) +} + +func compile(logger hclog.Logger, raw *Config, prev *Topology) (*Topology, error) { + var id string + if prev == nil { + var err error + id, err = newTopologyID() + if err != nil { + return nil, err + } + } else { + id = prev.ID + } + + images := DefaultImages().OverrideWith(raw.Images) + if images.Consul != "" { + return nil, fmt.Errorf("topology.images.consul cannot be set at this level") + } + + if len(raw.Networks) == 0 { + return nil, fmt.Errorf("topology.networks is empty") + } + + networks := make(map[string]*Network) + for _, net := range raw.Networks { + if net.DockerName != "" { + return nil, fmt.Errorf("network %q should not specify DockerName", net.Name) + } + if !IsValidLabel(net.Name) { + return nil, fmt.Errorf("network name is not valid: %s", net.Name) + } + if _, exists := networks[net.Name]; exists { + return nil, fmt.Errorf("cannot have two networks with the same name %q", net.Name) + } + + switch net.Type { + case "": + net.Type = "lan" + case "wan", "lan": + default: + return nil, fmt.Errorf("network %q has unknown type %q", net.Name, net.Type) + } + + networks[net.Name] = net + net.DockerName = DockerPrefix + "-" + net.Name + "-" + id + } + + if len(raw.Clusters) == 0 { + return nil, fmt.Errorf("topology.clusters is empty") + } + + var ( + clusters = make(map[string]*Cluster) + nextIndex int // use a global index so any shared networks work properly with assignments + ) + + foundPeerNames := make(map[string]map[string]struct{}) + for _, c := range raw.Clusters { + if c.Name == "" { + return nil, fmt.Errorf("cluster has no name") + } + + foundPeerNames[c.Name] = make(map[string]struct{}) + + if !IsValidLabel(c.Name) { + return nil, fmt.Errorf("cluster name is not valid: %s", c.Name) + } + + if _, exists := clusters[c.Name]; exists { + return nil, fmt.Errorf("cannot have two clusters with the same name %q; use unique names and override the Datacenter field if that's what you want", c.Name) + } + + if c.Datacenter == "" { + c.Datacenter = c.Name + } else { + if !IsValidLabel(c.Datacenter) { + return nil, fmt.Errorf("datacenter name is not valid: %s", c.Datacenter) + } + } + + clusters[c.Name] = 
c + if c.NetworkName == "" { + c.NetworkName = c.Name + } + + c.Images = images.OverrideWith(c.Images).ChooseConsul(c.Enterprise) + + if _, ok := networks[c.NetworkName]; !ok { + return nil, fmt.Errorf("cluster %q uses network name %q that does not exist", c.Name, c.NetworkName) + } + + if len(c.Nodes) == 0 { + return nil, fmt.Errorf("cluster %q has no nodes", c.Name) + } + + if c.TLSVolumeName != "" { + return nil, fmt.Errorf("user cannot specify the TLSVolumeName field") + } + + tenancies := make(map[string]map[string]struct{}) + addTenancy := func(partition, namespace string) { + partition = PartitionOrDefault(partition) + namespace = NamespaceOrDefault(namespace) + m, ok := tenancies[partition] + if !ok { + m = make(map[string]struct{}) + tenancies[partition] = m + } + m[namespace] = struct{}{} + } + + for _, ap := range c.Partitions { + addTenancy(ap.Name, "default") + for _, ns := range ap.Namespaces { + addTenancy(ap.Name, ns) + } + } + + for _, ce := range c.InitialConfigEntries { + addTenancy(ce.GetPartition(), ce.GetNamespace()) + } + + seenNodes := make(map[NodeID]struct{}) + for _, n := range c.Nodes { + if n.Name == "" { + return nil, fmt.Errorf("cluster %q node has no name", c.Name) + } + if !IsValidLabel(n.Name) { + return nil, fmt.Errorf("node name is not valid: %s", n.Name) + } + + switch n.Kind { + case NodeKindServer, NodeKindClient, NodeKindDataplane: + default: + return nil, fmt.Errorf("cluster %q node %q has invalid kind: %s", c.Name, n.Name, n.Kind) + } + + n.Partition = PartitionOrDefault(n.Partition) + if !IsValidLabel(n.Partition) { + return nil, fmt.Errorf("node partition is not valid: %s", n.Partition) + } + addTenancy(n.Partition, "default") + + if _, exists := seenNodes[n.ID()]; exists { + return nil, fmt.Errorf("cannot have two nodes in the same cluster %q with the same name %q", c.Name, n.ID()) + } + seenNodes[n.ID()] = struct{}{} + + if len(n.usedPorts) != 0 { + return nil, fmt.Errorf("user cannot specify the usedPorts field") + } + n.usedPorts = make(map[int]int) + exposePort := func(v int) bool { + if _, ok := n.usedPorts[v]; ok { + return false + } + n.usedPorts[v] = 0 + return true + } + + if n.IsAgent() { + // TODO: the ux here is awful; we should be able to examine the topology to guess properly + exposePort(8500) + if n.IsServer() { + exposePort(8503) + } else { + exposePort(8502) + } + } + + if n.Index != 0 { + return nil, fmt.Errorf("user cannot specify the node index") + } + n.Index = nextIndex + nextIndex++ + + n.Images = c.Images.OverrideWith(n.Images).ChooseNode(n.Kind) + + n.Cluster = c.Name + n.Datacenter = c.Datacenter + n.dockerName = DockerPrefix + "-" + n.Name + "-" + id + + if len(n.Addresses) == 0 { + n.Addresses = append(n.Addresses, &Address{Network: c.NetworkName}) + } + var ( + numPublic int + numLocal int + ) + for _, addr := range n.Addresses { + if addr.Network == "" { + return nil, fmt.Errorf("cluster %q node %q has invalid address", c.Name, n.Name) + } + + if addr.Type != "" { + return nil, fmt.Errorf("user cannot specify the address type directly") + } + + net, ok := networks[addr.Network] + if !ok { + return nil, fmt.Errorf("cluster %q node %q uses network name %q that does not exist", c.Name, n.Name, addr.Network) + } + + if net.IsPublic() { + numPublic++ + } else if net.IsLocal() { + numLocal++ + } + addr.Type = net.Type + + addr.DockerNetworkName = net.DockerName + } + + if numLocal == 0 { + return nil, fmt.Errorf("cluster %q node %q has no local addresses", c.Name, n.Name) + } + if numPublic > 1 { + return nil, 
fmt.Errorf("cluster %q node %q has more than one public address", c.Name, n.Name) + } + + seenServices := make(map[ServiceID]struct{}) + for _, svc := range n.Services { + if n.IsAgent() { + // Default to that of the enclosing node. + svc.ID.Partition = n.Partition + } + svc.ID.Normalize() + + // Denormalize + svc.Node = n + + if !IsValidLabel(svc.ID.Partition) { + return nil, fmt.Errorf("service partition is not valid: %s", svc.ID.Partition) + } + if !IsValidLabel(svc.ID.Namespace) { + return nil, fmt.Errorf("service namespace is not valid: %s", svc.ID.Namespace) + } + if !IsValidLabel(svc.ID.Name) { + return nil, fmt.Errorf("service name is not valid: %s", svc.ID.Name) + } + addTenancy(svc.ID.Partition, svc.ID.Namespace) + + if _, exists := seenServices[svc.ID]; exists { + return nil, fmt.Errorf("cannot have two services on the same node %q in the same cluster %q with the same name %q", n.ID(), c.Name, svc.ID) + } + seenServices[svc.ID] = struct{}{} + + if !svc.DisableServiceMesh && n.IsDataplane() { + if svc.EnvoyPublicListenerPort <= 0 { + if _, ok := n.usedPorts[20000]; !ok { + // For convenience the FIRST service on a node can get 20000 for free. + svc.EnvoyPublicListenerPort = 20000 + } else { + return nil, fmt.Errorf("envoy public listener port is required") + } + } + } + + // add all of the service ports + for _, port := range svc.ports() { + if ok := exposePort(port); !ok { + return nil, fmt.Errorf("port used more than once on cluster %q node %q: %d", c.Name, n.ID(), port) + } + } + + // TODO(rb): re-expose? + // switch svc.Protocol { + // case "": + // svc.Protocol = "tcp" + // fallthrough + // case "tcp": + // if svc.CheckHTTP != "" { + // return nil, fmt.Errorf("cannot set CheckHTTP for tcp service") + // } + // case "http": + // if svc.CheckTCP != "" { + // return nil, fmt.Errorf("cannot set CheckTCP for tcp service") + // } + // default: + // return nil, fmt.Errorf("service has invalid protocol: %s", svc.Protocol) + // } + + for _, u := range svc.Upstreams { + // Default to that of the enclosing service. + if u.Peer == "" { + if u.ID.Partition == "" { + u.ID.Partition = svc.ID.Partition + } + if u.ID.Namespace == "" { + u.ID.Namespace = svc.ID.Namespace + } + } else { + if u.ID.Partition != "" { + u.ID.Partition = "" // irrelevant here; we'll set it to the value of the OTHER side for plumbing purposes in tests + } + u.ID.Namespace = NamespaceOrDefault(u.ID.Namespace) + foundPeerNames[c.Name][u.Peer] = struct{}{} + } + + if u.ID.Name == "" { + return nil, fmt.Errorf("upstream service name is required") + } + addTenancy(u.ID.Partition, u.ID.Namespace) + } + + if err := svc.Validate(); err != nil { + return nil, fmt.Errorf("cluster %q node %q service %q is not valid: %w", c.Name, n.Name, svc.ID.String(), err) + } + } + } + + // Explode this into the explicit list based on stray references made. 
+ c.Partitions = nil + for ap, nsMap := range tenancies { + p := &Partition{ + Name: ap, + } + for ns := range nsMap { + p.Namespaces = append(p.Namespaces, ns) + } + sort.Strings(p.Namespaces) + c.Partitions = append(c.Partitions, p) + } + sort.Slice(c.Partitions, func(i, j int) bool { + return c.Partitions[i].Name < c.Partitions[j].Name + }) + + if !c.Enterprise { + expect := []*Partition{{Name: "default", Namespaces: []string{"default"}}} + if !reflect.DeepEqual(c.Partitions, expect) { + return nil, fmt.Errorf("cluster %q references non-default partitions or namespaces but is OSS", c.Name) + } + } + } + + clusteredPeerings := make(map[string]map[string]*PeerCluster) // local-cluster -> local-peer -> info + addPeerMapEntry := func(pc PeerCluster) { + pm, ok := clusteredPeerings[pc.Name] + if !ok { + pm = make(map[string]*PeerCluster) + clusteredPeerings[pc.Name] = pm + } + pm[pc.PeerName] = &pc + } + for _, p := range raw.Peerings { + dialingCluster, ok := clusters[p.Dialing.Name] + if !ok { + return nil, fmt.Errorf("peering references a dialing cluster that does not exist: %s", p.Dialing.Name) + } + acceptingCluster, ok := clusters[p.Accepting.Name] + if !ok { + return nil, fmt.Errorf("peering references an accepting cluster that does not exist: %s", p.Accepting.Name) + } + if p.Dialing.Name == p.Accepting.Name { + return nil, fmt.Errorf("self peerings are not allowed: %s", p.Dialing.Name) + } + + p.Dialing.Partition = PartitionOrDefault(p.Dialing.Partition) + p.Accepting.Partition = PartitionOrDefault(p.Accepting.Partition) + + if dialingCluster.Enterprise { + if !dialingCluster.hasPartition(p.Dialing.Partition) { + return nil, fmt.Errorf("dialing side of peering cannot reference a partition that does not exist: %s", p.Dialing.Partition) + } + } else { + if p.Dialing.Partition != "default" { + return nil, fmt.Errorf("dialing side of peering cannot reference a partition when OSS") + } + } + if acceptingCluster.Enterprise { + if !acceptingCluster.hasPartition(p.Accepting.Partition) { + return nil, fmt.Errorf("accepting side of peering cannot reference a partition that does not exist: %s", p.Accepting.Partition) + } + } else { + if p.Accepting.Partition != "default" { + return nil, fmt.Errorf("accepting side of peering cannot reference a partition when OSS") + } + } + + if p.Dialing.PeerName == "" { + p.Dialing.PeerName = "peer-" + p.Accepting.Name + "-" + p.Accepting.Partition + } + if p.Accepting.PeerName == "" { + p.Accepting.PeerName = "peer-" + p.Dialing.Name + "-" + p.Dialing.Partition + } + + { // Ensure the link fields do not have recursive links. 
+ p.Dialing.Link = nil + p.Accepting.Link = nil + + // Copy the un-linked data before setting the link + pa := p.Accepting + pd := p.Dialing + + p.Accepting.Link = &pd + p.Dialing.Link = &pa + } + + addPeerMapEntry(p.Accepting) + addPeerMapEntry(p.Dialing) + + delete(foundPeerNames[p.Accepting.Name], p.Accepting.PeerName) + delete(foundPeerNames[p.Dialing.Name], p.Dialing.PeerName) + } + + for cluster, peers := range foundPeerNames { + if len(peers) > 0 { + var pretty []string + for name := range peers { + pretty = append(pretty, name) + } + sort.Strings(pretty) + return nil, fmt.Errorf("cluster[%s] found topology references to peerings that do not exist: %v", cluster, pretty) + } + } + + // after we decoded the peering stuff, we can fill in some computed data in the upstreams + for _, c := range clusters { + c.Peerings = clusteredPeerings[c.Name] + for _, n := range c.Nodes { + for _, svc := range n.Services { + for _, u := range svc.Upstreams { + if u.Peer == "" { + u.Cluster = c.Name + u.Peering = nil + continue + } + remotePeer, ok := c.Peerings[u.Peer] + if !ok { + return nil, fmt.Errorf("not possible") + } + u.Cluster = remotePeer.Link.Name + u.Peering = remotePeer.Link + // this helps in generating fortio assertions; otherwise field is ignored + u.ID.Partition = remotePeer.Link.Partition + } + } + } + } + + t := &Topology{ + ID: id, + Networks: networks, + Clusters: clusters, + Images: images, + Peerings: raw.Peerings, + } + + if prev != nil { + // networks cannot change + if !sameKeys(prev.Networks, t.Networks) { + return nil, fmt.Errorf("cannot create or destroy networks") + } + + for _, newNetwork := range t.Networks { + oldNetwork := prev.Networks[newNetwork.Name] + + // Carryover + newNetwork.inheritFromExisting(oldNetwork) + + if err := isSame(oldNetwork, newNetwork); err != nil { + return nil, fmt.Errorf("networks cannot change: %w", err) + } + + } + + // cannot add or remove an entire cluster + if !sameKeys(prev.Clusters, t.Clusters) { + return nil, fmt.Errorf("cannot create or destroy clusters") + } + + for _, newCluster := range t.Clusters { + oldCluster := prev.Clusters[newCluster.Name] + + // Carryover + newCluster.inheritFromExisting(oldCluster) + + if newCluster.Name != oldCluster.Name || + newCluster.NetworkName != oldCluster.NetworkName || + newCluster.Datacenter != oldCluster.Datacenter || + newCluster.Enterprise != oldCluster.Enterprise { + return nil, fmt.Errorf("cannot edit some cluster fields for %q", newCluster.Name) + } + + // WARN on presence of some things. + if len(newCluster.InitialConfigEntries) > 0 { + logger.Warn("initial config entries were provided, but are skipped on recompile") + } + + // Check NODES + if err := inheritAndValidateNodes(oldCluster.Nodes, newCluster.Nodes); err != nil { + return nil, fmt.Errorf("some immutable aspects of nodes were changed in cluster %q: %w", newCluster.Name, err) + } + } + } + + return t, nil +} + +const permutedWarning = "use the disabled node kind if you want to ignore a node" + +func inheritAndValidateNodes( + prev, curr []*Node, +) error { + nodeMap := mapifyNodes(curr) + + for prevIdx, node := range prev { + currNode, ok := nodeMap[node.ID()] + if !ok { + return fmt.Errorf("node %q has vanished; "+permutedWarning, node.ID()) + } + // Ensure it hasn't been permuted. 
+ if currNode.Pos != prevIdx { + return fmt.Errorf( + "node %q has been shuffled %d -> %d; "+permutedWarning, + node.ID(), + prevIdx, + currNode.Pos, + ) + } + + if currNode.Node.Kind != node.Kind || + currNode.Node.Partition != node.Partition || + currNode.Node.Name != node.Name || + currNode.Node.Index != node.Index || + len(currNode.Node.Addresses) != len(node.Addresses) || + !sameKeys(currNode.Node.usedPorts, node.usedPorts) { + return fmt.Errorf("cannot edit some node fields for %q", node.ID()) + } + + currNode.Node.inheritFromExisting(node) + + for i := 0; i < len(currNode.Node.Addresses); i++ { + prevAddr := node.Addresses[i] + currAddr := currNode.Node.Addresses[i] + + if prevAddr.Network != currAddr.Network { + return fmt.Errorf("addresses were shuffled for node %q", node.ID()) + } + + if prevAddr.Type != currAddr.Type { + return fmt.Errorf("cannot edit some address fields for %q", node.ID()) + } + + currAddr.inheritFromExisting(prevAddr) + } + + svcMap := mapifyServices(currNode.Node.Services) + + for _, svc := range node.Services { + currSvc, ok := svcMap[svc.ID] + if !ok { + continue // service has vanished, this is ok + } + // don't care about index permutation + + if currSvc.ID != svc.ID || + currSvc.Port != svc.Port || + currSvc.EnvoyAdminPort != svc.EnvoyAdminPort || + currSvc.EnvoyPublicListenerPort != svc.EnvoyPublicListenerPort || + isSame(currSvc.Command, svc.Command) != nil || + isSame(currSvc.Env, svc.Env) != nil { + return fmt.Errorf("cannot edit some address fields for %q", svc.ID) + } + + currSvc.inheritFromExisting(svc) + } + } + return nil +} + +func newTopologyID() (string, error) { + const n = 16 + id := make([]byte, n) + if _, err := crand.Read(id[:]); err != nil { + return "", err + } + return hex.EncodeToString(id)[:n], nil +} + +// matches valid DNS labels according to RFC 1123, should be at most 63 +// characters according to the RFC +var validLabel = regexp.MustCompile(`^[a-zA-Z0-9]([a-zA-Z0-9\-]{0,61}[a-zA-Z0-9])?$`) + +// IsValidLabel returns true if the string given is a valid DNS label (RFC 1123). +// Note: the only difference between RFC 1035 and RFC 1123 labels is that in +// RFC 1123 labels can begin with a number. +func IsValidLabel(name string) bool { + return validLabel.MatchString(name) +} + +// ValidateLabel is similar to IsValidLabel except it returns an error +// instead of false when name is not a valid DNS label. The error will contain +// reference to what constitutes a valid DNS label. 
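A few representative inputs, as a sketch of how these label helpers behave (editorial example, not part of the patch):

	IsValidLabel("dc1")          // true
	IsValidLabel("9node-a")      // true: RFC 1123 labels may begin with a digit
	IsValidLabel("-lead-hyphen") // false: must start and end with an alphanumeric
	IsValidLabel("has.dots")     // false: '.' is not allowed within a single label
	ValidateLabel("has.dots")    // returns a non-nil error describing what a valid label looks like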
+func ValidateLabel(name string) error { + if !IsValidLabel(name) { + return errors.New("a valid DNS label must consist of lower case alphanumeric characters or '-', and must start and end with an alphanumeric character") + } + return nil +} + +func isSame(x, y any) error { + diff := cmp.Diff(x, y) + if diff != "" { + return fmt.Errorf("values are not equal\n--- expected\n+++ actual\n%v", diff) + } + return nil +} + +func sameKeys[K comparable, V any](x, y map[K]V) bool { + if len(x) != len(y) { + return false + } + + for kx := range x { + if _, ok := y[kx]; !ok { + return false + } + } + return true +} + +func mapifyNodes(nodes []*Node) map[NodeID]nodeWithPosition { + m := make(map[NodeID]nodeWithPosition) + for i, node := range nodes { + m[node.ID()] = nodeWithPosition{ + Pos: i, + Node: node, + } + } + return m +} + +type nodeWithPosition struct { + Pos int + Node *Node +} + +func mapifyServices(services []*Service) map[ServiceID]*Service { + m := make(map[ServiceID]*Service) + for _, svc := range services { + m[svc.ID] = svc + } + return m +} diff --git a/testing/deployer/topology/default_cdp.go b/testing/deployer/topology/default_cdp.go new file mode 100644 index 000000000000..eb3aa5bd20be --- /dev/null +++ b/testing/deployer/topology/default_cdp.go @@ -0,0 +1,3 @@ +package topology + +const DefaultDataplaneImage = "hashicorp/consul-dataplane:1.1.0" diff --git a/testing/deployer/topology/default_consul.go b/testing/deployer/topology/default_consul.go new file mode 100644 index 000000000000..e65b42cfd8b1 --- /dev/null +++ b/testing/deployer/topology/default_consul.go @@ -0,0 +1,4 @@ +package topology + +const DefaultConsulImage = "hashicorp/consul:1.15.2" +const DefaultConsulEnterpriseImage = "hashicorp/consul-enterprise:1.15.2-ent" diff --git a/testing/deployer/topology/default_envoy.go b/testing/deployer/topology/default_envoy.go new file mode 100644 index 000000000000..05ee5d5e5ad1 --- /dev/null +++ b/testing/deployer/topology/default_envoy.go @@ -0,0 +1,3 @@ +package topology + +const DefaultEnvoyImage = "envoyproxy/envoy:v1.25.1" diff --git a/testing/deployer/topology/ids.go b/testing/deployer/topology/ids.go new file mode 100644 index 000000000000..372bccec36a3 --- /dev/null +++ b/testing/deployer/topology/ids.go @@ -0,0 +1,142 @@ +package topology + +import ( + "fmt" + + "github.com/hashicorp/consul/api" +) + +type NodeServiceID struct { + Node string + Service string `json:",omitempty"` + Namespace string `json:",omitempty"` + Partition string `json:",omitempty"` +} + +func NewNodeServiceID(node, service, namespace, partition string) NodeServiceID { + id := NodeServiceID{ + Node: node, + Service: service, + Namespace: namespace, + Partition: partition, + } + id.Normalize() + return id +} + +func (id NodeServiceID) NodeID() NodeID { + return NewNodeID(id.Node, id.Partition) +} + +func (id NodeServiceID) ServiceID() ServiceID { + return NewServiceID(id.Service, id.Namespace, id.Partition) +} + +func (id *NodeServiceID) Normalize() { + id.Namespace = NamespaceOrDefault(id.Namespace) + id.Partition = PartitionOrDefault(id.Partition) +} + +func (id NodeServiceID) String() string { + return fmt.Sprintf("%s/%s/%s/%s", id.Partition, id.Node, id.Namespace, id.Service) +} + +type NodeID struct { + Name string `json:",omitempty"` + Partition string `json:",omitempty"` +} + +func NewNodeID(name, partition string) NodeID { + id := NodeID{ + Name: name, + Partition: partition, + } + id.Normalize() + return id +} + +func (id *NodeID) Normalize() { + id.Partition = 
PartitionOrDefault(id.Partition) +} + +func (id NodeID) String() string { + return fmt.Sprintf("%s/%s", id.Partition, id.Name) +} + +func (id NodeID) ACLString() string { + return fmt.Sprintf("%s--%s", id.Partition, id.Name) +} +func (id NodeID) TFString() string { + return id.ACLString() +} + +type ServiceID struct { + Name string `json:",omitempty"` + Namespace string `json:",omitempty"` + Partition string `json:",omitempty"` +} + +func NewServiceID(name, namespace, partition string) ServiceID { + id := ServiceID{ + Name: name, + Namespace: namespace, + Partition: partition, + } + id.Normalize() + return id +} + +func (id ServiceID) Less(other ServiceID) bool { + if id.Partition != other.Partition { + return id.Partition < other.Partition + } + if id.Namespace != other.Namespace { + return id.Namespace < other.Namespace + } + return id.Name < other.Name +} + +func (id *ServiceID) Normalize() { + id.Namespace = NamespaceOrDefault(id.Namespace) + id.Partition = PartitionOrDefault(id.Partition) +} + +func (id ServiceID) String() string { + return fmt.Sprintf("%s/%s/%s", id.Partition, id.Namespace, id.Name) +} + +func (id ServiceID) ACLString() string { + return fmt.Sprintf("%s--%s--%s", id.Partition, id.Namespace, id.Name) +} +func (id ServiceID) TFString() string { + return id.ACLString() +} + +func PartitionOrDefault(name string) string { + if name == "" { + return "default" + } + return name +} +func NamespaceOrDefault(name string) string { + if name == "" { + return "default" + } + return name +} + +func DefaultToEmpty(name string) string { + if name == "default" { + return "" + } + return name +} + +// PartitionQueryOptions returns an *api.QueryOptions with the given partition +// field set only if the partition is non-default. This helps when writing +// tests for joint use in OSS and ENT. 
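A brief usage sketch (editorial, not part of the patch; client is an assumed *api.Client): because "default" maps to the empty string, the same call works unchanged against both OSS and Enterprise servers.

	opts := PartitionQueryOptions("default") // equivalent to &api.QueryOptions{Partition: ""}
	services, _, err := client.Catalog().Services(opts)
	if err != nil {
		// handle the error
	}
	_ = services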
+func PartitionQueryOptions(partition string) *api.QueryOptions { + return &api.QueryOptions{ + Partition: DefaultToEmpty(partition), + } +} diff --git a/testing/deployer/topology/images.go b/testing/deployer/topology/images.go new file mode 100644 index 000000000000..25901de66f02 --- /dev/null +++ b/testing/deployer/topology/images.go @@ -0,0 +1,123 @@ +package topology + +import ( + "strings" +) + +type Images struct { + Consul string `json:",omitempty"` + ConsulOSS string `json:",omitempty"` + ConsulEnterprise string `json:",omitempty"` + Envoy string + Dataplane string +} + +func (i Images) LocalDataplaneImage() string { + if i.Dataplane == "" { + return "" + } + + img, tag, ok := strings.Cut(i.Dataplane, ":") + if !ok { + tag = "latest" + } + + repo, name, ok := strings.Cut(img, "/") + if ok { + name = repo + "-" + name + } + + // ex: local/hashicorp-consul-dataplane:1.1.0 + return "local/" + name + ":" + tag +} + +func (i Images) EnvoyConsulImage() string { + if i.Consul == "" || i.Envoy == "" { + return "" + } + + img1, tag1, ok1 := strings.Cut(i.Consul, ":") + img2, tag2, ok2 := strings.Cut(i.Envoy, ":") + if !ok1 { + tag1 = "latest" + } + if !ok2 { + tag2 = "latest" + } + + repo1, name1, ok1 := strings.Cut(img1, "/") + repo2, name2, ok2 := strings.Cut(img2, "/") + + if ok1 { + name1 = repo1 + "-" + name1 + } else { + name1 = repo1 + } + if ok2 { + name2 = repo2 + "-" + name2 + } else { + name2 = repo2 + } + + // ex: local/hashicorp-consul-and-envoyproxy-envoy:1.15.0-with-v1.26.2 + return "local/" + name1 + "-and-" + name2 + ":" + tag1 + "-with-" + tag2 +} + +func (i Images) ChooseNode(kind NodeKind) Images { + switch kind { + case NodeKindServer: + i.Envoy = "" + i.Dataplane = "" + case NodeKindClient: + i.Dataplane = "" + case NodeKindDataplane: + i.Envoy = "" + default: + // do nothing + } + return i +} + +func (i Images) ChooseConsul(enterprise bool) Images { + if enterprise { + i.Consul = i.ConsulEnterprise + } else { + i.Consul = i.ConsulOSS + } + i.ConsulEnterprise = "" + i.ConsulOSS = "" + return i +} + +func (i Images) OverrideWith(i2 Images) Images { + if i2.Consul != "" { + i.Consul = i2.Consul + } + if i2.ConsulOSS != "" { + i.ConsulOSS = i2.ConsulOSS + } + if i2.ConsulEnterprise != "" { + i.ConsulEnterprise = i2.ConsulEnterprise + } + if i2.Envoy != "" { + i.Envoy = i2.Envoy + } + if i2.Dataplane != "" { + i.Dataplane = i2.Dataplane + } + return i +} + +// DefaultImages controls which specific docker images are used as default +// values for topology components that do not specify values. 
+// +// These can be bulk-updated using the make target 'make update-defaults' +func DefaultImages() Images { + return Images{ + Consul: "", + ConsulOSS: DefaultConsulImage, + ConsulEnterprise: DefaultConsulEnterpriseImage, + Envoy: DefaultEnvoyImage, + Dataplane: DefaultDataplaneImage, + } +} diff --git a/testing/deployer/topology/images_test.go b/testing/deployer/topology/images_test.go new file mode 100644 index 000000000000..a8af9029d1a9 --- /dev/null +++ b/testing/deployer/topology/images_test.go @@ -0,0 +1,98 @@ +package topology + +import ( + "strconv" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestImages_EnvoyConsulImage(t *testing.T) { + type testcase struct { + consul, envoy string + expect string + } + + run := func(t *testing.T, tc testcase) { + i := Images{Consul: tc.consul, Envoy: tc.envoy} + j := i.EnvoyConsulImage() + require.Equal(t, tc.expect, j) + } + + cases := []testcase{ + { + consul: "", + envoy: "", + expect: "", + }, + { + consul: "consul", + envoy: "", + expect: "", + }, + { + consul: "", + envoy: "envoy", + expect: "", + }, + { + consul: "consul", + envoy: "envoy", + expect: "local/consul-and-envoy:latest-with-latest", + }, + // repos + { + consul: "hashicorp/consul", + envoy: "envoy", + expect: "local/hashicorp-consul-and-envoy:latest-with-latest", + }, + { + consul: "consul", + envoy: "envoyproxy/envoy", + expect: "local/consul-and-envoyproxy-envoy:latest-with-latest", + }, + { + consul: "hashicorp/consul", + envoy: "envoyproxy/envoy", + expect: "local/hashicorp-consul-and-envoyproxy-envoy:latest-with-latest", + }, + // tags + { + consul: "consul:1.15.0", + envoy: "envoy", + expect: "local/consul-and-envoy:1.15.0-with-latest", + }, + { + consul: "consul", + envoy: "envoy:v1.26.1", + expect: "local/consul-and-envoy:latest-with-v1.26.1", + }, + { + consul: "consul:1.15.0", + envoy: "envoy:v1.26.1", + expect: "local/consul-and-envoy:1.15.0-with-v1.26.1", + }, + // repos+tags + { + consul: "hashicorp/consul:1.15.0", + envoy: "envoy:v1.26.1", + expect: "local/hashicorp-consul-and-envoy:1.15.0-with-v1.26.1", + }, + { + consul: "consul:1.15.0", + envoy: "envoyproxy/envoy:v1.26.1", + expect: "local/consul-and-envoyproxy-envoy:1.15.0-with-v1.26.1", + }, + { + consul: "hashicorp/consul:1.15.0", + envoy: "envoyproxy/envoy:v1.26.1", + expect: "local/hashicorp-consul-and-envoyproxy-envoy:1.15.0-with-v1.26.1", + }, + } + + for i, tc := range cases { + t.Run(strconv.Itoa(i), func(t *testing.T) { + run(t, tc) + }) + } +} diff --git a/testing/deployer/topology/topology.go b/testing/deployer/topology/topology.go new file mode 100644 index 000000000000..fbdf2605d53f --- /dev/null +++ b/testing/deployer/topology/topology.go @@ -0,0 +1,787 @@ +package topology + +import ( + "errors" + "fmt" + "net" + "net/netip" + "reflect" + "sort" + + "github.com/hashicorp/consul/api" +) + +type Topology struct { + ID string + + // Images controls which specific docker images are used when running this + // node. Non-empty fields here override non-empty fields inherited from the + // general default values from DefaultImages(). + Images Images + + // Networks is the list of networks to create for this set of clusters. + Networks map[string]*Network + + // Clusters defines the list of Consul clusters that should be created, and + // their associated workloads. + Clusters map[string]*Cluster + + // Peerings defines the list of pairwise peerings that should be established + // between clusters. 
+ Peerings []*Peering `json:",omitempty"` +} + +func (t *Topology) DigestExposedProxyPort(netName string, proxyPort int) (bool, error) { + net, ok := t.Networks[netName] + if !ok { + return false, fmt.Errorf("found output network that does not exist: %s", netName) + } + if net.ProxyPort == proxyPort { + return false, nil + } + + net.ProxyPort = proxyPort + + // Denormalize for UX. + for _, cluster := range t.Clusters { + for _, node := range cluster.Nodes { + for _, addr := range node.Addresses { + if addr.Network == netName { + addr.ProxyPort = proxyPort + } + } + } + } + + return true, nil +} + +func (t *Topology) SortedNetworks() []*Network { + var out []*Network + for _, n := range t.Networks { + out = append(out, n) + } + sort.Slice(out, func(i, j int) bool { + return out[i].Name < out[j].Name + }) + return out +} + +func (t *Topology) SortedClusters() []*Cluster { + var out []*Cluster + for _, c := range t.Clusters { + out = append(out, c) + } + sort.Slice(out, func(i, j int) bool { + return out[i].Name < out[j].Name + }) + return out +} + +type Config struct { + // Images controls which specific docker images are used when running this + // node. Non-empty fields here override non-empty fields inherited from the + // general default values from DefaultImages(). + Images Images + + // Networks is the list of networks to create for this set of clusters. + Networks []*Network + + // Clusters defines the list of Consul clusters that should be created, and + // their associated workloads. + Clusters []*Cluster + + // Peerings defines the list of pairwise peerings that should be established + // between clusters. + Peerings []*Peering +} + +func (c *Config) Cluster(name string) *Cluster { + for _, cluster := range c.Clusters { + if cluster.Name == name { + return cluster + } + } + return nil +} + +type Network struct { + Type string // lan/wan ; empty means lan + Name string // logical name + + // computed at topology compile + DockerName string + // generated during network-and-tls + Subnet string + IPPool []string `json:"-"` + // generated during network-and-tls + ProxyAddress string `json:",omitempty"` + DNSAddress string `json:",omitempty"` + // filled in from terraform outputs after network-and-tls + ProxyPort int `json:",omitempty"` +} + +func (n *Network) IsLocal() bool { + return n.Type == "" || n.Type == "lan" +} + +func (n *Network) IsPublic() bool { + return n.Type == "wan" +} + +func (n *Network) inheritFromExisting(existing *Network) { + n.Subnet = existing.Subnet + n.IPPool = existing.IPPool + n.ProxyAddress = existing.ProxyAddress + n.DNSAddress = existing.DNSAddress + n.ProxyPort = existing.ProxyPort +} + +func (n *Network) IPByIndex(index int) string { + if index >= len(n.IPPool) { + panic(fmt.Sprintf( + "not enough ips on this network to assign index %d: %d", + len(n.IPPool), index, + )) + } + return n.IPPool[index] +} + +func (n *Network) SetSubnet(subnet string) (bool, error) { + if n.Subnet == subnet { + return false, nil + } + + p, err := netip.ParsePrefix(subnet) + if err != nil { + return false, err + } + if !p.IsValid() { + return false, errors.New("not valid") + } + p = p.Masked() + + var ipPool []string + + addr := p.Addr() + for { + if !p.Contains(addr) { + break + } + ipPool = append(ipPool, addr.String()) + addr = addr.Next() + } + + ipPool = ipPool[2:] // skip the x.x.x.{0,1} + + n.Subnet = subnet + n.IPPool = ipPool + return true, nil +} + +// Cluster represents a single standalone install of Consul. 
This is the unit +// of what is peered when using cluster peering. Older consul installs would +// call this a datacenter. +type Cluster struct { + Name string + NetworkName string // empty assumes same as Name + + // Images controls which specific docker images are used when running this + // cluster. Non-empty fields here override non-empty fields inherited from + // the enclosing Topology. + Images Images + + // Enterprise marks this cluster as desiring to run Consul Enterprise + // components. + Enterprise bool `json:",omitempty"` + + // Nodes is the definition of the nodes (agent-less and agent-ful). + Nodes []*Node + + // Partitions is a list of tenancy configurations that should be created + // after the servers come up but before the clients and the rest of the + // topology starts. + // + // Enterprise Only. + Partitions []*Partition `json:",omitempty"` + + // Datacenter defaults to "Name" if left unspecified. It lets you possibly + // create multiple peer clusters with identical datacenter names. + Datacenter string + + // InitialConfigEntries is a convenience function to have some config + // entries created after the servers start up but before the rest of the + // topology comes up. + InitialConfigEntries []api.ConfigEntry `json:",omitempty"` + + // TLSVolumeName is the docker volume name containing the various certs + // generated by 'consul tls cert create' + // + // This is generated during the networking phase and is not user specified. + TLSVolumeName string `json:",omitempty"` + + // Peerings is a map of peering names to information about that peering in this cluster + // + // Denormalized during compile. + Peerings map[string]*PeerCluster `json:",omitempty"` +} + +func (c *Cluster) inheritFromExisting(existing *Cluster) { + c.TLSVolumeName = existing.TLSVolumeName +} + +type Partition struct { + Name string + Namespaces []string +} + +func (c *Cluster) hasPartition(p string) bool { + for _, partition := range c.Partitions { + if partition.Name == p { + return true + } + } + return false +} + +func (c *Cluster) PartitionQueryOptionsList() []*api.QueryOptions { + if !c.Enterprise { + return []*api.QueryOptions{{}} + } + + var out []*api.QueryOptions + for _, p := range c.Partitions { + out = append(out, &api.QueryOptions{Partition: p.Name}) + } + return out +} + +func (c *Cluster) ServerNodes() []*Node { + var out []*Node + for _, node := range c.SortedNodes() { + if node.Kind != NodeKindServer || node.Disabled { + continue + } + out = append(out, node) + } + return out +} + +func (c *Cluster) ServerByAddr(addr string) *Node { + expect, _, err := net.SplitHostPort(addr) + if err != nil { + return nil + } + + for _, node := range c.Nodes { + if node.Kind != NodeKindServer || node.Disabled { + continue + } + if node.LocalAddress() == expect { + return node + } + } + + return nil +} + +func (c *Cluster) FirstServer() *Node { + for _, node := range c.Nodes { + if node.IsServer() && !node.Disabled && node.ExposedPort(8500) > 0 { + return node + } + } + return nil +} + +func (c *Cluster) FirstClient() *Node { + for _, node := range c.Nodes { + if node.Kind != NodeKindClient || node.Disabled { + continue + } + return node + } + return nil +} + +func (c *Cluster) ActiveNodes() []*Node { + var out []*Node + for _, node := range c.Nodes { + if !node.Disabled { + out = append(out, node) + } + } + return out +} + +func (c *Cluster) SortedNodes() []*Node { + var out []*Node + out = append(out, c.Nodes...) 
+ + kindOrder := map[NodeKind]int{ + NodeKindServer: 1, + NodeKindClient: 2, + NodeKindDataplane: 2, + } + sort.Slice(out, func(i, j int) bool { + ni, nj := out[i], out[j] + + // servers before clients/dataplanes + ki, kj := kindOrder[ni.Kind], kindOrder[nj.Kind] + if ki < kj { + return true + } else if ki > kj { + return false + } + + // lex sort by partition + if ni.Partition < nj.Partition { + return true + } else if ni.Partition > nj.Partition { + return false + } + + // lex sort by name + return ni.Name < nj.Name + }) + return out +} + +func (c *Cluster) FindService(id NodeServiceID) *Service { + id.Normalize() + + nid := id.NodeID() + sid := id.ServiceID() + return c.ServiceByID(nid, sid) +} + +func (c *Cluster) ServiceByID(nid NodeID, sid ServiceID) *Service { + return c.NodeByID(nid).ServiceByID(sid) +} + +func (c *Cluster) ServicesByID(sid ServiceID) []*Service { + sid.Normalize() + + var out []*Service + for _, n := range c.Nodes { + for _, svc := range n.Services { + if svc.ID == sid { + out = append(out, svc) + } + } + } + return out +} + +func (c *Cluster) NodeByID(nid NodeID) *Node { + nid.Normalize() + for _, n := range c.Nodes { + if n.ID() == nid { + return n + } + } + panic("node not found: " + nid.String()) +} + +type Address struct { + Network string + + // denormalized at topology compile + Type string + // denormalized at topology compile + DockerNetworkName string + // generated after network-and-tls + IPAddress string + // denormalized from terraform outputs stored in the Network + ProxyPort int `json:",omitempty"` +} + +func (a *Address) inheritFromExisting(existing *Address) { + a.IPAddress = existing.IPAddress + a.ProxyPort = existing.ProxyPort +} + +func (a Address) IsLocal() bool { + return a.Type == "" || a.Type == "lan" +} + +func (a Address) IsPublic() bool { + return a.Type == "wan" +} + +type NodeKind string + +const ( + NodeKindUnknown NodeKind = "" + NodeKindServer NodeKind = "server" + NodeKindClient NodeKind = "client" + NodeKindDataplane NodeKind = "dataplane" +) + +// TODO: rename pod +type Node struct { + Kind NodeKind + Partition string // will be not empty + Name string // logical name + + // Images controls which specific docker images are used when running this + // node. Non-empty fields here override non-empty fields inherited from + // the enclosing Cluster. + Images Images + + // AgentEnv contains optional environment variables to attach to Consul agents. + AgentEnv []string + + Disabled bool `json:",omitempty"` + + Addresses []*Address + Services []*Service + + // denormalized at topology compile + Cluster string + Datacenter string + + // computed at topology compile + Index int + + // generated during network-and-tls + TLSCertPrefix string `json:",omitempty"` + + // dockerName is computed at topology compile + dockerName string + + // usedPorts has keys that are computed at topology compile (internal + // ports) and values initialized to zero until terraform creates the pods + // and extracts the exposed port values from output variables. 
+ usedPorts map[int]int // keys are from compile / values are from terraform output vars +} + +func (n *Node) DockerName() string { + return n.dockerName +} + +func (n *Node) ExposedPort(internalPort int) int { + return n.usedPorts[internalPort] +} + +func (n *Node) SortedPorts() []int { + var out []int + for internalPort := range n.usedPorts { + out = append(out, internalPort) + } + sort.Ints(out) + return out +} + +func (n *Node) inheritFromExisting(existing *Node) { + n.TLSCertPrefix = existing.TLSCertPrefix + + merged := existing.usedPorts + for k, vNew := range n.usedPorts { + if _, present := merged[k]; !present { + merged[k] = vNew + } + } + n.usedPorts = merged +} + +func (n *Node) String() string { + return n.ID().String() +} + +func (n *Node) ID() NodeID { + return NewNodeID(n.Name, n.Partition) +} + +func (n *Node) CatalogID() NodeID { + return NewNodeID(n.PodName(), n.Partition) +} + +func (n *Node) PodName() string { + return n.dockerName + "-pod" +} + +func (n *Node) AddressByNetwork(name string) *Address { + for _, a := range n.Addresses { + if a.Network == name { + return a + } + } + return nil +} + +func (n *Node) LocalAddress() string { + for _, a := range n.Addresses { + if a.IsLocal() { + if a.IPAddress == "" { + panic("node has no assigned local address") + } + return a.IPAddress + } + } + panic("node has no local network") +} + +func (n *Node) HasPublicAddress() bool { + for _, a := range n.Addresses { + if a.IsPublic() { + return true + } + } + return false +} + +func (n *Node) LocalProxyPort() int { + for _, a := range n.Addresses { + if a.IsLocal() { + if a.ProxyPort > 0 { + return a.ProxyPort + } + panic("node has no assigned local address") + } + } + panic("node has no local network") +} + +func (n *Node) PublicAddress() string { + for _, a := range n.Addresses { + if a.IsPublic() { + if a.IPAddress == "" { + panic("node has no assigned public address") + } + return a.IPAddress + } + } + panic("node has no public network") +} + +func (n *Node) PublicProxyPort() int { + for _, a := range n.Addresses { + if a.IsPublic() { + if a.ProxyPort > 0 { + return a.ProxyPort + } + panic("node has no assigned public address") + } + } + panic("node has no public network") +} + +func (n *Node) IsServer() bool { + return n.Kind == NodeKindServer +} + +func (n *Node) IsAgent() bool { + return n.Kind == NodeKindServer || n.Kind == NodeKindClient +} + +func (n *Node) RunsWorkloads() bool { + return n.IsAgent() || n.IsDataplane() +} + +func (n *Node) IsDataplane() bool { + return n.Kind == NodeKindDataplane +} + +func (n *Node) SortedServices() []*Service { + var out []*Service + out = append(out, n.Services...) + sort.Slice(out, func(i, j int) bool { + mi := out[i].IsMeshGateway + mj := out[j].IsMeshGateway + if mi && !mi { + return false + } else if !mi && mj { + return true + } + return out[i].ID.Less(out[j].ID) + }) + return out +} + +// DigestExposedPorts returns true if it was changed. 
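As a rough sketch of the intended flow (editorial, not part of the patch; the port values shown are hypothetical): compile fills usedPorts with internal ports mapped to zero, and the terraform output later supplies the host-side values.

	// after compile: n.usedPorts == map[int]int{8500: 0, 8503: 0}
	changed := n.DigestExposedPorts(map[int]int{8500: 32801, 8503: 32802})
	// changed == true, n.ExposedPort(8500) == 32801, n.ExposedPort(8503) == 32802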
+func (n *Node) DigestExposedPorts(ports map[int]int) bool { + if reflect.DeepEqual(n.usedPorts, ports) { + return false + } + for internalPort := range n.usedPorts { + if v, ok := ports[internalPort]; ok { + n.usedPorts[internalPort] = v + } else { + panic(fmt.Sprintf( + "cluster %q node %q port %d not found in exposed list", + n.Cluster, + n.ID(), + internalPort, + )) + } + } + for _, svc := range n.Services { + svc.DigestExposedPorts(ports) + } + + return true +} + +func (n *Node) ServiceByID(sid ServiceID) *Service { + sid.Normalize() + for _, svc := range n.Services { + if svc.ID == sid { + return svc + } + } + panic("service not found: " + sid.String()) +} + +type ServiceAndNode struct { + Service *Service + Node *Node +} + +type Service struct { + ID ServiceID + Image string + Port int + ExposedPort int `json:",omitempty"` + + Disabled bool `json:",omitempty"` // TODO + + // TODO: expose extra port here? + + Meta map[string]string `json:",omitempty"` + + // TODO(rb): re-expose this perhaps? Protocol string `json:",omitempty"` // tcp|http (empty == tcp) + CheckHTTP string `json:",omitempty"` // url; will do a GET + CheckTCP string `json:",omitempty"` // addr; will do a socket open/close + + EnvoyAdminPort int + ExposedEnvoyAdminPort int `json:",omitempty"` + EnvoyPublicListenerPort int `json:",omitempty"` // agentless + + Command []string `json:",omitempty"` // optional + Env []string `json:",omitempty"` // optional + + DisableServiceMesh bool `json:",omitempty"` + IsMeshGateway bool `json:",omitempty"` + Upstreams []*Upstream + + // denormalized at topology compile + Node *Node `json:"-"` +} + +func (s *Service) inheritFromExisting(existing *Service) { + s.ExposedPort = existing.ExposedPort + s.ExposedEnvoyAdminPort = existing.ExposedEnvoyAdminPort +} + +func (s *Service) ports() []int { + var out []int + if s.Port > 0 { + out = append(out, s.Port) + } + if s.EnvoyAdminPort > 0 { + out = append(out, s.EnvoyAdminPort) + } + if s.EnvoyPublicListenerPort > 0 { + out = append(out, s.EnvoyPublicListenerPort) + } + for _, u := range s.Upstreams { + if u.LocalPort > 0 { + out = append(out, u.LocalPort) + } + } + return out +} + +func (s *Service) HasCheck() bool { + return s.CheckTCP != "" || s.CheckHTTP != "" +} + +func (s *Service) DigestExposedPorts(ports map[int]int) { + s.ExposedPort = ports[s.Port] + if s.EnvoyAdminPort > 0 { + s.ExposedEnvoyAdminPort = ports[s.EnvoyAdminPort] + } else { + s.ExposedEnvoyAdminPort = 0 + } +} + +func (s *Service) Validate() error { + if s.ID.Name == "" { + return fmt.Errorf("service name is required") + } + if s.Image == "" && !s.IsMeshGateway { + return fmt.Errorf("service image is required") + } + if s.Port <= 0 { + return fmt.Errorf("service has invalid port") + } + if s.DisableServiceMesh && s.IsMeshGateway { + return fmt.Errorf("cannot disable service mesh and still run a mesh gateway") + } + if s.DisableServiceMesh && len(s.Upstreams) > 0 { + return fmt.Errorf("cannot disable service mesh and configure upstreams") + } + + if s.DisableServiceMesh { + if s.EnvoyAdminPort != 0 { + return fmt.Errorf("cannot use envoy admin port without a service mesh") + } + } else { + if s.EnvoyAdminPort <= 0 { + return fmt.Errorf("envoy admin port is required") + } + } + + for _, u := range s.Upstreams { + if u.ID.Name == "" { + return fmt.Errorf("upstream service name is required") + } + if u.LocalPort <= 0 { + return fmt.Errorf("upstream local port is required") + } + + if u.LocalAddress != "" { + ip := net.ParseIP(u.LocalAddress) + if ip == nil { + return 
fmt.Errorf("upstream local address is invalid: %s", u.LocalAddress) + } + } + } + + return nil +} + +type Upstream struct { + ID ServiceID + LocalAddress string `json:",omitempty"` // defaults to 127.0.0.1 + LocalPort int + Peer string `json:",omitempty"` + // TODO: what about mesh gateway mode overrides? + + // computed at topology compile + Cluster string `json:",omitempty"` + Peering *PeerCluster `json:",omitempty"` // this will have Link!=nil +} + +type Peering struct { + Dialing PeerCluster + Accepting PeerCluster +} + +type PeerCluster struct { + Name string + Partition string + PeerName string // name to call it on this side; defaults if not specified + + // computed at topology compile (pointer so it can be empty in json) + Link *PeerCluster `json:",omitempty"` +} + +func (c PeerCluster) String() string { + return c.Name + ":" + c.Partition +} + +func (p *Peering) String() string { + return "(" + p.Dialing.String() + ")->(" + p.Accepting.String() + ")" +} diff --git a/testing/deployer/topology/util.go b/testing/deployer/topology/util.go new file mode 100644 index 000000000000..c09021763368 --- /dev/null +++ b/testing/deployer/topology/util.go @@ -0,0 +1,17 @@ +package topology + +func MergeSlices[V any](x, y []V) []V { + switch { + case len(x) == 0 && len(y) == 0: + return nil + case len(x) == 0: + return y + case len(y) == 0: + return x + } + + out := make([]V, 0, len(x)+len(y)) + out = append(out, x...) + out = append(out, y...) + return out +} diff --git a/testing/deployer/topology/util_test.go b/testing/deployer/topology/util_test.go new file mode 100644 index 000000000000..fa0b6670f369 --- /dev/null +++ b/testing/deployer/topology/util_test.go @@ -0,0 +1,11 @@ +package topology + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestMergeSlices(t *testing.T) { + require.Nil(t, MergeSlices[int](nil, nil)) +} diff --git a/testing/deployer/util/consul.go b/testing/deployer/util/consul.go new file mode 100644 index 000000000000..5fe7a460e408 --- /dev/null +++ b/testing/deployer/util/consul.go @@ -0,0 +1,63 @@ +package util + +import ( + "fmt" + "net/http" + "net/url" + "strconv" + + "github.com/hashicorp/consul/api" + "github.com/hashicorp/go-cleanhttp" +) + +func ProxyNotPooledAPIClient(proxyPort int, containerIP string, containerPort int, token string) (*api.Client, error) { + return proxyAPIClient(cleanhttp.DefaultTransport(), proxyPort, containerIP, containerPort, token) +} + +func ProxyAPIClient(proxyPort int, containerIP string, containerPort int, token string) (*api.Client, error) { + return proxyAPIClient(cleanhttp.DefaultPooledTransport(), proxyPort, containerIP, containerPort, token) +} + +func proxyAPIClient(baseTransport *http.Transport, proxyPort int, containerIP string, containerPort int, token string) (*api.Client, error) { + if proxyPort <= 0 { + return nil, fmt.Errorf("cannot use an http proxy on port %d", proxyPort) + } + if containerIP == "" { + return nil, fmt.Errorf("container IP is required") + } + if containerPort <= 0 { + return nil, fmt.Errorf("cannot dial api client on port %d", containerPort) + } + + proxyURL, err := url.Parse("http://127.0.0.1:" + strconv.Itoa(proxyPort)) + if err != nil { + return nil, err + } + + cfg := api.DefaultConfig() + cfg.Transport = baseTransport + cfg.Transport.Proxy = http.ProxyURL(proxyURL) + cfg.Address = fmt.Sprintf("http://%s:%d", containerIP, containerPort) + cfg.Token = token + return api.NewClient(cfg) +} + +func ProxyNotPooledHTTPTransport(proxyPort int) (*http.Transport, error) { + return 
proxyHTTPTransport(cleanhttp.DefaultTransport(), proxyPort) +} + +func ProxyHTTPTransport(proxyPort int) (*http.Transport, error) { + return proxyHTTPTransport(cleanhttp.DefaultPooledTransport(), proxyPort) +} + +func proxyHTTPTransport(baseTransport *http.Transport, proxyPort int) (*http.Transport, error) { + if proxyPort <= 0 { + return nil, fmt.Errorf("cannot use an http proxy on port %d", proxyPort) + } + proxyURL, err := url.Parse("http://127.0.0.1:" + strconv.Itoa(proxyPort)) + if err != nil { + return nil, err + } + baseTransport.Proxy = http.ProxyURL(proxyURL) + return baseTransport, nil +} diff --git a/testing/deployer/util/files.go b/testing/deployer/util/files.go new file mode 100644 index 000000000000..fad1109d3207 --- /dev/null +++ b/testing/deployer/util/files.go @@ -0,0 +1,57 @@ +package util + +import ( + "fmt" + "io" + "os" + "path/filepath" + + "golang.org/x/crypto/blake2b" +) + +func FilesExist(parent string, paths ...string) (bool, error) { + for _, p := range paths { + ok, err := FileExists(filepath.Join(parent, p)) + if err != nil { + return false, err + } + if !ok { + return false, nil + } + } + return true, nil +} + +func FileExists(path string) (bool, error) { + _, err := os.Stat(path) + if os.IsNotExist(err) { + return false, nil + } else if err != nil { + return false, err + } else { + return true, nil + } +} + +func HashFile(path string) (string, error) { + hash, err := blake2b.New256(nil) + if err != nil { + return "", err + } + + if err := AddFileToHash(path, hash); err != nil { + return "", err + } + + return fmt.Sprintf("%x", hash.Sum(nil)), nil +} + +func AddFileToHash(path string, w io.Writer) error { + f, err := os.Open(path) + if err != nil { + return err + } + defer f.Close() + _, err = io.Copy(w, f) + return err +} diff --git a/testing/deployer/util/internal/ipamutils/doc.go b/testing/deployer/util/internal/ipamutils/doc.go new file mode 100644 index 000000000000..7820e3776201 --- /dev/null +++ b/testing/deployer/util/internal/ipamutils/doc.go @@ -0,0 +1,21 @@ +// Copyright 2015 Docker Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Originally from: +// https://github.com/moby/moby/blob/7489b51f610104ab5acc43f4e77142927e7b522e/libnetwork/ipamutils +// +// The only changes were to remove dead code from the package that we did not +// need, and to edit the tests to use github.com/stretchr/testify to avoid an +// extra dependency. 
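To make the pool-splitting behaviour concrete, here is a standalone sketch using only the standard library (editorial, not part of the patch). It mirrors the shape of the package's global-scope default, {"10.0.0.0/8", 24}: one /8 base carved into /24 docker address pools.

	package main

	import (
		"fmt"
		"net"
	)

	func main() {
		_, base, _ := net.ParseCIDR("10.0.0.0/8")
		ones, bits := base.Mask.Size() // 8, 32
		mask := net.CIDRMask(24, bits)

		// print the first few pools: 10.0.0.0/24, 10.0.1.0/24, 10.0.2.0/24
		for i := 0; i < 3; i++ {
			ip := make(net.IP, len(base.IP))
			copy(ip, base.IP)
			ip[2] = byte(i) // step the third octet, i.e. advance by one /24
			fmt.Println(&net.IPNet{IP: ip, Mask: mask})
		}
		fmt.Println(1<<uint(24-ones), "pools in total") // 65536
	}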
+package ipamutils diff --git a/testing/deployer/util/internal/ipamutils/utils.go b/testing/deployer/util/internal/ipamutils/utils.go new file mode 100644 index 000000000000..a0bf403c0f4e --- /dev/null +++ b/testing/deployer/util/internal/ipamutils/utils.go @@ -0,0 +1,117 @@ +// Package ipamutils provides utility functions for ipam management +package ipamutils + +import ( + "fmt" + "net" + "sync" +) + +var ( + // predefinedLocalScopeDefaultNetworks contains a list of 31 IPv4 private networks with host size 16 and 12 + // (172.17-31.x.x/16, 192.168.x.x/20) which do not overlap with the networks in `PredefinedGlobalScopeDefaultNetworks` + predefinedLocalScopeDefaultNetworks []*net.IPNet + // predefinedGlobalScopeDefaultNetworks contains a list of 64K IPv4 private networks with host size 8 + // (10.x.x.x/24) which do not overlap with the networks in `PredefinedLocalScopeDefaultNetworks` + predefinedGlobalScopeDefaultNetworks []*net.IPNet + mutex sync.Mutex + localScopeDefaultNetworks = []*NetworkToSplit{{"172.17.0.0/16", 16}, {"172.18.0.0/16", 16}, {"172.19.0.0/16", 16}, + {"172.20.0.0/14", 16}, {"172.24.0.0/14", 16}, {"172.28.0.0/14", 16}, + {"192.168.0.0/16", 20}} + globalScopeDefaultNetworks = []*NetworkToSplit{{"10.0.0.0/8", 24}} +) + +// NetworkToSplit represent a network that has to be split in chunks with mask length Size. +// Each subnet in the set is derived from the Base pool. Base is to be passed +// in CIDR format. +// Example: a Base "10.10.0.0/16 with Size 24 will define the set of 256 +// 10.10.[0-255].0/24 address pools +type NetworkToSplit struct { + Base string `json:"base"` + Size int `json:"size"` +} + +func init() { + var err error + if predefinedGlobalScopeDefaultNetworks, err = SplitNetworks(globalScopeDefaultNetworks); err != nil { + panic("failed to initialize the global scope default address pool: " + err.Error()) + } + + if predefinedLocalScopeDefaultNetworks, err = SplitNetworks(localScopeDefaultNetworks); err != nil { + panic("failed to initialize the local scope default address pool: " + err.Error()) + } +} + +// ConfigGlobalScopeDefaultNetworks configures global default pool. +// Ideally this will be called from SwarmKit as part of swarm init +func ConfigGlobalScopeDefaultNetworks(defaultAddressPool []*NetworkToSplit) error { + if defaultAddressPool == nil { + return nil + } + mutex.Lock() + defer mutex.Unlock() + defaultNetworks, err := SplitNetworks(defaultAddressPool) + if err != nil { + return err + } + predefinedGlobalScopeDefaultNetworks = defaultNetworks + return nil +} + +// GetGlobalScopeDefaultNetworks returns a copy of the global-sopce network list. +func GetGlobalScopeDefaultNetworks() []*net.IPNet { + mutex.Lock() + defer mutex.Unlock() + return append([]*net.IPNet(nil), predefinedGlobalScopeDefaultNetworks...) +} + +// GetLocalScopeDefaultNetworks returns a copy of the default local-scope network list. +func GetLocalScopeDefaultNetworks() []*net.IPNet { + return append([]*net.IPNet(nil), predefinedLocalScopeDefaultNetworks...) 
+}
+
+// SplitNetworks takes a slice of networks, split them accordingly and returns them
+func SplitNetworks(list []*NetworkToSplit) ([]*net.IPNet, error) {
+	localPools := make([]*net.IPNet, 0, len(list))
+
+	for _, p := range list {
+		_, b, err := net.ParseCIDR(p.Base)
+		if err != nil {
+			return nil, fmt.Errorf("invalid base pool %q: %v", p.Base, err)
+		}
+		ones, _ := b.Mask.Size()
+		if p.Size <= 0 || p.Size < ones {
+			return nil, fmt.Errorf("invalid pools size: %d", p.Size)
+		}
+		localPools = append(localPools, splitNetwork(p.Size, b)...)
+	}
+	return localPools, nil
+}
+
+func splitNetwork(size int, base *net.IPNet) []*net.IPNet {
+	one, bits := base.Mask.Size()
+	mask := net.CIDRMask(size, bits)
+	n := 1 << uint(size-one)
+	s := uint(bits - size)
+	list := make([]*net.IPNet, 0, n)
+
+	for i := 0; i < n; i++ {
+		ip := copyIP(base.IP)
+		addIntToIP(ip, uint(i<<s))
+		list = append(list, &net.IPNet{IP: ip, Mask: mask})
+	}
+	return list
+}
+
+func copyIP(from net.IP) net.IP {
+	ip := make([]byte, len(from))
+	copy(ip, from)
+	return ip
+}
+
+func addIntToIP(array net.IP, ordinal uint) {
+	for i := len(array) - 1; i >= 0; i-- {
+		array[i] |= (byte)(ordinal & 0xff)
+		ordinal >>= 8
+	}
+}
diff --git a/testing/deployer/util/internal/ipamutils/utils_test.go b/testing/deployer/util/internal/ipamutils/utils_test.go
new file mode 100644
index 000000000000..dd3c0e701504
--- /dev/null
+++ b/testing/deployer/util/internal/ipamutils/utils_test.go
@@ -0,0 +1,102 @@
+package ipamutils
+
+import (
+	"net"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+)
+
+func initBroadPredefinedNetworks() []*net.IPNet {
+	pl := make([]*net.IPNet, 0, 31)
+	mask := []byte{255, 255, 0, 0}
+	for i := 17; i < 32; i++ {
+		pl = append(pl, &net.IPNet{IP: []byte{172, byte(i), 0, 0}, Mask: mask})
+	}
+	mask20 := []byte{255, 255, 240, 0}
+	for i := 0; i < 16; i++ {
+		pl = append(pl, &net.IPNet{IP: []byte{192, 168, byte(i << 4), 0}, Mask: mask20})
+	}
+	return pl
+}
+
+func initGranularPredefinedNetworks() []*net.IPNet {
+	pl := make([]*net.IPNet, 0, 256*256)
+	mask := []byte{255, 255, 255, 0}
+	for i := 0; i < 256; i++ {
+		for j := 0; j < 256; j++ {
+			pl = append(pl, &net.IPNet{IP: []byte{10, byte(i), byte(j), 0}, Mask: mask})
+		}
+	}
+	return pl
+}
+
+func initGlobalScopeNetworks() []*net.IPNet {
+	pl := make([]*net.IPNet, 0, 256*256)
+	mask := []byte{255, 255, 255, 0}
+	for i := 0; i < 256; i++ {
+		for j := 0; j < 256; j++ {
+			pl = append(pl, &net.IPNet{IP: []byte{30, byte(i), byte(j), 0}, Mask: mask})
+		}
+	}
+	return pl
+}
+
+func TestDefaultNetwork(t *testing.T) {
+	for _, nw := range GetGlobalScopeDefaultNetworks() {
+		if ones, bits := nw.Mask.Size(); bits != 32 || ones != 24 {
+			t.Fatalf("Unexpected size for network in granular list: %v", nw)
+		}
+	}
+
+	for _, nw := range GetLocalScopeDefaultNetworks() {
+		if ones, bits := nw.Mask.Size(); bits != 32 || (ones != 20 && ones != 16) {
+			t.Fatalf("Unexpected size for network in broad list: %v", nw)
+		}
+	}
+
+	originalBroadNets := initBroadPredefinedNetworks()
+	m := make(map[string]bool)
+	for _, v := range originalBroadNets {
+		m[v.String()] = true
+	}
+	for _, nw := range GetLocalScopeDefaultNetworks() {
+		_, ok := m[nw.String()]
+		assert.True(t, ok)
+		delete(m, nw.String())
+	}
+
+	assert.Empty(t, m)
+
+	originalGranularNets := initGranularPredefinedNetworks()
+
+	m = make(map[string]bool)
+	for _, v := range originalGranularNets {
+		m[v.String()] = true
+	}
+	for _, nw := range GetGlobalScopeDefaultNetworks() {
+		_, ok := m[nw.String()]
+		assert.True(t, ok)
+		delete(m, nw.String())
+	}
+
+	assert.Empty(t, m)
+}
+
+func TestConfigGlobalScopeDefaultNetworks(t *testing.T) {
+	err := ConfigGlobalScopeDefaultNetworks([]*NetworkToSplit{{"30.0.0.0/8", 24}})
+	assert.NoError(t, err)
+
+	originalGlobalScopeNetworks :=
initGlobalScopeNetworks() + m := make(map[string]bool) + for _, v := range originalGlobalScopeNetworks { + m[v.String()] = true + } + for _, nw := range GetGlobalScopeDefaultNetworks() { + _, ok := m[nw.String()] + assert.True(t, ok) + delete(m, nw.String()) + } + + assert.Empty(t, m) +} diff --git a/testing/deployer/util/net.go b/testing/deployer/util/net.go new file mode 100644 index 000000000000..0ca297d2051a --- /dev/null +++ b/testing/deployer/util/net.go @@ -0,0 +1,17 @@ +package util + +import ( + "github.com/hashicorp/consul/testing/deployer/util/internal/ipamutils" +) + +// GetPossibleDockerNetworkSubnets returns a copy of the global-scope network list. +func GetPossibleDockerNetworkSubnets() map[string]struct{} { + list := ipamutils.GetGlobalScopeDefaultNetworks() + + out := make(map[string]struct{}) + for _, ipnet := range list { + subnet := ipnet.String() + out[subnet] = struct{}{} + } + return out +} From 921445712eea2e949afb1888d2c81a245fdcec93 Mon Sep 17 00:00:00 2001 From: Ronald Date: Tue, 18 Jul 2023 14:59:01 -0400 Subject: [PATCH 43/43] [NET-4792] Add integrations tests for jwt-auth (#18169) --- .../consul-container/libs/cluster/cluster.go | 2 +- .../consul-container/libs/service/connect.go | 20 +- .../test/jwtauth/jwt_auth_test.go | 229 ++++++++++++------ 3 files changed, 168 insertions(+), 83 deletions(-) diff --git a/test/integration/consul-container/libs/cluster/cluster.go b/test/integration/consul-container/libs/cluster/cluster.go index e5f6537c166e..aedf0ac9267a 100644 --- a/test/integration/consul-container/libs/cluster/cluster.go +++ b/test/integration/consul-container/libs/cluster/cluster.go @@ -8,7 +8,6 @@ import ( "encoding/json" "errors" "fmt" - "github.com/hashicorp/consul/test/integration/consul-container/libs/utils" "os" "path/filepath" "strconv" @@ -18,6 +17,7 @@ import ( "github.com/hashicorp/consul/api" "github.com/hashicorp/consul/sdk/testutil/retry" + "github.com/hashicorp/consul/test/integration/consul-container/libs/utils" "github.com/hashicorp/serf/serf" goretry "github.com/avast/retry-go" diff --git a/test/integration/consul-container/libs/service/connect.go b/test/integration/consul-container/libs/service/connect.go index 006ca804a988..4251a6d3c89a 100644 --- a/test/integration/consul-container/libs/service/connect.go +++ b/test/integration/consul-container/libs/service/connect.go @@ -167,14 +167,13 @@ func NewConnectService(ctx context.Context, sidecarCfg SidecarConfig, serviceBin namePrefix := fmt.Sprintf("%s-service-connect-%s", node.GetDatacenter(), sidecarCfg.Name) containerName := utils.RandName(namePrefix) - agentConfig := node.GetConfig() internalAdminPort, err := node.ClaimAdminPort() if err != nil { return nil, err } - fmt.Println("agent image name", agentConfig.DockerImage()) - imageVersion := utils.SideCarVersion(agentConfig.DockerImage()) + fmt.Println("agent image name", nodeConfig.DockerImage()) + imageVersion := utils.SideCarVersion(nodeConfig.DockerImage()) req := testcontainers.ContainerRequest{ Image: fmt.Sprintf("consul-envoy:%s", imageVersion), WaitingFor: wait.ForLog("").WithStartupTimeout(100 * time.Second), @@ -238,6 +237,21 @@ func NewConnectService(ctx context.Context, sidecarCfg SidecarConfig, serviceBin req.Env["CONSUL_GRPC_ADDR"] = fmt.Sprintf("http://127.0.0.1:%d", 8502) } + if nodeConfig.ACLEnabled { + client := node.GetClient() + token, _, err := client.ACL().TokenCreate(&api.ACLToken{ + ServiceIdentities: []*api.ACLServiceIdentity{ + {ServiceName: sidecarCfg.ServiceID}, + }, + }, nil) + + if err != nil { + return 
nil, err + } + + req.Env["CONSUL_HTTP_TOKEN"] = token.SecretID + } + var ( appPortStrs []string adminPortStr = strconv.Itoa(internalAdminPort) diff --git a/test/integration/consul-container/test/jwtauth/jwt_auth_test.go b/test/integration/consul-container/test/jwtauth/jwt_auth_test.go index 498bdcedf181..2ff3938f92d6 100644 --- a/test/integration/consul-container/test/jwtauth/jwt_auth_test.go +++ b/test/integration/consul-container/test/jwtauth/jwt_auth_test.go @@ -24,20 +24,22 @@ import ( "time" ) -// TestJWTAuthConnectService summary +// TestJWTAuthConnectService summary: // This test ensures that when we have an intention referencing a JWT, requests -// without JWT authorization headers are denied. And requests with the correct JWT -// Authorization header are successful +// without JWT authorization headers are denied and requests with the correct JWT +// Authorization header are successful. // // Steps: // - Creates a single agent cluster +// - Generates a JWKS and 2 JWTs with different claims +// - Generates another JWKS with a single JWT +// - Configures proxy defaults, providers and intentions // - Creates a static-server and sidecar containers // - Registers the created static-server and sidecar with consul // - Create a static-client and sidecar containers // - Registers the static-client and sidecar with consul // - Ensure client sidecar is running as expected -// - Make a request without the JWT Authorization header and expects 401 StatusUnauthorized -// - Make a request with the JWT Authorization header and expects a 200 +// - Runs a couple of scenarios to ensure jwt validation works as expected func TestJWTAuthConnectService(t *testing.T) { t.Parallel() @@ -47,39 +49,65 @@ func TestJWTAuthConnectService(t *testing.T) { ApplyDefaultProxySettings: true, BuildOpts: &libcluster.BuildOptions{ Datacenter: "dc1", - InjectAutoEncryption: true, - InjectGossipEncryption: true, + InjectCerts: true, + InjectGossipEncryption: false, + AllowHTTPAnyway: true, + ACLEnabled: true, }, }) + // generate jwks and 2 jwts with different claims for provider 1 + jwksOne, privOne := makeJWKS(t) + claimsOne := makeTestClaims("https://legit.issuer.internal/", "https://consul.test") + jwtOne := makeJWT(t, privOne, claimsOne, testClaimPayload{UserType: "admin", FirstName: "admin"}) + jwtOneAdmin := makeJWT(t, privOne, claimsOne, testClaimPayload{UserType: "client", FirstName: "non-admin"}) + provider1 := makeTestJWTProvider("okta", jwksOne, claimsOne) + + // generate another jwks and jwt for provider 2 + jwksTwo, privTwo := makeJWKS(t) + claimsTwo := makeTestClaims("https://another.issuer.internal/", "https://consul.test") + jwtTwo := makeJWT(t, privTwo, claimsTwo, testClaimPayload{}) + provider2 := makeTestJWTProvider("auth0", jwksTwo, claimsTwo) + + // configure proxy-defaults, jwt providers and intentions + configureProxyDefaults(t, cluster) + configureJWTProviders(t, cluster, provider1, provider2) + configureIntentions(t, cluster, provider1, provider2) + clientService := createServices(t, cluster) _, clientPort := clientService.GetAddr() - _, clientAdminPort := clientService.GetAdminAddr() + _, adminPort := clientService.GetAdminAddr() - libassert.AssertUpstreamEndpointStatus(t, clientAdminPort, "static-server.default", "HEALTHY", 1) libassert.AssertContainerState(t, clientService, "running") - libassert.AssertFortioName(t, fmt.Sprintf("http://localhost:%d", clientPort), "static-server", "") + libassert.AssertUpstreamEndpointStatus(t, adminPort, "static-server.default", "HEALTHY", 1) - claims := 
jwt.Claims{ - Subject: "r3qXcK2bix9eFECzsU3Sbmh0K16fatW6@clients", - Audience: jwt.Audience{"https://consul.test"}, - Issuer: "https://legit.issuer.internal/", - NotBefore: jwt.NewNumericDate(time.Now().Add(-5 * time.Second)), - Expiry: jwt.NewNumericDate(time.Now().Add(60 * time.Minute)), - } + // request to restricted endpoint with no jwt should be denied + doRequest(t, fmt.Sprintf("http://localhost:%d/restricted/foo", clientPort), http.StatusForbidden, "") - jwks, jwt := makeJWKSAndJWT(t, claims) + // request with jwt 1 /restricted/foo should be disallowed + doRequest(t, fmt.Sprintf("http://localhost:%d/restricted/foo", clientPort), http.StatusForbidden, jwtOne) - // configure proxy-defaults, jwt-provider and intention - configureProxyDefaults(t, cluster) - configureJWTProvider(t, cluster, jwks, claims) - configureIntentions(t, cluster) - - baseURL := fmt.Sprintf("http://localhost:%d", clientPort) - // TODO(roncodingenthusiast): update test to reflect jwt-auth filter in metadata mode - doRequest(t, baseURL, http.StatusOK, "") - // succeeds with jwt - doRequest(t, baseURL, http.StatusOK, jwt) + // request with jwt 1 /other/foo should be allowed + libassert.HTTPServiceEchoesWithHeaders(t, "localhost", clientPort, "other/foo", makeAuthHeaders(jwtOne)) + + // request with jwt 1 /other/foo with mismatched claims should be disallowed + doRequest(t, fmt.Sprintf("http://localhost:%d/other/foo", clientPort), http.StatusForbidden, jwtOneAdmin) + + // request with provider 1 /foo should be allowed + libassert.HTTPServiceEchoesWithHeaders(t, "localhost", clientPort, "foo", makeAuthHeaders(jwtOne)) + + // request with jwt 2 to /foo should be denied + doRequest(t, fmt.Sprintf("http://localhost:%d/foo", clientPort), http.StatusForbidden, jwtTwo) + + // request with jwt 2 to /restricted/foo should be allowed + libassert.HTTPServiceEchoesWithHeaders(t, "localhost", clientPort, "restricted/foo", makeAuthHeaders(jwtTwo)) + + // request with jwt 2 to /other/foo should be denied + doRequest(t, fmt.Sprintf("http://localhost:%d/other/foo", clientPort), http.StatusForbidden, jwtTwo) +} + +func makeAuthHeaders(jwt string) map[string]string { + return map[string]string{"Authorization": fmt.Sprintf("Bearer %s", jwt)} } func createServices(t *testing.T, cluster *libcluster.Cluster) libservice.Service { @@ -92,25 +120,25 @@ func createServices(t *testing.T, cluster *libcluster.Cluster) libservice.Servic HTTPPort: 8080, GRPCPort: 8079, } + apiOpts := &api.QueryOptions{Token: cluster.TokenBootstrap} // Create a service and proxy instance _, _, err := libservice.CreateAndRegisterStaticServerAndSidecar(node, serviceOpts) require.NoError(t, err) - libassert.CatalogServiceExists(t, client, "static-server-sidecar-proxy", nil) - libassert.CatalogServiceExists(t, client, libservice.StaticServerServiceName, nil) + libassert.CatalogServiceExists(t, client, "static-server-sidecar-proxy", apiOpts) + libassert.CatalogServiceExists(t, client, libservice.StaticServerServiceName, apiOpts) // Create a client proxy instance with the server as an upstream clientConnectProxy, err := libservice.CreateAndRegisterStaticClientSidecar(node, "", false, false) require.NoError(t, err) - libassert.CatalogServiceExists(t, client, "static-client-sidecar-proxy", nil) + libassert.CatalogServiceExists(t, client, "static-client-sidecar-proxy", apiOpts) return clientConnectProxy } -// creates a JWKS and JWT that will be used for validation -func makeJWKSAndJWT(t *testing.T, claims jwt.Claims) (string, string) { +func makeJWKS(t *testing.T) (string, 
string) { pub, priv, err := libutils.GenerateKey() require.NoError(t, err) @@ -120,46 +148,36 @@ func makeJWKSAndJWT(t *testing.T, claims jwt.Claims) (string, string) { jwksJson, err := json.Marshal(jwks) require.NoError(t, err) - type orgs struct { - Primary string `json:"primary"` - } - privateCl := struct { - FirstName string `json:"first_name"` - Org orgs `json:"org"` - Groups []string `json:"groups"` - }{ - FirstName: "jeff2", - Org: orgs{"engineering"}, - Groups: []string{"foo", "bar"}, - } + return string(jwksJson), priv +} + +type testClaimPayload struct { + UserType string + FirstName string +} - jwt, err := libutils.SignJWT(priv, claims, privateCl) +func makeJWT(t *testing.T, priv string, claims jwt.Claims, payload testClaimPayload) string { + jwt, err := libutils.SignJWT(priv, claims, payload) require.NoError(t, err) - return string(jwksJson), jwt + + return jwt } // configures the protocol to http as this is needed for jwt-auth func configureProxyDefaults(t *testing.T, cluster *libcluster.Cluster) { - client := cluster.Agents[0].GetClient() - - ok, _, err := client.ConfigEntries().Set(&api.ProxyConfigEntry{ + require.NoError(t, cluster.ConfigEntryWrite(&api.ProxyConfigEntry{ Kind: api.ProxyDefaults, Name: api.ProxyConfigGlobal, Config: map[string]interface{}{ "protocol": "http", }, - }, nil) - require.NoError(t, err) - require.True(t, ok) + })) } -// creates a JWT local provider -func configureJWTProvider(t *testing.T, cluster *libcluster.Cluster, jwks string, claims jwt.Claims) { - client := cluster.Agents[0].GetClient() - - ok, _, err := client.ConfigEntries().Set(&api.JWTProviderConfigEntry{ +func makeTestJWTProvider(name string, jwks string, claims jwt.Claims) *api.JWTProviderConfigEntry { + return &api.JWTProviderConfigEntry{ Kind: api.JWTProvider, - Name: "test-jwt", + Name: name, JSONWebKeySet: &api.JSONWebKeySet{ Local: &api.LocalJWKS{ JWKS: base64.StdEncoding.EncodeToString([]byte(jwks)), @@ -167,42 +185,85 @@ func configureJWTProvider(t *testing.T, cluster *libcluster.Cluster, jwks string }, Issuer: claims.Issuer, Audiences: claims.Audience, - }, nil) - require.NoError(t, err) - require.True(t, ok) + } } -// creates an intention referencing the jwt provider -func configureIntentions(t *testing.T, cluster *libcluster.Cluster) { - client := cluster.Agents[0].GetClient() +// creates a JWT local provider +func configureJWTProviders(t *testing.T, cluster *libcluster.Cluster, providers ...*api.JWTProviderConfigEntry) { + for _, prov := range providers { + require.NoError(t, cluster.ConfigEntryWrite(prov)) + } +} - ok, _, err := client.ConfigEntries().Set(&api.ServiceIntentionsConfigEntry{ +// creates an intention referencing the jwt provider +func configureIntentions(t *testing.T, cluster *libcluster.Cluster, provider1, provider2 *api.JWTProviderConfigEntry) { + intention := api.ServiceIntentionsConfigEntry{ Kind: "service-intentions", Name: libservice.StaticServerServiceName, Sources: []*api.SourceIntention{ { - Name: libservice.StaticClientServiceName, - Action: api.IntentionActionAllow, + Name: libservice.StaticClientServiceName, + Permissions: []*api.IntentionPermission{ + { + Action: api.IntentionActionAllow, + HTTP: &api.IntentionHTTPPermission{ + PathPrefix: "/restricted/", + }, + JWT: &api.IntentionJWTRequirement{ + Providers: []*api.IntentionJWTProvider{ + { + Name: provider2.Name, + }, + }, + }, + }, + { + Action: api.IntentionActionAllow, + HTTP: &api.IntentionHTTPPermission{ + PathPrefix: "/", + }, + JWT: &api.IntentionJWTRequirement{ + Providers: 
[]*api.IntentionJWTProvider{ + { + Name: provider1.Name, + VerifyClaims: []*api.IntentionJWTClaimVerification{ + { + Path: []string{"UserType"}, + Value: "admin", + }, + }, + }, + }, + }, + }, + }, }, - }, - JWT: &api.IntentionJWTRequirement{ - Providers: []*api.IntentionJWTProvider{ - { - Name: "test-jwt", - VerifyClaims: []*api.IntentionJWTClaimVerification{}, + { + Name: "other-client", + Permissions: []*api.IntentionPermission{ + { + Action: api.IntentionActionAllow, + HTTP: &api.IntentionHTTPPermission{ + PathPrefix: "/other/", + }, + JWT: &api.IntentionJWTRequirement{ + Providers: []*api.IntentionJWTProvider{ + { + Name: provider2.Name, + }, + }, + }, + }, }, }, }, - }, nil) - require.NoError(t, err) - require.True(t, ok) + } + require.NoError(t, cluster.ConfigEntryWrite(&intention)) } func doRequest(t *testing.T, url string, expStatus int, jwt string) { retry.RunWith(&retry.Timer{Timeout: 5 * time.Second, Wait: time.Second}, t, func(r *retry.R) { - client := cleanhttp.DefaultClient() - req, err := http.NewRequest("GET", url, nil) require.NoError(r, err) if jwt != "" { @@ -213,3 +274,13 @@ func doRequest(t *testing.T, url string, expStatus int, jwt string) { require.Equal(r, expStatus, resp.StatusCode) }) } + +func makeTestClaims(issuer, audience string) jwt.Claims { + return jwt.Claims{ + Subject: "r3qXcK2bix9eFECzsU3Sbmh0K16fatW6@clients", + Audience: jwt.Audience{audience}, + Issuer: issuer, + NotBefore: jwt.NewNumericDate(time.Now().Add(-5 * time.Second)), + Expiry: jwt.NewNumericDate(time.Now().Add(60 * time.Minute)), + } +}
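
For reference, a minimal, self-contained sketch of the kind of JWKS and signed-JWT generation that makeJWKS/makeJWT lean on via libutils.GenerateKey and libutils.SignJWT, written directly against github.com/go-jose/go-jose/v3. The ES256/P-256 key type, the "test-key" key ID, and the map-based private claim are illustrative assumptions, not a description of what those helpers actually do:

    package main

    import (
        "crypto/ecdsa"
        "crypto/elliptic"
        "crypto/rand"
        "encoding/json"
        "fmt"
        "time"

        "github.com/go-jose/go-jose/v3"
        "github.com/go-jose/go-jose/v3/jwt"
    )

    func main() {
        // Generate a signing key pair (ES256/P-256 is assumed here for brevity).
        priv, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
        if err != nil {
            panic(err)
        }

        // Expose only the public half as a JWKS; the test base64-encodes this JSON
        // into the Local JWKS of the jwt-provider config entry.
        jwks := jose.JSONWebKeySet{Keys: []jose.JSONWebKey{{
            Key: priv.Public(), KeyID: "test-key", Algorithm: string(jose.ES256), Use: "sig",
        }}}
        jwksJSON, err := json.Marshal(jwks)
        if err != nil {
            panic(err)
        }

        // Sign a token whose issuer and audience match the provider's Issuer/Audiences,
        // plus a private claim that the intention's VerifyClaims can match on.
        signer, err := jose.NewSigner(
            jose.SigningKey{Algorithm: jose.ES256, Key: priv},
            (&jose.SignerOptions{}).WithType("JWT"),
        )
        if err != nil {
            panic(err)
        }
        claims := jwt.Claims{
            Issuer:    "https://legit.issuer.internal/",
            Audience:  jwt.Audience{"https://consul.test"},
            NotBefore: jwt.NewNumericDate(time.Now().Add(-5 * time.Second)),
            Expiry:    jwt.NewNumericDate(time.Now().Add(60 * time.Minute)),
        }
        private := map[string]string{"UserType": "admin"}
        token, err := jwt.Signed(signer).Claims(claims).Claims(private).CompactSerialize()
        if err != nil {
            panic(err)
        }

        fmt.Println(string(jwksJSON))
        fmt.Println(token)
    }

The JWKS JSON is what ends up in the provider config entry; the compact token is what doRequest sends in the Authorization header.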
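
Under the same assumptions, a sketch of checking such a token against the JWKS locally. This is only a conceptual stand-in for the JWT verification the sidecar performs before the RBAC filter evaluates the intention's JWT requirement, not that filter's implementation:

    package main

    import (
        "encoding/json"
        "errors"
        "fmt"

        "github.com/go-jose/go-jose/v3"
        "github.com/go-jose/go-jose/v3/jwt"
    )

    // verifyAgainstJWKS validates rawJWT's signature against the first key in the
    // JWKS document and decodes its registered and private claims. A real verifier
    // would select the key by "kid" instead of taking Keys[0].
    func verifyAgainstJWKS(jwksJSON []byte, rawJWT string) (jwt.Claims, map[string]interface{}, error) {
        var keySet jose.JSONWebKeySet
        if err := json.Unmarshal(jwksJSON, &keySet); err != nil {
            return jwt.Claims{}, nil, err
        }
        if len(keySet.Keys) == 0 {
            return jwt.Claims{}, nil, errors.New("empty JWKS")
        }

        tok, err := jwt.ParseSigned(rawJWT)
        if err != nil {
            return jwt.Claims{}, nil, err
        }

        var std jwt.Claims
        private := map[string]interface{}{}
        // Claims verifies the signature with the given key and then decodes the
        // payload into the supplied destinations.
        if err := tok.Claims(keySet.Keys[0].Key, &std, &private); err != nil {
            return jwt.Claims{}, nil, err
        }
        return std, private, nil
    }

    func main() {
        // Wire in the jwksJSON and token produced by the previous sketch.
        fmt.Println("use verifyAgainstJWKS(jwksJSON, token)")
    }

Issuer, audience, and expiry can additionally be checked on the decoded registered claims with Claims.Validate and a jwt.Expected value; in the test those constraints are enforced by the provider config entry and the intention rather than by client-side code.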