From 20d1fb8c78443324463d8f7919b1d69f311eaada Mon Sep 17 00:00:00 2001 From: "R.B. Boyer" <4903+rboyer@users.noreply.github.com> Date: Fri, 13 Oct 2023 15:32:06 -0500 Subject: [PATCH 001/130] server: run the api checks against the path without params (#19205) --- agent/agent_endpoint_test.go | 12 ++++++++++++ agent/http.go | 2 +- 2 files changed, 13 insertions(+), 1 deletion(-) diff --git a/agent/agent_endpoint_test.go b/agent/agent_endpoint_test.go index 1a021d7b8e4c..ab9a44ff9877 100644 --- a/agent/agent_endpoint_test.go +++ b/agent/agent_endpoint_test.go @@ -90,6 +90,18 @@ func TestAgentEndpointsFailInV2(t *testing.T) { }) } + t.Run("agent-self-with-params", func(t *testing.T) { + req, err := http.NewRequest("GET", "/v1/agent/self?dc=dc1", nil) + require.NoError(t, err) + + resp := httptest.NewRecorder() + a.srv.h.ServeHTTP(resp, req) + require.Equal(t, http.StatusOK, resp.Code) + + _, err = io.ReadAll(resp.Body) + require.NoError(t, err) + }) + checkRequest("PUT", "/v1/agent/maintenance") checkRequest("GET", "/v1/agent/services") checkRequest("GET", "/v1/agent/service/web") diff --git a/agent/http.go b/agent/http.go index e95d36c914f8..aed7c920cdf9 100644 --- a/agent/http.go +++ b/agent/http.go @@ -396,7 +396,7 @@ func (s *HTTPHandlers) wrap(handler endpoint, methods []string) http.HandlerFunc rejectCatalogV1Endpoint := false if s.agent.baseDeps.UseV2Resources() { - rejectCatalogV1Endpoint = isV1CatalogRequest(logURL) + rejectCatalogV1Endpoint = isV1CatalogRequest(req.URL.Path) } if s.denylist.Block(req.URL.Path) { From 3d1a606c3b58add3884f8819257cf96fbcc842dc Mon Sep 17 00:00:00 2001 From: Ashwin Venkatesh Date: Fri, 13 Oct 2023 16:41:22 -0400 Subject: [PATCH 002/130] Clone proto into deepcopy correctly (#19204) --- .../internal/generate/generate.go | 4 +- .../ratelimit/ratelimit_deepcopy.gen.go | 4 +- proto-public/pbacl/acl_deepcopy.gen.go | 20 ++-- ...mputed_traffic_permissions_deepcopy.gen.go | 4 +- .../traffic_permissions_deepcopy.gen.go | 40 +++---- .../v2beta1/workload_identity_deepcopy.gen.go | 4 +- .../pbcatalog/v2beta1/dns_deepcopy.gen.go | 8 +- .../v2beta1/failover_policy_deepcopy.gen.go | 12 +- .../pbcatalog/v2beta1/health_deepcopy.gen.go | 36 +++--- .../pbcatalog/v2beta1/node_deepcopy.gen.go | 8 +- .../v2beta1/selector_deepcopy.gen.go | 4 +- .../pbcatalog/v2beta1/service_deepcopy.gen.go | 8 +- .../v2beta1/service_endpoints_deepcopy.gen.go | 8 +- .../pbcatalog/v2beta1/vip_deepcopy.gen.go | 8 +- .../v2beta1/workload_deepcopy.gen.go | 16 +-- proto-public/pbconnectca/ca_deepcopy.gen.go | 20 ++-- .../pbdataplane/dataplane_deepcopy.gen.go | 20 ++-- proto-public/pbdns/dns_deepcopy.gen.go | 8 +- .../pbmesh/v2beta1/common_deepcopy.gen.go | 8 +- ...uted_explicit_destinations_deepcopy.gen.go | 4 +- ...mputed_proxy_configuration_deepcopy.gen.go | 4 +- .../v2beta1/computed_routes_deepcopy.gen.go | 56 +++++----- .../pbmesh/v2beta1/connection_deepcopy.gen.go | 8 +- .../destination_policy_deepcopy.gen.go | 32 +++--- ...destinations_configuration_deepcopy.gen.go | 20 ++-- .../v2beta1/destinations_deepcopy.gen.go | 20 ++-- .../pbmesh/v2beta1/expose_deepcopy.gen.go | 8 +- .../pbmesh/v2beta1/grpc_route_deepcopy.gen.go | 28 ++--- .../pbmesh/v2beta1/http_route_deepcopy.gen.go | 44 ++++---- .../http_route_retries_deepcopy.gen.go | 4 +- .../http_route_timeouts_deepcopy.gen.go | 4 +- .../pbproxystate/access_logs_deepcopy.gen.go | 4 +- .../pbproxystate/address_deepcopy.gen.go | 8 +- .../pbproxystate/cluster_deepcopy.gen.go | 104 +++++++++--------- .../pbproxystate/endpoints_deepcopy.gen.go | 8 +- 
.../escape_hatches_deepcopy.gen.go | 4 +- .../header_mutations_deepcopy.gen.go | 24 ++-- .../pbproxystate/listener_deepcopy.gen.go | 32 +++--- .../pbproxystate/references_deepcopy.gen.go | 12 +- .../pbproxystate/route_deepcopy.gen.go | 64 +++++------ .../traffic_permissions_deepcopy.gen.go | 16 +-- .../transport_socket_deepcopy.gen.go | 52 ++++----- .../proxy_configuration_deepcopy.gen.go | 24 ++-- .../v2beta1/proxy_state_deepcopy.gen.go | 8 +- .../pbmesh/v2beta1/tcp_route_deepcopy.gen.go | 12 +- .../pbresource/annotations_deepcopy.gen.go | 4 +- .../pbresource/resource_deepcopy.gen.go | 88 +++++++-------- .../serverdiscovery_deepcopy.gen.go | 12 +- .../v1alpha1/namespace_deepcopy.gen.go | 4 +- 49 files changed, 476 insertions(+), 476 deletions(-) diff --git a/internal/resource/protoc-gen-deepcopy/internal/generate/generate.go b/internal/resource/protoc-gen-deepcopy/internal/generate/generate.go index f9af3fa62a44..afa8dda7f621 100644 --- a/internal/resource/protoc-gen-deepcopy/internal/generate/generate.go +++ b/internal/resource/protoc-gen-deepcopy/internal/generate/generate.go @@ -37,8 +37,8 @@ func Generate(gen *protogen.Plugin) error { // Generate DeepCopyInto() method for this type p.P(`// DeepCopyInto supports using `, typeName, ` within kubernetes types, where deepcopy-gen is used.`) p.P(`func (in *`, typeName, `) DeepCopyInto(out *`, typeName, `) {`) - p.P(`p := `, protoIdent, `(in).(*`, typeName, `)`) - p.P(`*out = *p`) + p.P(`proto.Reset(out)`) + p.P(`proto.Merge(out,`, protoIdent, `(in))`) p.P(`}`) // Generate DeepCopy() method for this type diff --git a/proto-public/annotations/ratelimit/ratelimit_deepcopy.gen.go b/proto-public/annotations/ratelimit/ratelimit_deepcopy.gen.go index 72ef4d990c62..6c1c69633734 100644 --- a/proto-public/annotations/ratelimit/ratelimit_deepcopy.gen.go +++ b/proto-public/annotations/ratelimit/ratelimit_deepcopy.gen.go @@ -7,8 +7,8 @@ import ( // DeepCopyInto supports using Spec within kubernetes types, where deepcopy-gen is used. func (in *Spec) DeepCopyInto(out *Spec) { - p := proto.Clone(in).(*Spec) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Spec. Required by controller-gen. diff --git a/proto-public/pbacl/acl_deepcopy.gen.go b/proto-public/pbacl/acl_deepcopy.gen.go index 9bf1dcb45766..b3b069fe1fc8 100644 --- a/proto-public/pbacl/acl_deepcopy.gen.go +++ b/proto-public/pbacl/acl_deepcopy.gen.go @@ -7,8 +7,8 @@ import ( // DeepCopyInto supports using LogoutResponse within kubernetes types, where deepcopy-gen is used. func (in *LogoutResponse) DeepCopyInto(out *LogoutResponse) { - p := proto.Clone(in).(*LogoutResponse) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LogoutResponse. Required by controller-gen. @@ -28,8 +28,8 @@ func (in *LogoutResponse) DeepCopyInterface() interface{} { // DeepCopyInto supports using LoginRequest within kubernetes types, where deepcopy-gen is used. func (in *LoginRequest) DeepCopyInto(out *LoginRequest) { - p := proto.Clone(in).(*LoginRequest) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoginRequest. Required by controller-gen. 
@@ -49,8 +49,8 @@ func (in *LoginRequest) DeepCopyInterface() interface{} { // DeepCopyInto supports using LoginResponse within kubernetes types, where deepcopy-gen is used. func (in *LoginResponse) DeepCopyInto(out *LoginResponse) { - p := proto.Clone(in).(*LoginResponse) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoginResponse. Required by controller-gen. @@ -70,8 +70,8 @@ func (in *LoginResponse) DeepCopyInterface() interface{} { // DeepCopyInto supports using LoginToken within kubernetes types, where deepcopy-gen is used. func (in *LoginToken) DeepCopyInto(out *LoginToken) { - p := proto.Clone(in).(*LoginToken) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoginToken. Required by controller-gen. @@ -91,8 +91,8 @@ func (in *LoginToken) DeepCopyInterface() interface{} { // DeepCopyInto supports using LogoutRequest within kubernetes types, where deepcopy-gen is used. func (in *LogoutRequest) DeepCopyInto(out *LogoutRequest) { - p := proto.Clone(in).(*LogoutRequest) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LogoutRequest. Required by controller-gen. diff --git a/proto-public/pbauth/v2beta1/computed_traffic_permissions_deepcopy.gen.go b/proto-public/pbauth/v2beta1/computed_traffic_permissions_deepcopy.gen.go index b0b3baa08721..7c22d2285c2d 100644 --- a/proto-public/pbauth/v2beta1/computed_traffic_permissions_deepcopy.gen.go +++ b/proto-public/pbauth/v2beta1/computed_traffic_permissions_deepcopy.gen.go @@ -7,8 +7,8 @@ import ( // DeepCopyInto supports using ComputedTrafficPermissions within kubernetes types, where deepcopy-gen is used. func (in *ComputedTrafficPermissions) DeepCopyInto(out *ComputedTrafficPermissions) { - p := proto.Clone(in).(*ComputedTrafficPermissions) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ComputedTrafficPermissions. Required by controller-gen. diff --git a/proto-public/pbauth/v2beta1/traffic_permissions_deepcopy.gen.go b/proto-public/pbauth/v2beta1/traffic_permissions_deepcopy.gen.go index 177c60c14a89..b954d00b407f 100644 --- a/proto-public/pbauth/v2beta1/traffic_permissions_deepcopy.gen.go +++ b/proto-public/pbauth/v2beta1/traffic_permissions_deepcopy.gen.go @@ -7,8 +7,8 @@ import ( // DeepCopyInto supports using TrafficPermissions within kubernetes types, where deepcopy-gen is used. func (in *TrafficPermissions) DeepCopyInto(out *TrafficPermissions) { - p := proto.Clone(in).(*TrafficPermissions) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TrafficPermissions. Required by controller-gen. @@ -28,8 +28,8 @@ func (in *TrafficPermissions) DeepCopyInterface() interface{} { // DeepCopyInto supports using NamespaceTrafficPermissions within kubernetes types, where deepcopy-gen is used. func (in *NamespaceTrafficPermissions) DeepCopyInto(out *NamespaceTrafficPermissions) { - p := proto.Clone(in).(*NamespaceTrafficPermissions) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamespaceTrafficPermissions. 
Required by controller-gen. @@ -49,8 +49,8 @@ func (in *NamespaceTrafficPermissions) DeepCopyInterface() interface{} { // DeepCopyInto supports using PartitionTrafficPermissions within kubernetes types, where deepcopy-gen is used. func (in *PartitionTrafficPermissions) DeepCopyInto(out *PartitionTrafficPermissions) { - p := proto.Clone(in).(*PartitionTrafficPermissions) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PartitionTrafficPermissions. Required by controller-gen. @@ -70,8 +70,8 @@ func (in *PartitionTrafficPermissions) DeepCopyInterface() interface{} { // DeepCopyInto supports using Destination within kubernetes types, where deepcopy-gen is used. func (in *Destination) DeepCopyInto(out *Destination) { - p := proto.Clone(in).(*Destination) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Destination. Required by controller-gen. @@ -91,8 +91,8 @@ func (in *Destination) DeepCopyInterface() interface{} { // DeepCopyInto supports using Permission within kubernetes types, where deepcopy-gen is used. func (in *Permission) DeepCopyInto(out *Permission) { - p := proto.Clone(in).(*Permission) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Permission. Required by controller-gen. @@ -112,8 +112,8 @@ func (in *Permission) DeepCopyInterface() interface{} { // DeepCopyInto supports using Source within kubernetes types, where deepcopy-gen is used. func (in *Source) DeepCopyInto(out *Source) { - p := proto.Clone(in).(*Source) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Source. Required by controller-gen. @@ -133,8 +133,8 @@ func (in *Source) DeepCopyInterface() interface{} { // DeepCopyInto supports using ExcludeSource within kubernetes types, where deepcopy-gen is used. func (in *ExcludeSource) DeepCopyInto(out *ExcludeSource) { - p := proto.Clone(in).(*ExcludeSource) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExcludeSource. Required by controller-gen. @@ -154,8 +154,8 @@ func (in *ExcludeSource) DeepCopyInterface() interface{} { // DeepCopyInto supports using DestinationRule within kubernetes types, where deepcopy-gen is used. func (in *DestinationRule) DeepCopyInto(out *DestinationRule) { - p := proto.Clone(in).(*DestinationRule) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DestinationRule. Required by controller-gen. @@ -175,8 +175,8 @@ func (in *DestinationRule) DeepCopyInterface() interface{} { // DeepCopyInto supports using ExcludePermissionRule within kubernetes types, where deepcopy-gen is used. func (in *ExcludePermissionRule) DeepCopyInto(out *ExcludePermissionRule) { - p := proto.Clone(in).(*ExcludePermissionRule) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExcludePermissionRule. Required by controller-gen. 
@@ -196,8 +196,8 @@ func (in *ExcludePermissionRule) DeepCopyInterface() interface{} { // DeepCopyInto supports using DestinationRuleHeader within kubernetes types, where deepcopy-gen is used. func (in *DestinationRuleHeader) DeepCopyInto(out *DestinationRuleHeader) { - p := proto.Clone(in).(*DestinationRuleHeader) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DestinationRuleHeader. Required by controller-gen. diff --git a/proto-public/pbauth/v2beta1/workload_identity_deepcopy.gen.go b/proto-public/pbauth/v2beta1/workload_identity_deepcopy.gen.go index 7684b5ef837f..7a25aba74a48 100644 --- a/proto-public/pbauth/v2beta1/workload_identity_deepcopy.gen.go +++ b/proto-public/pbauth/v2beta1/workload_identity_deepcopy.gen.go @@ -7,8 +7,8 @@ import ( // DeepCopyInto supports using WorkloadIdentity within kubernetes types, where deepcopy-gen is used. func (in *WorkloadIdentity) DeepCopyInto(out *WorkloadIdentity) { - p := proto.Clone(in).(*WorkloadIdentity) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkloadIdentity. Required by controller-gen. diff --git a/proto-public/pbcatalog/v2beta1/dns_deepcopy.gen.go b/proto-public/pbcatalog/v2beta1/dns_deepcopy.gen.go index f2a7b3564fc9..9a3d883b8cec 100644 --- a/proto-public/pbcatalog/v2beta1/dns_deepcopy.gen.go +++ b/proto-public/pbcatalog/v2beta1/dns_deepcopy.gen.go @@ -7,8 +7,8 @@ import ( // DeepCopyInto supports using DNSPolicy within kubernetes types, where deepcopy-gen is used. func (in *DNSPolicy) DeepCopyInto(out *DNSPolicy) { - p := proto.Clone(in).(*DNSPolicy) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSPolicy. Required by controller-gen. @@ -28,8 +28,8 @@ func (in *DNSPolicy) DeepCopyInterface() interface{} { // DeepCopyInto supports using Weights within kubernetes types, where deepcopy-gen is used. func (in *Weights) DeepCopyInto(out *Weights) { - p := proto.Clone(in).(*Weights) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Weights. Required by controller-gen. diff --git a/proto-public/pbcatalog/v2beta1/failover_policy_deepcopy.gen.go b/proto-public/pbcatalog/v2beta1/failover_policy_deepcopy.gen.go index b0fc57e9723d..fabe3f9e30a6 100644 --- a/proto-public/pbcatalog/v2beta1/failover_policy_deepcopy.gen.go +++ b/proto-public/pbcatalog/v2beta1/failover_policy_deepcopy.gen.go @@ -7,8 +7,8 @@ import ( // DeepCopyInto supports using FailoverPolicy within kubernetes types, where deepcopy-gen is used. func (in *FailoverPolicy) DeepCopyInto(out *FailoverPolicy) { - p := proto.Clone(in).(*FailoverPolicy) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FailoverPolicy. Required by controller-gen. @@ -28,8 +28,8 @@ func (in *FailoverPolicy) DeepCopyInterface() interface{} { // DeepCopyInto supports using FailoverConfig within kubernetes types, where deepcopy-gen is used. 
func (in *FailoverConfig) DeepCopyInto(out *FailoverConfig) { - p := proto.Clone(in).(*FailoverConfig) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FailoverConfig. Required by controller-gen. @@ -49,8 +49,8 @@ func (in *FailoverConfig) DeepCopyInterface() interface{} { // DeepCopyInto supports using FailoverDestination within kubernetes types, where deepcopy-gen is used. func (in *FailoverDestination) DeepCopyInto(out *FailoverDestination) { - p := proto.Clone(in).(*FailoverDestination) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FailoverDestination. Required by controller-gen. diff --git a/proto-public/pbcatalog/v2beta1/health_deepcopy.gen.go b/proto-public/pbcatalog/v2beta1/health_deepcopy.gen.go index bb24cbe052fd..677a1af1056d 100644 --- a/proto-public/pbcatalog/v2beta1/health_deepcopy.gen.go +++ b/proto-public/pbcatalog/v2beta1/health_deepcopy.gen.go @@ -7,8 +7,8 @@ import ( // DeepCopyInto supports using HealthStatus within kubernetes types, where deepcopy-gen is used. func (in *HealthStatus) DeepCopyInto(out *HealthStatus) { - p := proto.Clone(in).(*HealthStatus) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HealthStatus. Required by controller-gen. @@ -28,8 +28,8 @@ func (in *HealthStatus) DeepCopyInterface() interface{} { // DeepCopyInto supports using HealthChecks within kubernetes types, where deepcopy-gen is used. func (in *HealthChecks) DeepCopyInto(out *HealthChecks) { - p := proto.Clone(in).(*HealthChecks) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HealthChecks. Required by controller-gen. @@ -49,8 +49,8 @@ func (in *HealthChecks) DeepCopyInterface() interface{} { // DeepCopyInto supports using HealthCheck within kubernetes types, where deepcopy-gen is used. func (in *HealthCheck) DeepCopyInto(out *HealthCheck) { - p := proto.Clone(in).(*HealthCheck) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HealthCheck. Required by controller-gen. @@ -70,8 +70,8 @@ func (in *HealthCheck) DeepCopyInterface() interface{} { // DeepCopyInto supports using HTTPCheck within kubernetes types, where deepcopy-gen is used. func (in *HTTPCheck) DeepCopyInto(out *HTTPCheck) { - p := proto.Clone(in).(*HTTPCheck) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPCheck. Required by controller-gen. @@ -91,8 +91,8 @@ func (in *HTTPCheck) DeepCopyInterface() interface{} { // DeepCopyInto supports using TCPCheck within kubernetes types, where deepcopy-gen is used. func (in *TCPCheck) DeepCopyInto(out *TCPCheck) { - p := proto.Clone(in).(*TCPCheck) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TCPCheck. Required by controller-gen. @@ -112,8 +112,8 @@ func (in *TCPCheck) DeepCopyInterface() interface{} { // DeepCopyInto supports using UDPCheck within kubernetes types, where deepcopy-gen is used. 
func (in *UDPCheck) DeepCopyInto(out *UDPCheck) { - p := proto.Clone(in).(*UDPCheck) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UDPCheck. Required by controller-gen. @@ -133,8 +133,8 @@ func (in *UDPCheck) DeepCopyInterface() interface{} { // DeepCopyInto supports using GRPCCheck within kubernetes types, where deepcopy-gen is used. func (in *GRPCCheck) DeepCopyInto(out *GRPCCheck) { - p := proto.Clone(in).(*GRPCCheck) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GRPCCheck. Required by controller-gen. @@ -154,8 +154,8 @@ func (in *GRPCCheck) DeepCopyInterface() interface{} { // DeepCopyInto supports using OSServiceCheck within kubernetes types, where deepcopy-gen is used. func (in *OSServiceCheck) DeepCopyInto(out *OSServiceCheck) { - p := proto.Clone(in).(*OSServiceCheck) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OSServiceCheck. Required by controller-gen. @@ -175,8 +175,8 @@ func (in *OSServiceCheck) DeepCopyInterface() interface{} { // DeepCopyInto supports using CheckTLSConfig within kubernetes types, where deepcopy-gen is used. func (in *CheckTLSConfig) DeepCopyInto(out *CheckTLSConfig) { - p := proto.Clone(in).(*CheckTLSConfig) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CheckTLSConfig. Required by controller-gen. diff --git a/proto-public/pbcatalog/v2beta1/node_deepcopy.gen.go b/proto-public/pbcatalog/v2beta1/node_deepcopy.gen.go index 775faa3d535f..d864f7858fe6 100644 --- a/proto-public/pbcatalog/v2beta1/node_deepcopy.gen.go +++ b/proto-public/pbcatalog/v2beta1/node_deepcopy.gen.go @@ -7,8 +7,8 @@ import ( // DeepCopyInto supports using Node within kubernetes types, where deepcopy-gen is used. func (in *Node) DeepCopyInto(out *Node) { - p := proto.Clone(in).(*Node) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Node. Required by controller-gen. @@ -28,8 +28,8 @@ func (in *Node) DeepCopyInterface() interface{} { // DeepCopyInto supports using NodeAddress within kubernetes types, where deepcopy-gen is used. func (in *NodeAddress) DeepCopyInto(out *NodeAddress) { - p := proto.Clone(in).(*NodeAddress) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeAddress. Required by controller-gen. diff --git a/proto-public/pbcatalog/v2beta1/selector_deepcopy.gen.go b/proto-public/pbcatalog/v2beta1/selector_deepcopy.gen.go index 1f4ae1c7ff52..7e59d5588081 100644 --- a/proto-public/pbcatalog/v2beta1/selector_deepcopy.gen.go +++ b/proto-public/pbcatalog/v2beta1/selector_deepcopy.gen.go @@ -7,8 +7,8 @@ import ( // DeepCopyInto supports using WorkloadSelector within kubernetes types, where deepcopy-gen is used. func (in *WorkloadSelector) DeepCopyInto(out *WorkloadSelector) { - p := proto.Clone(in).(*WorkloadSelector) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkloadSelector. Required by controller-gen. 
diff --git a/proto-public/pbcatalog/v2beta1/service_deepcopy.gen.go b/proto-public/pbcatalog/v2beta1/service_deepcopy.gen.go index 92e77ecb9d44..fa578cc34464 100644 --- a/proto-public/pbcatalog/v2beta1/service_deepcopy.gen.go +++ b/proto-public/pbcatalog/v2beta1/service_deepcopy.gen.go @@ -7,8 +7,8 @@ import ( // DeepCopyInto supports using Service within kubernetes types, where deepcopy-gen is used. func (in *Service) DeepCopyInto(out *Service) { - p := proto.Clone(in).(*Service) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Service. Required by controller-gen. @@ -28,8 +28,8 @@ func (in *Service) DeepCopyInterface() interface{} { // DeepCopyInto supports using ServicePort within kubernetes types, where deepcopy-gen is used. func (in *ServicePort) DeepCopyInto(out *ServicePort) { - p := proto.Clone(in).(*ServicePort) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServicePort. Required by controller-gen. diff --git a/proto-public/pbcatalog/v2beta1/service_endpoints_deepcopy.gen.go b/proto-public/pbcatalog/v2beta1/service_endpoints_deepcopy.gen.go index 6c1e3833bf41..f3634c091be7 100644 --- a/proto-public/pbcatalog/v2beta1/service_endpoints_deepcopy.gen.go +++ b/proto-public/pbcatalog/v2beta1/service_endpoints_deepcopy.gen.go @@ -7,8 +7,8 @@ import ( // DeepCopyInto supports using ServiceEndpoints within kubernetes types, where deepcopy-gen is used. func (in *ServiceEndpoints) DeepCopyInto(out *ServiceEndpoints) { - p := proto.Clone(in).(*ServiceEndpoints) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceEndpoints. Required by controller-gen. @@ -28,8 +28,8 @@ func (in *ServiceEndpoints) DeepCopyInterface() interface{} { // DeepCopyInto supports using Endpoint within kubernetes types, where deepcopy-gen is used. func (in *Endpoint) DeepCopyInto(out *Endpoint) { - p := proto.Clone(in).(*Endpoint) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Endpoint. Required by controller-gen. diff --git a/proto-public/pbcatalog/v2beta1/vip_deepcopy.gen.go b/proto-public/pbcatalog/v2beta1/vip_deepcopy.gen.go index d49da5045ba2..8308240c52b0 100644 --- a/proto-public/pbcatalog/v2beta1/vip_deepcopy.gen.go +++ b/proto-public/pbcatalog/v2beta1/vip_deepcopy.gen.go @@ -7,8 +7,8 @@ import ( // DeepCopyInto supports using VirtualIPs within kubernetes types, where deepcopy-gen is used. func (in *VirtualIPs) DeepCopyInto(out *VirtualIPs) { - p := proto.Clone(in).(*VirtualIPs) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualIPs. Required by controller-gen. @@ -28,8 +28,8 @@ func (in *VirtualIPs) DeepCopyInterface() interface{} { // DeepCopyInto supports using IP within kubernetes types, where deepcopy-gen is used. func (in *IP) DeepCopyInto(out *IP) { - p := proto.Clone(in).(*IP) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IP. Required by controller-gen. 
diff --git a/proto-public/pbcatalog/v2beta1/workload_deepcopy.gen.go b/proto-public/pbcatalog/v2beta1/workload_deepcopy.gen.go index fbb4b50af325..10d71872bf6d 100644 --- a/proto-public/pbcatalog/v2beta1/workload_deepcopy.gen.go +++ b/proto-public/pbcatalog/v2beta1/workload_deepcopy.gen.go @@ -7,8 +7,8 @@ import ( // DeepCopyInto supports using Workload within kubernetes types, where deepcopy-gen is used. func (in *Workload) DeepCopyInto(out *Workload) { - p := proto.Clone(in).(*Workload) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Workload. Required by controller-gen. @@ -28,8 +28,8 @@ func (in *Workload) DeepCopyInterface() interface{} { // DeepCopyInto supports using WorkloadAddress within kubernetes types, where deepcopy-gen is used. func (in *WorkloadAddress) DeepCopyInto(out *WorkloadAddress) { - p := proto.Clone(in).(*WorkloadAddress) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkloadAddress. Required by controller-gen. @@ -49,8 +49,8 @@ func (in *WorkloadAddress) DeepCopyInterface() interface{} { // DeepCopyInto supports using WorkloadPort within kubernetes types, where deepcopy-gen is used. func (in *WorkloadPort) DeepCopyInto(out *WorkloadPort) { - p := proto.Clone(in).(*WorkloadPort) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkloadPort. Required by controller-gen. @@ -70,8 +70,8 @@ func (in *WorkloadPort) DeepCopyInterface() interface{} { // DeepCopyInto supports using Locality within kubernetes types, where deepcopy-gen is used. func (in *Locality) DeepCopyInto(out *Locality) { - p := proto.Clone(in).(*Locality) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Locality. Required by controller-gen. diff --git a/proto-public/pbconnectca/ca_deepcopy.gen.go b/proto-public/pbconnectca/ca_deepcopy.gen.go index 5200e4d9cac7..8ac221b224c5 100644 --- a/proto-public/pbconnectca/ca_deepcopy.gen.go +++ b/proto-public/pbconnectca/ca_deepcopy.gen.go @@ -7,8 +7,8 @@ import ( // DeepCopyInto supports using WatchRootsRequest within kubernetes types, where deepcopy-gen is used. func (in *WatchRootsRequest) DeepCopyInto(out *WatchRootsRequest) { - p := proto.Clone(in).(*WatchRootsRequest) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WatchRootsRequest. Required by controller-gen. @@ -28,8 +28,8 @@ func (in *WatchRootsRequest) DeepCopyInterface() interface{} { // DeepCopyInto supports using WatchRootsResponse within kubernetes types, where deepcopy-gen is used. func (in *WatchRootsResponse) DeepCopyInto(out *WatchRootsResponse) { - p := proto.Clone(in).(*WatchRootsResponse) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WatchRootsResponse. Required by controller-gen. @@ -49,8 +49,8 @@ func (in *WatchRootsResponse) DeepCopyInterface() interface{} { // DeepCopyInto supports using CARoot within kubernetes types, where deepcopy-gen is used. 
func (in *CARoot) DeepCopyInto(out *CARoot) { - p := proto.Clone(in).(*CARoot) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CARoot. Required by controller-gen. @@ -70,8 +70,8 @@ func (in *CARoot) DeepCopyInterface() interface{} { // DeepCopyInto supports using SignRequest within kubernetes types, where deepcopy-gen is used. func (in *SignRequest) DeepCopyInto(out *SignRequest) { - p := proto.Clone(in).(*SignRequest) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SignRequest. Required by controller-gen. @@ -91,8 +91,8 @@ func (in *SignRequest) DeepCopyInterface() interface{} { // DeepCopyInto supports using SignResponse within kubernetes types, where deepcopy-gen is used. func (in *SignResponse) DeepCopyInto(out *SignResponse) { - p := proto.Clone(in).(*SignResponse) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SignResponse. Required by controller-gen. diff --git a/proto-public/pbdataplane/dataplane_deepcopy.gen.go b/proto-public/pbdataplane/dataplane_deepcopy.gen.go index 1d787b70721d..e1daf2b699fb 100644 --- a/proto-public/pbdataplane/dataplane_deepcopy.gen.go +++ b/proto-public/pbdataplane/dataplane_deepcopy.gen.go @@ -7,8 +7,8 @@ import ( // DeepCopyInto supports using GetSupportedDataplaneFeaturesRequest within kubernetes types, where deepcopy-gen is used. func (in *GetSupportedDataplaneFeaturesRequest) DeepCopyInto(out *GetSupportedDataplaneFeaturesRequest) { - p := proto.Clone(in).(*GetSupportedDataplaneFeaturesRequest) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GetSupportedDataplaneFeaturesRequest. Required by controller-gen. @@ -28,8 +28,8 @@ func (in *GetSupportedDataplaneFeaturesRequest) DeepCopyInterface() interface{} // DeepCopyInto supports using DataplaneFeatureSupport within kubernetes types, where deepcopy-gen is used. func (in *DataplaneFeatureSupport) DeepCopyInto(out *DataplaneFeatureSupport) { - p := proto.Clone(in).(*DataplaneFeatureSupport) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataplaneFeatureSupport. Required by controller-gen. @@ -49,8 +49,8 @@ func (in *DataplaneFeatureSupport) DeepCopyInterface() interface{} { // DeepCopyInto supports using GetSupportedDataplaneFeaturesResponse within kubernetes types, where deepcopy-gen is used. func (in *GetSupportedDataplaneFeaturesResponse) DeepCopyInto(out *GetSupportedDataplaneFeaturesResponse) { - p := proto.Clone(in).(*GetSupportedDataplaneFeaturesResponse) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GetSupportedDataplaneFeaturesResponse. Required by controller-gen. @@ -70,8 +70,8 @@ func (in *GetSupportedDataplaneFeaturesResponse) DeepCopyInterface() interface{} // DeepCopyInto supports using GetEnvoyBootstrapParamsRequest within kubernetes types, where deepcopy-gen is used. 
func (in *GetEnvoyBootstrapParamsRequest) DeepCopyInto(out *GetEnvoyBootstrapParamsRequest) { - p := proto.Clone(in).(*GetEnvoyBootstrapParamsRequest) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GetEnvoyBootstrapParamsRequest. Required by controller-gen. @@ -91,8 +91,8 @@ func (in *GetEnvoyBootstrapParamsRequest) DeepCopyInterface() interface{} { // DeepCopyInto supports using GetEnvoyBootstrapParamsResponse within kubernetes types, where deepcopy-gen is used. func (in *GetEnvoyBootstrapParamsResponse) DeepCopyInto(out *GetEnvoyBootstrapParamsResponse) { - p := proto.Clone(in).(*GetEnvoyBootstrapParamsResponse) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GetEnvoyBootstrapParamsResponse. Required by controller-gen. diff --git a/proto-public/pbdns/dns_deepcopy.gen.go b/proto-public/pbdns/dns_deepcopy.gen.go index c2975e2d7f9a..339ab448e1d1 100644 --- a/proto-public/pbdns/dns_deepcopy.gen.go +++ b/proto-public/pbdns/dns_deepcopy.gen.go @@ -7,8 +7,8 @@ import ( // DeepCopyInto supports using QueryRequest within kubernetes types, where deepcopy-gen is used. func (in *QueryRequest) DeepCopyInto(out *QueryRequest) { - p := proto.Clone(in).(*QueryRequest) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QueryRequest. Required by controller-gen. @@ -28,8 +28,8 @@ func (in *QueryRequest) DeepCopyInterface() interface{} { // DeepCopyInto supports using QueryResponse within kubernetes types, where deepcopy-gen is used. func (in *QueryResponse) DeepCopyInto(out *QueryResponse) { - p := proto.Clone(in).(*QueryResponse) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QueryResponse. Required by controller-gen. diff --git a/proto-public/pbmesh/v2beta1/common_deepcopy.gen.go b/proto-public/pbmesh/v2beta1/common_deepcopy.gen.go index dae015adf7ce..f9aa662c4182 100644 --- a/proto-public/pbmesh/v2beta1/common_deepcopy.gen.go +++ b/proto-public/pbmesh/v2beta1/common_deepcopy.gen.go @@ -7,8 +7,8 @@ import ( // DeepCopyInto supports using ParentReference within kubernetes types, where deepcopy-gen is used. func (in *ParentReference) DeepCopyInto(out *ParentReference) { - p := proto.Clone(in).(*ParentReference) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ParentReference. Required by controller-gen. @@ -28,8 +28,8 @@ func (in *ParentReference) DeepCopyInterface() interface{} { // DeepCopyInto supports using BackendReference within kubernetes types, where deepcopy-gen is used. func (in *BackendReference) DeepCopyInto(out *BackendReference) { - p := proto.Clone(in).(*BackendReference) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackendReference. Required by controller-gen. 
diff --git a/proto-public/pbmesh/v2beta1/computed_explicit_destinations_deepcopy.gen.go b/proto-public/pbmesh/v2beta1/computed_explicit_destinations_deepcopy.gen.go index 3e237e2ac009..7a14e57454f2 100644 --- a/proto-public/pbmesh/v2beta1/computed_explicit_destinations_deepcopy.gen.go +++ b/proto-public/pbmesh/v2beta1/computed_explicit_destinations_deepcopy.gen.go @@ -7,8 +7,8 @@ import ( // DeepCopyInto supports using ComputedExplicitDestinations within kubernetes types, where deepcopy-gen is used. func (in *ComputedExplicitDestinations) DeepCopyInto(out *ComputedExplicitDestinations) { - p := proto.Clone(in).(*ComputedExplicitDestinations) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ComputedExplicitDestinations. Required by controller-gen. diff --git a/proto-public/pbmesh/v2beta1/computed_proxy_configuration_deepcopy.gen.go b/proto-public/pbmesh/v2beta1/computed_proxy_configuration_deepcopy.gen.go index c8d608122268..d6eb5723030b 100644 --- a/proto-public/pbmesh/v2beta1/computed_proxy_configuration_deepcopy.gen.go +++ b/proto-public/pbmesh/v2beta1/computed_proxy_configuration_deepcopy.gen.go @@ -7,8 +7,8 @@ import ( // DeepCopyInto supports using ComputedProxyConfiguration within kubernetes types, where deepcopy-gen is used. func (in *ComputedProxyConfiguration) DeepCopyInto(out *ComputedProxyConfiguration) { - p := proto.Clone(in).(*ComputedProxyConfiguration) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ComputedProxyConfiguration. Required by controller-gen. diff --git a/proto-public/pbmesh/v2beta1/computed_routes_deepcopy.gen.go b/proto-public/pbmesh/v2beta1/computed_routes_deepcopy.gen.go index 01e077aacf00..d63358455892 100644 --- a/proto-public/pbmesh/v2beta1/computed_routes_deepcopy.gen.go +++ b/proto-public/pbmesh/v2beta1/computed_routes_deepcopy.gen.go @@ -7,8 +7,8 @@ import ( // DeepCopyInto supports using ComputedRoutes within kubernetes types, where deepcopy-gen is used. func (in *ComputedRoutes) DeepCopyInto(out *ComputedRoutes) { - p := proto.Clone(in).(*ComputedRoutes) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ComputedRoutes. Required by controller-gen. @@ -28,8 +28,8 @@ func (in *ComputedRoutes) DeepCopyInterface() interface{} { // DeepCopyInto supports using ComputedPortRoutes within kubernetes types, where deepcopy-gen is used. func (in *ComputedPortRoutes) DeepCopyInto(out *ComputedPortRoutes) { - p := proto.Clone(in).(*ComputedPortRoutes) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ComputedPortRoutes. Required by controller-gen. @@ -49,8 +49,8 @@ func (in *ComputedPortRoutes) DeepCopyInterface() interface{} { // DeepCopyInto supports using ComputedHTTPRoute within kubernetes types, where deepcopy-gen is used. func (in *ComputedHTTPRoute) DeepCopyInto(out *ComputedHTTPRoute) { - p := proto.Clone(in).(*ComputedHTTPRoute) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ComputedHTTPRoute. Required by controller-gen. 
@@ -70,8 +70,8 @@ func (in *ComputedHTTPRoute) DeepCopyInterface() interface{} { // DeepCopyInto supports using ComputedHTTPRouteRule within kubernetes types, where deepcopy-gen is used. func (in *ComputedHTTPRouteRule) DeepCopyInto(out *ComputedHTTPRouteRule) { - p := proto.Clone(in).(*ComputedHTTPRouteRule) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ComputedHTTPRouteRule. Required by controller-gen. @@ -91,8 +91,8 @@ func (in *ComputedHTTPRouteRule) DeepCopyInterface() interface{} { // DeepCopyInto supports using ComputedHTTPBackendRef within kubernetes types, where deepcopy-gen is used. func (in *ComputedHTTPBackendRef) DeepCopyInto(out *ComputedHTTPBackendRef) { - p := proto.Clone(in).(*ComputedHTTPBackendRef) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ComputedHTTPBackendRef. Required by controller-gen. @@ -112,8 +112,8 @@ func (in *ComputedHTTPBackendRef) DeepCopyInterface() interface{} { // DeepCopyInto supports using ComputedGRPCRoute within kubernetes types, where deepcopy-gen is used. func (in *ComputedGRPCRoute) DeepCopyInto(out *ComputedGRPCRoute) { - p := proto.Clone(in).(*ComputedGRPCRoute) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ComputedGRPCRoute. Required by controller-gen. @@ -133,8 +133,8 @@ func (in *ComputedGRPCRoute) DeepCopyInterface() interface{} { // DeepCopyInto supports using ComputedGRPCRouteRule within kubernetes types, where deepcopy-gen is used. func (in *ComputedGRPCRouteRule) DeepCopyInto(out *ComputedGRPCRouteRule) { - p := proto.Clone(in).(*ComputedGRPCRouteRule) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ComputedGRPCRouteRule. Required by controller-gen. @@ -154,8 +154,8 @@ func (in *ComputedGRPCRouteRule) DeepCopyInterface() interface{} { // DeepCopyInto supports using ComputedGRPCBackendRef within kubernetes types, where deepcopy-gen is used. func (in *ComputedGRPCBackendRef) DeepCopyInto(out *ComputedGRPCBackendRef) { - p := proto.Clone(in).(*ComputedGRPCBackendRef) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ComputedGRPCBackendRef. Required by controller-gen. @@ -175,8 +175,8 @@ func (in *ComputedGRPCBackendRef) DeepCopyInterface() interface{} { // DeepCopyInto supports using ComputedTCPRoute within kubernetes types, where deepcopy-gen is used. func (in *ComputedTCPRoute) DeepCopyInto(out *ComputedTCPRoute) { - p := proto.Clone(in).(*ComputedTCPRoute) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ComputedTCPRoute. Required by controller-gen. @@ -196,8 +196,8 @@ func (in *ComputedTCPRoute) DeepCopyInterface() interface{} { // DeepCopyInto supports using ComputedTCPRouteRule within kubernetes types, where deepcopy-gen is used. 
func (in *ComputedTCPRouteRule) DeepCopyInto(out *ComputedTCPRouteRule) { - p := proto.Clone(in).(*ComputedTCPRouteRule) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ComputedTCPRouteRule. Required by controller-gen. @@ -217,8 +217,8 @@ func (in *ComputedTCPRouteRule) DeepCopyInterface() interface{} { // DeepCopyInto supports using ComputedTCPBackendRef within kubernetes types, where deepcopy-gen is used. func (in *ComputedTCPBackendRef) DeepCopyInto(out *ComputedTCPBackendRef) { - p := proto.Clone(in).(*ComputedTCPBackendRef) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ComputedTCPBackendRef. Required by controller-gen. @@ -238,8 +238,8 @@ func (in *ComputedTCPBackendRef) DeepCopyInterface() interface{} { // DeepCopyInto supports using BackendTargetDetails within kubernetes types, where deepcopy-gen is used. func (in *BackendTargetDetails) DeepCopyInto(out *BackendTargetDetails) { - p := proto.Clone(in).(*BackendTargetDetails) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackendTargetDetails. Required by controller-gen. @@ -259,8 +259,8 @@ func (in *BackendTargetDetails) DeepCopyInterface() interface{} { // DeepCopyInto supports using ComputedFailoverConfig within kubernetes types, where deepcopy-gen is used. func (in *ComputedFailoverConfig) DeepCopyInto(out *ComputedFailoverConfig) { - p := proto.Clone(in).(*ComputedFailoverConfig) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ComputedFailoverConfig. Required by controller-gen. @@ -280,8 +280,8 @@ func (in *ComputedFailoverConfig) DeepCopyInterface() interface{} { // DeepCopyInto supports using ComputedFailoverDestination within kubernetes types, where deepcopy-gen is used. func (in *ComputedFailoverDestination) DeepCopyInto(out *ComputedFailoverDestination) { - p := proto.Clone(in).(*ComputedFailoverDestination) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ComputedFailoverDestination. Required by controller-gen. diff --git a/proto-public/pbmesh/v2beta1/connection_deepcopy.gen.go b/proto-public/pbmesh/v2beta1/connection_deepcopy.gen.go index 97666ed9eb4c..a0cfa7775756 100644 --- a/proto-public/pbmesh/v2beta1/connection_deepcopy.gen.go +++ b/proto-public/pbmesh/v2beta1/connection_deepcopy.gen.go @@ -7,8 +7,8 @@ import ( // DeepCopyInto supports using ConnectionConfig within kubernetes types, where deepcopy-gen is used. func (in *ConnectionConfig) DeepCopyInto(out *ConnectionConfig) { - p := proto.Clone(in).(*ConnectionConfig) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectionConfig. Required by controller-gen. @@ -28,8 +28,8 @@ func (in *ConnectionConfig) DeepCopyInterface() interface{} { // DeepCopyInto supports using InboundConnectionsConfig within kubernetes types, where deepcopy-gen is used. 
func (in *InboundConnectionsConfig) DeepCopyInto(out *InboundConnectionsConfig) { - p := proto.Clone(in).(*InboundConnectionsConfig) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InboundConnectionsConfig. Required by controller-gen. diff --git a/proto-public/pbmesh/v2beta1/destination_policy_deepcopy.gen.go b/proto-public/pbmesh/v2beta1/destination_policy_deepcopy.gen.go index 5ac5542a54bf..1935359729e5 100644 --- a/proto-public/pbmesh/v2beta1/destination_policy_deepcopy.gen.go +++ b/proto-public/pbmesh/v2beta1/destination_policy_deepcopy.gen.go @@ -7,8 +7,8 @@ import ( // DeepCopyInto supports using DestinationPolicy within kubernetes types, where deepcopy-gen is used. func (in *DestinationPolicy) DeepCopyInto(out *DestinationPolicy) { - p := proto.Clone(in).(*DestinationPolicy) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DestinationPolicy. Required by controller-gen. @@ -28,8 +28,8 @@ func (in *DestinationPolicy) DeepCopyInterface() interface{} { // DeepCopyInto supports using DestinationConfig within kubernetes types, where deepcopy-gen is used. func (in *DestinationConfig) DeepCopyInto(out *DestinationConfig) { - p := proto.Clone(in).(*DestinationConfig) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DestinationConfig. Required by controller-gen. @@ -49,8 +49,8 @@ func (in *DestinationConfig) DeepCopyInterface() interface{} { // DeepCopyInto supports using LocalityPrioritization within kubernetes types, where deepcopy-gen is used. func (in *LocalityPrioritization) DeepCopyInto(out *LocalityPrioritization) { - p := proto.Clone(in).(*LocalityPrioritization) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LocalityPrioritization. Required by controller-gen. @@ -70,8 +70,8 @@ func (in *LocalityPrioritization) DeepCopyInterface() interface{} { // DeepCopyInto supports using LoadBalancer within kubernetes types, where deepcopy-gen is used. func (in *LoadBalancer) DeepCopyInto(out *LoadBalancer) { - p := proto.Clone(in).(*LoadBalancer) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoadBalancer. Required by controller-gen. @@ -91,8 +91,8 @@ func (in *LoadBalancer) DeepCopyInterface() interface{} { // DeepCopyInto supports using RingHashConfig within kubernetes types, where deepcopy-gen is used. func (in *RingHashConfig) DeepCopyInto(out *RingHashConfig) { - p := proto.Clone(in).(*RingHashConfig) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RingHashConfig. Required by controller-gen. @@ -112,8 +112,8 @@ func (in *RingHashConfig) DeepCopyInterface() interface{} { // DeepCopyInto supports using LeastRequestConfig within kubernetes types, where deepcopy-gen is used. 
func (in *LeastRequestConfig) DeepCopyInto(out *LeastRequestConfig) { - p := proto.Clone(in).(*LeastRequestConfig) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LeastRequestConfig. Required by controller-gen. @@ -133,8 +133,8 @@ func (in *LeastRequestConfig) DeepCopyInterface() interface{} { // DeepCopyInto supports using HashPolicy within kubernetes types, where deepcopy-gen is used. func (in *HashPolicy) DeepCopyInto(out *HashPolicy) { - p := proto.Clone(in).(*HashPolicy) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HashPolicy. Required by controller-gen. @@ -154,8 +154,8 @@ func (in *HashPolicy) DeepCopyInterface() interface{} { // DeepCopyInto supports using CookieConfig within kubernetes types, where deepcopy-gen is used. func (in *CookieConfig) DeepCopyInto(out *CookieConfig) { - p := proto.Clone(in).(*CookieConfig) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CookieConfig. Required by controller-gen. diff --git a/proto-public/pbmesh/v2beta1/destinations_configuration_deepcopy.gen.go b/proto-public/pbmesh/v2beta1/destinations_configuration_deepcopy.gen.go index d1413cc89df1..1c7f04e7f23a 100644 --- a/proto-public/pbmesh/v2beta1/destinations_configuration_deepcopy.gen.go +++ b/proto-public/pbmesh/v2beta1/destinations_configuration_deepcopy.gen.go @@ -7,8 +7,8 @@ import ( // DeepCopyInto supports using DestinationsConfiguration within kubernetes types, where deepcopy-gen is used. func (in *DestinationsConfiguration) DeepCopyInto(out *DestinationsConfiguration) { - p := proto.Clone(in).(*DestinationsConfiguration) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DestinationsConfiguration. Required by controller-gen. @@ -28,8 +28,8 @@ func (in *DestinationsConfiguration) DeepCopyInterface() interface{} { // DeepCopyInto supports using DestinationConfigOverrides within kubernetes types, where deepcopy-gen is used. func (in *DestinationConfigOverrides) DeepCopyInto(out *DestinationConfigOverrides) { - p := proto.Clone(in).(*DestinationConfigOverrides) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DestinationConfigOverrides. Required by controller-gen. @@ -49,8 +49,8 @@ func (in *DestinationConfigOverrides) DeepCopyInterface() interface{} { // DeepCopyInto supports using DestinationConfiguration within kubernetes types, where deepcopy-gen is used. func (in *DestinationConfiguration) DeepCopyInto(out *DestinationConfiguration) { - p := proto.Clone(in).(*DestinationConfiguration) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DestinationConfiguration. Required by controller-gen. @@ -70,8 +70,8 @@ func (in *DestinationConfiguration) DeepCopyInterface() interface{} { // DeepCopyInto supports using DestinationLimits within kubernetes types, where deepcopy-gen is used. 
func (in *DestinationLimits) DeepCopyInto(out *DestinationLimits) { - p := proto.Clone(in).(*DestinationLimits) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DestinationLimits. Required by controller-gen. @@ -91,8 +91,8 @@ func (in *DestinationLimits) DeepCopyInterface() interface{} { // DeepCopyInto supports using PassiveHealthCheck within kubernetes types, where deepcopy-gen is used. func (in *PassiveHealthCheck) DeepCopyInto(out *PassiveHealthCheck) { - p := proto.Clone(in).(*PassiveHealthCheck) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PassiveHealthCheck. Required by controller-gen. diff --git a/proto-public/pbmesh/v2beta1/destinations_deepcopy.gen.go b/proto-public/pbmesh/v2beta1/destinations_deepcopy.gen.go index ff04bf3fcf8b..3f513b7d861d 100644 --- a/proto-public/pbmesh/v2beta1/destinations_deepcopy.gen.go +++ b/proto-public/pbmesh/v2beta1/destinations_deepcopy.gen.go @@ -7,8 +7,8 @@ import ( // DeepCopyInto supports using Destinations within kubernetes types, where deepcopy-gen is used. func (in *Destinations) DeepCopyInto(out *Destinations) { - p := proto.Clone(in).(*Destinations) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Destinations. Required by controller-gen. @@ -28,8 +28,8 @@ func (in *Destinations) DeepCopyInterface() interface{} { // DeepCopyInto supports using Destination within kubernetes types, where deepcopy-gen is used. func (in *Destination) DeepCopyInto(out *Destination) { - p := proto.Clone(in).(*Destination) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Destination. Required by controller-gen. @@ -49,8 +49,8 @@ func (in *Destination) DeepCopyInterface() interface{} { // DeepCopyInto supports using IPPortAddress within kubernetes types, where deepcopy-gen is used. func (in *IPPortAddress) DeepCopyInto(out *IPPortAddress) { - p := proto.Clone(in).(*IPPortAddress) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPPortAddress. Required by controller-gen. @@ -70,8 +70,8 @@ func (in *IPPortAddress) DeepCopyInterface() interface{} { // DeepCopyInto supports using UnixSocketAddress within kubernetes types, where deepcopy-gen is used. func (in *UnixSocketAddress) DeepCopyInto(out *UnixSocketAddress) { - p := proto.Clone(in).(*UnixSocketAddress) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UnixSocketAddress. Required by controller-gen. @@ -91,8 +91,8 @@ func (in *UnixSocketAddress) DeepCopyInterface() interface{} { // DeepCopyInto supports using PreparedQueryDestination within kubernetes types, where deepcopy-gen is used. func (in *PreparedQueryDestination) DeepCopyInto(out *PreparedQueryDestination) { - p := proto.Clone(in).(*PreparedQueryDestination) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PreparedQueryDestination. Required by controller-gen. 
diff --git a/proto-public/pbmesh/v2beta1/expose_deepcopy.gen.go b/proto-public/pbmesh/v2beta1/expose_deepcopy.gen.go index e191d353bdcb..2b75adbbcd34 100644 --- a/proto-public/pbmesh/v2beta1/expose_deepcopy.gen.go +++ b/proto-public/pbmesh/v2beta1/expose_deepcopy.gen.go @@ -7,8 +7,8 @@ import ( // DeepCopyInto supports using ExposeConfig within kubernetes types, where deepcopy-gen is used. func (in *ExposeConfig) DeepCopyInto(out *ExposeConfig) { - p := proto.Clone(in).(*ExposeConfig) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExposeConfig. Required by controller-gen. @@ -28,8 +28,8 @@ func (in *ExposeConfig) DeepCopyInterface() interface{} { // DeepCopyInto supports using ExposePath within kubernetes types, where deepcopy-gen is used. func (in *ExposePath) DeepCopyInto(out *ExposePath) { - p := proto.Clone(in).(*ExposePath) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExposePath. Required by controller-gen. diff --git a/proto-public/pbmesh/v2beta1/grpc_route_deepcopy.gen.go b/proto-public/pbmesh/v2beta1/grpc_route_deepcopy.gen.go index 18574bb8cc20..ea7e1dc8ed26 100644 --- a/proto-public/pbmesh/v2beta1/grpc_route_deepcopy.gen.go +++ b/proto-public/pbmesh/v2beta1/grpc_route_deepcopy.gen.go @@ -7,8 +7,8 @@ import ( // DeepCopyInto supports using GRPCRoute within kubernetes types, where deepcopy-gen is used. func (in *GRPCRoute) DeepCopyInto(out *GRPCRoute) { - p := proto.Clone(in).(*GRPCRoute) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GRPCRoute. Required by controller-gen. @@ -28,8 +28,8 @@ func (in *GRPCRoute) DeepCopyInterface() interface{} { // DeepCopyInto supports using GRPCRouteRule within kubernetes types, where deepcopy-gen is used. func (in *GRPCRouteRule) DeepCopyInto(out *GRPCRouteRule) { - p := proto.Clone(in).(*GRPCRouteRule) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GRPCRouteRule. Required by controller-gen. @@ -49,8 +49,8 @@ func (in *GRPCRouteRule) DeepCopyInterface() interface{} { // DeepCopyInto supports using GRPCRouteMatch within kubernetes types, where deepcopy-gen is used. func (in *GRPCRouteMatch) DeepCopyInto(out *GRPCRouteMatch) { - p := proto.Clone(in).(*GRPCRouteMatch) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GRPCRouteMatch. Required by controller-gen. @@ -70,8 +70,8 @@ func (in *GRPCRouteMatch) DeepCopyInterface() interface{} { // DeepCopyInto supports using GRPCMethodMatch within kubernetes types, where deepcopy-gen is used. func (in *GRPCMethodMatch) DeepCopyInto(out *GRPCMethodMatch) { - p := proto.Clone(in).(*GRPCMethodMatch) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GRPCMethodMatch. Required by controller-gen. @@ -91,8 +91,8 @@ func (in *GRPCMethodMatch) DeepCopyInterface() interface{} { // DeepCopyInto supports using GRPCHeaderMatch within kubernetes types, where deepcopy-gen is used. 
func (in *GRPCHeaderMatch) DeepCopyInto(out *GRPCHeaderMatch) { - p := proto.Clone(in).(*GRPCHeaderMatch) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GRPCHeaderMatch. Required by controller-gen. @@ -112,8 +112,8 @@ func (in *GRPCHeaderMatch) DeepCopyInterface() interface{} { // DeepCopyInto supports using GRPCRouteFilter within kubernetes types, where deepcopy-gen is used. func (in *GRPCRouteFilter) DeepCopyInto(out *GRPCRouteFilter) { - p := proto.Clone(in).(*GRPCRouteFilter) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GRPCRouteFilter. Required by controller-gen. @@ -133,8 +133,8 @@ func (in *GRPCRouteFilter) DeepCopyInterface() interface{} { // DeepCopyInto supports using GRPCBackendRef within kubernetes types, where deepcopy-gen is used. func (in *GRPCBackendRef) DeepCopyInto(out *GRPCBackendRef) { - p := proto.Clone(in).(*GRPCBackendRef) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GRPCBackendRef. Required by controller-gen. diff --git a/proto-public/pbmesh/v2beta1/http_route_deepcopy.gen.go b/proto-public/pbmesh/v2beta1/http_route_deepcopy.gen.go index 222cb8361e03..6d339e5afde2 100644 --- a/proto-public/pbmesh/v2beta1/http_route_deepcopy.gen.go +++ b/proto-public/pbmesh/v2beta1/http_route_deepcopy.gen.go @@ -7,8 +7,8 @@ import ( // DeepCopyInto supports using HTTPRoute within kubernetes types, where deepcopy-gen is used. func (in *HTTPRoute) DeepCopyInto(out *HTTPRoute) { - p := proto.Clone(in).(*HTTPRoute) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPRoute. Required by controller-gen. @@ -28,8 +28,8 @@ func (in *HTTPRoute) DeepCopyInterface() interface{} { // DeepCopyInto supports using HTTPRouteRule within kubernetes types, where deepcopy-gen is used. func (in *HTTPRouteRule) DeepCopyInto(out *HTTPRouteRule) { - p := proto.Clone(in).(*HTTPRouteRule) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPRouteRule. Required by controller-gen. @@ -49,8 +49,8 @@ func (in *HTTPRouteRule) DeepCopyInterface() interface{} { // DeepCopyInto supports using HTTPRouteMatch within kubernetes types, where deepcopy-gen is used. func (in *HTTPRouteMatch) DeepCopyInto(out *HTTPRouteMatch) { - p := proto.Clone(in).(*HTTPRouteMatch) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPRouteMatch. Required by controller-gen. @@ -70,8 +70,8 @@ func (in *HTTPRouteMatch) DeepCopyInterface() interface{} { // DeepCopyInto supports using HTTPPathMatch within kubernetes types, where deepcopy-gen is used. func (in *HTTPPathMatch) DeepCopyInto(out *HTTPPathMatch) { - p := proto.Clone(in).(*HTTPPathMatch) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPPathMatch. Required by controller-gen. 
@@ -91,8 +91,8 @@ func (in *HTTPPathMatch) DeepCopyInterface() interface{} { // DeepCopyInto supports using HTTPHeaderMatch within kubernetes types, where deepcopy-gen is used. func (in *HTTPHeaderMatch) DeepCopyInto(out *HTTPHeaderMatch) { - p := proto.Clone(in).(*HTTPHeaderMatch) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPHeaderMatch. Required by controller-gen. @@ -112,8 +112,8 @@ func (in *HTTPHeaderMatch) DeepCopyInterface() interface{} { // DeepCopyInto supports using HTTPQueryParamMatch within kubernetes types, where deepcopy-gen is used. func (in *HTTPQueryParamMatch) DeepCopyInto(out *HTTPQueryParamMatch) { - p := proto.Clone(in).(*HTTPQueryParamMatch) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPQueryParamMatch. Required by controller-gen. @@ -133,8 +133,8 @@ func (in *HTTPQueryParamMatch) DeepCopyInterface() interface{} { // DeepCopyInto supports using HTTPRouteFilter within kubernetes types, where deepcopy-gen is used. func (in *HTTPRouteFilter) DeepCopyInto(out *HTTPRouteFilter) { - p := proto.Clone(in).(*HTTPRouteFilter) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPRouteFilter. Required by controller-gen. @@ -154,8 +154,8 @@ func (in *HTTPRouteFilter) DeepCopyInterface() interface{} { // DeepCopyInto supports using HTTPHeaderFilter within kubernetes types, where deepcopy-gen is used. func (in *HTTPHeaderFilter) DeepCopyInto(out *HTTPHeaderFilter) { - p := proto.Clone(in).(*HTTPHeaderFilter) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPHeaderFilter. Required by controller-gen. @@ -175,8 +175,8 @@ func (in *HTTPHeaderFilter) DeepCopyInterface() interface{} { // DeepCopyInto supports using HTTPHeader within kubernetes types, where deepcopy-gen is used. func (in *HTTPHeader) DeepCopyInto(out *HTTPHeader) { - p := proto.Clone(in).(*HTTPHeader) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPHeader. Required by controller-gen. @@ -196,8 +196,8 @@ func (in *HTTPHeader) DeepCopyInterface() interface{} { // DeepCopyInto supports using HTTPURLRewriteFilter within kubernetes types, where deepcopy-gen is used. func (in *HTTPURLRewriteFilter) DeepCopyInto(out *HTTPURLRewriteFilter) { - p := proto.Clone(in).(*HTTPURLRewriteFilter) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPURLRewriteFilter. Required by controller-gen. @@ -217,8 +217,8 @@ func (in *HTTPURLRewriteFilter) DeepCopyInterface() interface{} { // DeepCopyInto supports using HTTPBackendRef within kubernetes types, where deepcopy-gen is used. func (in *HTTPBackendRef) DeepCopyInto(out *HTTPBackendRef) { - p := proto.Clone(in).(*HTTPBackendRef) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPBackendRef. Required by controller-gen. 
diff --git a/proto-public/pbmesh/v2beta1/http_route_retries_deepcopy.gen.go b/proto-public/pbmesh/v2beta1/http_route_retries_deepcopy.gen.go index b523a58b77c1..abd2a515332d 100644 --- a/proto-public/pbmesh/v2beta1/http_route_retries_deepcopy.gen.go +++ b/proto-public/pbmesh/v2beta1/http_route_retries_deepcopy.gen.go @@ -7,8 +7,8 @@ import ( // DeepCopyInto supports using HTTPRouteRetries within kubernetes types, where deepcopy-gen is used. func (in *HTTPRouteRetries) DeepCopyInto(out *HTTPRouteRetries) { - p := proto.Clone(in).(*HTTPRouteRetries) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPRouteRetries. Required by controller-gen. diff --git a/proto-public/pbmesh/v2beta1/http_route_timeouts_deepcopy.gen.go b/proto-public/pbmesh/v2beta1/http_route_timeouts_deepcopy.gen.go index ea92d9038d48..c131310e5e26 100644 --- a/proto-public/pbmesh/v2beta1/http_route_timeouts_deepcopy.gen.go +++ b/proto-public/pbmesh/v2beta1/http_route_timeouts_deepcopy.gen.go @@ -7,8 +7,8 @@ import ( // DeepCopyInto supports using HTTPRouteTimeouts within kubernetes types, where deepcopy-gen is used. func (in *HTTPRouteTimeouts) DeepCopyInto(out *HTTPRouteTimeouts) { - p := proto.Clone(in).(*HTTPRouteTimeouts) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPRouteTimeouts. Required by controller-gen. diff --git a/proto-public/pbmesh/v2beta1/pbproxystate/access_logs_deepcopy.gen.go b/proto-public/pbmesh/v2beta1/pbproxystate/access_logs_deepcopy.gen.go index af2ac36c73ad..f4c5c96039d5 100644 --- a/proto-public/pbmesh/v2beta1/pbproxystate/access_logs_deepcopy.gen.go +++ b/proto-public/pbmesh/v2beta1/pbproxystate/access_logs_deepcopy.gen.go @@ -7,8 +7,8 @@ import ( // DeepCopyInto supports using AccessLogs within kubernetes types, where deepcopy-gen is used. func (in *AccessLogs) DeepCopyInto(out *AccessLogs) { - p := proto.Clone(in).(*AccessLogs) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AccessLogs. Required by controller-gen. diff --git a/proto-public/pbmesh/v2beta1/pbproxystate/address_deepcopy.gen.go b/proto-public/pbmesh/v2beta1/pbproxystate/address_deepcopy.gen.go index 81cf39a4593a..b2700f704eff 100644 --- a/proto-public/pbmesh/v2beta1/pbproxystate/address_deepcopy.gen.go +++ b/proto-public/pbmesh/v2beta1/pbproxystate/address_deepcopy.gen.go @@ -7,8 +7,8 @@ import ( // DeepCopyInto supports using HostPortAddress within kubernetes types, where deepcopy-gen is used. func (in *HostPortAddress) DeepCopyInto(out *HostPortAddress) { - p := proto.Clone(in).(*HostPortAddress) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HostPortAddress. Required by controller-gen. @@ -28,8 +28,8 @@ func (in *HostPortAddress) DeepCopyInterface() interface{} { // DeepCopyInto supports using UnixSocketAddress within kubernetes types, where deepcopy-gen is used. func (in *UnixSocketAddress) DeepCopyInto(out *UnixSocketAddress) { - p := proto.Clone(in).(*UnixSocketAddress) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UnixSocketAddress. Required by controller-gen. 
diff --git a/proto-public/pbmesh/v2beta1/pbproxystate/cluster_deepcopy.gen.go b/proto-public/pbmesh/v2beta1/pbproxystate/cluster_deepcopy.gen.go index f06ac976e13f..1818a349bd45 100644 --- a/proto-public/pbmesh/v2beta1/pbproxystate/cluster_deepcopy.gen.go +++ b/proto-public/pbmesh/v2beta1/pbproxystate/cluster_deepcopy.gen.go @@ -7,8 +7,8 @@ import ( // DeepCopyInto supports using Cluster within kubernetes types, where deepcopy-gen is used. func (in *Cluster) DeepCopyInto(out *Cluster) { - p := proto.Clone(in).(*Cluster) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Cluster. Required by controller-gen. @@ -28,8 +28,8 @@ func (in *Cluster) DeepCopyInterface() interface{} { // DeepCopyInto supports using FailoverGroup within kubernetes types, where deepcopy-gen is used. func (in *FailoverGroup) DeepCopyInto(out *FailoverGroup) { - p := proto.Clone(in).(*FailoverGroup) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FailoverGroup. Required by controller-gen. @@ -49,8 +49,8 @@ func (in *FailoverGroup) DeepCopyInterface() interface{} { // DeepCopyInto supports using FailoverGroupConfig within kubernetes types, where deepcopy-gen is used. func (in *FailoverGroupConfig) DeepCopyInto(out *FailoverGroupConfig) { - p := proto.Clone(in).(*FailoverGroupConfig) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FailoverGroupConfig. Required by controller-gen. @@ -70,8 +70,8 @@ func (in *FailoverGroupConfig) DeepCopyInterface() interface{} { // DeepCopyInto supports using EndpointGroup within kubernetes types, where deepcopy-gen is used. func (in *EndpointGroup) DeepCopyInto(out *EndpointGroup) { - p := proto.Clone(in).(*EndpointGroup) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointGroup. Required by controller-gen. @@ -91,8 +91,8 @@ func (in *EndpointGroup) DeepCopyInterface() interface{} { // DeepCopyInto supports using DynamicEndpointGroup within kubernetes types, where deepcopy-gen is used. func (in *DynamicEndpointGroup) DeepCopyInto(out *DynamicEndpointGroup) { - p := proto.Clone(in).(*DynamicEndpointGroup) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DynamicEndpointGroup. Required by controller-gen. @@ -112,8 +112,8 @@ func (in *DynamicEndpointGroup) DeepCopyInterface() interface{} { // DeepCopyInto supports using PassthroughEndpointGroup within kubernetes types, where deepcopy-gen is used. func (in *PassthroughEndpointGroup) DeepCopyInto(out *PassthroughEndpointGroup) { - p := proto.Clone(in).(*PassthroughEndpointGroup) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PassthroughEndpointGroup. Required by controller-gen. @@ -133,8 +133,8 @@ func (in *PassthroughEndpointGroup) DeepCopyInterface() interface{} { // DeepCopyInto supports using DNSEndpointGroup within kubernetes types, where deepcopy-gen is used. 
func (in *DNSEndpointGroup) DeepCopyInto(out *DNSEndpointGroup) { - p := proto.Clone(in).(*DNSEndpointGroup) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSEndpointGroup. Required by controller-gen. @@ -154,8 +154,8 @@ func (in *DNSEndpointGroup) DeepCopyInterface() interface{} { // DeepCopyInto supports using StaticEndpointGroup within kubernetes types, where deepcopy-gen is used. func (in *StaticEndpointGroup) DeepCopyInto(out *StaticEndpointGroup) { - p := proto.Clone(in).(*StaticEndpointGroup) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StaticEndpointGroup. Required by controller-gen. @@ -175,8 +175,8 @@ func (in *StaticEndpointGroup) DeepCopyInterface() interface{} { // DeepCopyInto supports using DestinationCluster within kubernetes types, where deepcopy-gen is used. func (in *DestinationCluster) DeepCopyInto(out *DestinationCluster) { - p := proto.Clone(in).(*DestinationCluster) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DestinationCluster. Required by controller-gen. @@ -196,8 +196,8 @@ func (in *DestinationCluster) DeepCopyInterface() interface{} { // DeepCopyInto supports using L4WeightedClusterGroup within kubernetes types, where deepcopy-gen is used. func (in *L4WeightedClusterGroup) DeepCopyInto(out *L4WeightedClusterGroup) { - p := proto.Clone(in).(*L4WeightedClusterGroup) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new L4WeightedClusterGroup. Required by controller-gen. @@ -217,8 +217,8 @@ func (in *L4WeightedClusterGroup) DeepCopyInterface() interface{} { // DeepCopyInto supports using L7WeightedClusterGroup within kubernetes types, where deepcopy-gen is used. func (in *L7WeightedClusterGroup) DeepCopyInto(out *L7WeightedClusterGroup) { - p := proto.Clone(in).(*L7WeightedClusterGroup) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new L7WeightedClusterGroup. Required by controller-gen. @@ -238,8 +238,8 @@ func (in *L7WeightedClusterGroup) DeepCopyInterface() interface{} { // DeepCopyInto supports using L4WeightedDestinationCluster within kubernetes types, where deepcopy-gen is used. func (in *L4WeightedDestinationCluster) DeepCopyInto(out *L4WeightedDestinationCluster) { - p := proto.Clone(in).(*L4WeightedDestinationCluster) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new L4WeightedDestinationCluster. Required by controller-gen. @@ -259,8 +259,8 @@ func (in *L4WeightedDestinationCluster) DeepCopyInterface() interface{} { // DeepCopyInto supports using L7WeightedDestinationCluster within kubernetes types, where deepcopy-gen is used. func (in *L7WeightedDestinationCluster) DeepCopyInto(out *L7WeightedDestinationCluster) { - p := proto.Clone(in).(*L7WeightedDestinationCluster) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new L7WeightedDestinationCluster. Required by controller-gen. 
@@ -280,8 +280,8 @@ func (in *L7WeightedDestinationCluster) DeepCopyInterface() interface{} { // DeepCopyInto supports using DynamicEndpointGroupConfig within kubernetes types, where deepcopy-gen is used. func (in *DynamicEndpointGroupConfig) DeepCopyInto(out *DynamicEndpointGroupConfig) { - p := proto.Clone(in).(*DynamicEndpointGroupConfig) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DynamicEndpointGroupConfig. Required by controller-gen. @@ -301,8 +301,8 @@ func (in *DynamicEndpointGroupConfig) DeepCopyInterface() interface{} { // DeepCopyInto supports using LBPolicyLeastRequest within kubernetes types, where deepcopy-gen is used. func (in *LBPolicyLeastRequest) DeepCopyInto(out *LBPolicyLeastRequest) { - p := proto.Clone(in).(*LBPolicyLeastRequest) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LBPolicyLeastRequest. Required by controller-gen. @@ -322,8 +322,8 @@ func (in *LBPolicyLeastRequest) DeepCopyInterface() interface{} { // DeepCopyInto supports using LBPolicyRoundRobin within kubernetes types, where deepcopy-gen is used. func (in *LBPolicyRoundRobin) DeepCopyInto(out *LBPolicyRoundRobin) { - p := proto.Clone(in).(*LBPolicyRoundRobin) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LBPolicyRoundRobin. Required by controller-gen. @@ -343,8 +343,8 @@ func (in *LBPolicyRoundRobin) DeepCopyInterface() interface{} { // DeepCopyInto supports using LBPolicyRandom within kubernetes types, where deepcopy-gen is used. func (in *LBPolicyRandom) DeepCopyInto(out *LBPolicyRandom) { - p := proto.Clone(in).(*LBPolicyRandom) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LBPolicyRandom. Required by controller-gen. @@ -364,8 +364,8 @@ func (in *LBPolicyRandom) DeepCopyInterface() interface{} { // DeepCopyInto supports using LBPolicyRingHash within kubernetes types, where deepcopy-gen is used. func (in *LBPolicyRingHash) DeepCopyInto(out *LBPolicyRingHash) { - p := proto.Clone(in).(*LBPolicyRingHash) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LBPolicyRingHash. Required by controller-gen. @@ -385,8 +385,8 @@ func (in *LBPolicyRingHash) DeepCopyInterface() interface{} { // DeepCopyInto supports using LBPolicyMaglev within kubernetes types, where deepcopy-gen is used. func (in *LBPolicyMaglev) DeepCopyInto(out *LBPolicyMaglev) { - p := proto.Clone(in).(*LBPolicyMaglev) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LBPolicyMaglev. Required by controller-gen. @@ -406,8 +406,8 @@ func (in *LBPolicyMaglev) DeepCopyInterface() interface{} { // DeepCopyInto supports using CircuitBreakers within kubernetes types, where deepcopy-gen is used. func (in *CircuitBreakers) DeepCopyInto(out *CircuitBreakers) { - p := proto.Clone(in).(*CircuitBreakers) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CircuitBreakers. Required by controller-gen. 
@@ -427,8 +427,8 @@ func (in *CircuitBreakers) DeepCopyInterface() interface{} { // DeepCopyInto supports using UpstreamLimits within kubernetes types, where deepcopy-gen is used. func (in *UpstreamLimits) DeepCopyInto(out *UpstreamLimits) { - p := proto.Clone(in).(*UpstreamLimits) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UpstreamLimits. Required by controller-gen. @@ -448,8 +448,8 @@ func (in *UpstreamLimits) DeepCopyInterface() interface{} { // DeepCopyInto supports using OutlierDetection within kubernetes types, where deepcopy-gen is used. func (in *OutlierDetection) DeepCopyInto(out *OutlierDetection) { - p := proto.Clone(in).(*OutlierDetection) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OutlierDetection. Required by controller-gen. @@ -469,8 +469,8 @@ func (in *OutlierDetection) DeepCopyInterface() interface{} { // DeepCopyInto supports using UpstreamConnectionOptions within kubernetes types, where deepcopy-gen is used. func (in *UpstreamConnectionOptions) DeepCopyInto(out *UpstreamConnectionOptions) { - p := proto.Clone(in).(*UpstreamConnectionOptions) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UpstreamConnectionOptions. Required by controller-gen. @@ -490,8 +490,8 @@ func (in *UpstreamConnectionOptions) DeepCopyInterface() interface{} { // DeepCopyInto supports using PassthroughEndpointGroupConfig within kubernetes types, where deepcopy-gen is used. func (in *PassthroughEndpointGroupConfig) DeepCopyInto(out *PassthroughEndpointGroupConfig) { - p := proto.Clone(in).(*PassthroughEndpointGroupConfig) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PassthroughEndpointGroupConfig. Required by controller-gen. @@ -511,8 +511,8 @@ func (in *PassthroughEndpointGroupConfig) DeepCopyInterface() interface{} { // DeepCopyInto supports using DNSEndpointGroupConfig within kubernetes types, where deepcopy-gen is used. func (in *DNSEndpointGroupConfig) DeepCopyInto(out *DNSEndpointGroupConfig) { - p := proto.Clone(in).(*DNSEndpointGroupConfig) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSEndpointGroupConfig. Required by controller-gen. @@ -532,8 +532,8 @@ func (in *DNSEndpointGroupConfig) DeepCopyInterface() interface{} { // DeepCopyInto supports using StaticEndpointGroupConfig within kubernetes types, where deepcopy-gen is used. func (in *StaticEndpointGroupConfig) DeepCopyInto(out *StaticEndpointGroupConfig) { - p := proto.Clone(in).(*StaticEndpointGroupConfig) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StaticEndpointGroupConfig. Required by controller-gen. 
diff --git a/proto-public/pbmesh/v2beta1/pbproxystate/endpoints_deepcopy.gen.go b/proto-public/pbmesh/v2beta1/pbproxystate/endpoints_deepcopy.gen.go index 62efabc96394..eeb1daa5d23c 100644 --- a/proto-public/pbmesh/v2beta1/pbproxystate/endpoints_deepcopy.gen.go +++ b/proto-public/pbmesh/v2beta1/pbproxystate/endpoints_deepcopy.gen.go @@ -7,8 +7,8 @@ import ( // DeepCopyInto supports using Endpoints within kubernetes types, where deepcopy-gen is used. func (in *Endpoints) DeepCopyInto(out *Endpoints) { - p := proto.Clone(in).(*Endpoints) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Endpoints. Required by controller-gen. @@ -28,8 +28,8 @@ func (in *Endpoints) DeepCopyInterface() interface{} { // DeepCopyInto supports using Endpoint within kubernetes types, where deepcopy-gen is used. func (in *Endpoint) DeepCopyInto(out *Endpoint) { - p := proto.Clone(in).(*Endpoint) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Endpoint. Required by controller-gen. diff --git a/proto-public/pbmesh/v2beta1/pbproxystate/escape_hatches_deepcopy.gen.go b/proto-public/pbmesh/v2beta1/pbproxystate/escape_hatches_deepcopy.gen.go index fc5617f23494..41fcb73d4662 100644 --- a/proto-public/pbmesh/v2beta1/pbproxystate/escape_hatches_deepcopy.gen.go +++ b/proto-public/pbmesh/v2beta1/pbproxystate/escape_hatches_deepcopy.gen.go @@ -7,8 +7,8 @@ import ( // DeepCopyInto supports using EscapeHatches within kubernetes types, where deepcopy-gen is used. func (in *EscapeHatches) DeepCopyInto(out *EscapeHatches) { - p := proto.Clone(in).(*EscapeHatches) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EscapeHatches. Required by controller-gen. diff --git a/proto-public/pbmesh/v2beta1/pbproxystate/header_mutations_deepcopy.gen.go b/proto-public/pbmesh/v2beta1/pbproxystate/header_mutations_deepcopy.gen.go index 2b09b2759738..97a77c7fc402 100644 --- a/proto-public/pbmesh/v2beta1/pbproxystate/header_mutations_deepcopy.gen.go +++ b/proto-public/pbmesh/v2beta1/pbproxystate/header_mutations_deepcopy.gen.go @@ -7,8 +7,8 @@ import ( // DeepCopyInto supports using HeaderMutation within kubernetes types, where deepcopy-gen is used. func (in *HeaderMutation) DeepCopyInto(out *HeaderMutation) { - p := proto.Clone(in).(*HeaderMutation) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HeaderMutation. Required by controller-gen. @@ -28,8 +28,8 @@ func (in *HeaderMutation) DeepCopyInterface() interface{} { // DeepCopyInto supports using RequestHeaderAdd within kubernetes types, where deepcopy-gen is used. func (in *RequestHeaderAdd) DeepCopyInto(out *RequestHeaderAdd) { - p := proto.Clone(in).(*RequestHeaderAdd) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RequestHeaderAdd. Required by controller-gen. @@ -49,8 +49,8 @@ func (in *RequestHeaderAdd) DeepCopyInterface() interface{} { // DeepCopyInto supports using RequestHeaderRemove within kubernetes types, where deepcopy-gen is used. 
func (in *RequestHeaderRemove) DeepCopyInto(out *RequestHeaderRemove) { - p := proto.Clone(in).(*RequestHeaderRemove) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RequestHeaderRemove. Required by controller-gen. @@ -70,8 +70,8 @@ func (in *RequestHeaderRemove) DeepCopyInterface() interface{} { // DeepCopyInto supports using ResponseHeaderAdd within kubernetes types, where deepcopy-gen is used. func (in *ResponseHeaderAdd) DeepCopyInto(out *ResponseHeaderAdd) { - p := proto.Clone(in).(*ResponseHeaderAdd) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResponseHeaderAdd. Required by controller-gen. @@ -91,8 +91,8 @@ func (in *ResponseHeaderAdd) DeepCopyInterface() interface{} { // DeepCopyInto supports using ResponseHeaderRemove within kubernetes types, where deepcopy-gen is used. func (in *ResponseHeaderRemove) DeepCopyInto(out *ResponseHeaderRemove) { - p := proto.Clone(in).(*ResponseHeaderRemove) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResponseHeaderRemove. Required by controller-gen. @@ -112,8 +112,8 @@ func (in *ResponseHeaderRemove) DeepCopyInterface() interface{} { // DeepCopyInto supports using Header within kubernetes types, where deepcopy-gen is used. func (in *Header) DeepCopyInto(out *Header) { - p := proto.Clone(in).(*Header) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Header. Required by controller-gen. diff --git a/proto-public/pbmesh/v2beta1/pbproxystate/listener_deepcopy.gen.go b/proto-public/pbmesh/v2beta1/pbproxystate/listener_deepcopy.gen.go index 7208735dbd64..c721f40cbb91 100644 --- a/proto-public/pbmesh/v2beta1/pbproxystate/listener_deepcopy.gen.go +++ b/proto-public/pbmesh/v2beta1/pbproxystate/listener_deepcopy.gen.go @@ -7,8 +7,8 @@ import ( // DeepCopyInto supports using Listener within kubernetes types, where deepcopy-gen is used. func (in *Listener) DeepCopyInto(out *Listener) { - p := proto.Clone(in).(*Listener) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Listener. Required by controller-gen. @@ -28,8 +28,8 @@ func (in *Listener) DeepCopyInterface() interface{} { // DeepCopyInto supports using Router within kubernetes types, where deepcopy-gen is used. func (in *Router) DeepCopyInto(out *Router) { - p := proto.Clone(in).(*Router) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Router. Required by controller-gen. @@ -49,8 +49,8 @@ func (in *Router) DeepCopyInterface() interface{} { // DeepCopyInto supports using Match within kubernetes types, where deepcopy-gen is used. func (in *Match) DeepCopyInto(out *Match) { - p := proto.Clone(in).(*Match) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Match. Required by controller-gen. @@ -70,8 +70,8 @@ func (in *Match) DeepCopyInterface() interface{} { // DeepCopyInto supports using CidrRange within kubernetes types, where deepcopy-gen is used. 
func (in *CidrRange) DeepCopyInto(out *CidrRange) { - p := proto.Clone(in).(*CidrRange) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CidrRange. Required by controller-gen. @@ -91,8 +91,8 @@ func (in *CidrRange) DeepCopyInterface() interface{} { // DeepCopyInto supports using L4Destination within kubernetes types, where deepcopy-gen is used. func (in *L4Destination) DeepCopyInto(out *L4Destination) { - p := proto.Clone(in).(*L4Destination) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new L4Destination. Required by controller-gen. @@ -112,8 +112,8 @@ func (in *L4Destination) DeepCopyInterface() interface{} { // DeepCopyInto supports using L7DestinationRoute within kubernetes types, where deepcopy-gen is used. func (in *L7DestinationRoute) DeepCopyInto(out *L7DestinationRoute) { - p := proto.Clone(in).(*L7DestinationRoute) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new L7DestinationRoute. Required by controller-gen. @@ -133,8 +133,8 @@ func (in *L7DestinationRoute) DeepCopyInterface() interface{} { // DeepCopyInto supports using L7Destination within kubernetes types, where deepcopy-gen is used. func (in *L7Destination) DeepCopyInto(out *L7Destination) { - p := proto.Clone(in).(*L7Destination) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new L7Destination. Required by controller-gen. @@ -154,8 +154,8 @@ func (in *L7Destination) DeepCopyInterface() interface{} { // DeepCopyInto supports using SNIDestination within kubernetes types, where deepcopy-gen is used. func (in *SNIDestination) DeepCopyInto(out *SNIDestination) { - p := proto.Clone(in).(*SNIDestination) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SNIDestination. Required by controller-gen. diff --git a/proto-public/pbmesh/v2beta1/pbproxystate/references_deepcopy.gen.go b/proto-public/pbmesh/v2beta1/pbproxystate/references_deepcopy.gen.go index c52d23730cf9..2cbe25a91750 100644 --- a/proto-public/pbmesh/v2beta1/pbproxystate/references_deepcopy.gen.go +++ b/proto-public/pbmesh/v2beta1/pbproxystate/references_deepcopy.gen.go @@ -7,8 +7,8 @@ import ( // DeepCopyInto supports using LeafCertificateRef within kubernetes types, where deepcopy-gen is used. func (in *LeafCertificateRef) DeepCopyInto(out *LeafCertificateRef) { - p := proto.Clone(in).(*LeafCertificateRef) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LeafCertificateRef. Required by controller-gen. @@ -28,8 +28,8 @@ func (in *LeafCertificateRef) DeepCopyInterface() interface{} { // DeepCopyInto supports using TrustBundleRef within kubernetes types, where deepcopy-gen is used. func (in *TrustBundleRef) DeepCopyInto(out *TrustBundleRef) { - p := proto.Clone(in).(*TrustBundleRef) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TrustBundleRef. Required by controller-gen. 
@@ -49,8 +49,8 @@ func (in *TrustBundleRef) DeepCopyInterface() interface{} { // DeepCopyInto supports using EndpointRef within kubernetes types, where deepcopy-gen is used. func (in *EndpointRef) DeepCopyInto(out *EndpointRef) { - p := proto.Clone(in).(*EndpointRef) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointRef. Required by controller-gen. diff --git a/proto-public/pbmesh/v2beta1/pbproxystate/route_deepcopy.gen.go b/proto-public/pbmesh/v2beta1/pbproxystate/route_deepcopy.gen.go index 2d2e71009410..e2eea4e78b5e 100644 --- a/proto-public/pbmesh/v2beta1/pbproxystate/route_deepcopy.gen.go +++ b/proto-public/pbmesh/v2beta1/pbproxystate/route_deepcopy.gen.go @@ -7,8 +7,8 @@ import ( // DeepCopyInto supports using Route within kubernetes types, where deepcopy-gen is used. func (in *Route) DeepCopyInto(out *Route) { - p := proto.Clone(in).(*Route) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Route. Required by controller-gen. @@ -28,8 +28,8 @@ func (in *Route) DeepCopyInterface() interface{} { // DeepCopyInto supports using VirtualHost within kubernetes types, where deepcopy-gen is used. func (in *VirtualHost) DeepCopyInto(out *VirtualHost) { - p := proto.Clone(in).(*VirtualHost) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualHost. Required by controller-gen. @@ -49,8 +49,8 @@ func (in *VirtualHost) DeepCopyInterface() interface{} { // DeepCopyInto supports using RouteRule within kubernetes types, where deepcopy-gen is used. func (in *RouteRule) DeepCopyInto(out *RouteRule) { - p := proto.Clone(in).(*RouteRule) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RouteRule. Required by controller-gen. @@ -70,8 +70,8 @@ func (in *RouteRule) DeepCopyInterface() interface{} { // DeepCopyInto supports using RouteMatch within kubernetes types, where deepcopy-gen is used. func (in *RouteMatch) DeepCopyInto(out *RouteMatch) { - p := proto.Clone(in).(*RouteMatch) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RouteMatch. Required by controller-gen. @@ -91,8 +91,8 @@ func (in *RouteMatch) DeepCopyInterface() interface{} { // DeepCopyInto supports using PathMatch within kubernetes types, where deepcopy-gen is used. func (in *PathMatch) DeepCopyInto(out *PathMatch) { - p := proto.Clone(in).(*PathMatch) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PathMatch. Required by controller-gen. @@ -112,8 +112,8 @@ func (in *PathMatch) DeepCopyInterface() interface{} { // DeepCopyInto supports using QueryParameterMatch within kubernetes types, where deepcopy-gen is used. func (in *QueryParameterMatch) DeepCopyInto(out *QueryParameterMatch) { - p := proto.Clone(in).(*QueryParameterMatch) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QueryParameterMatch. Required by controller-gen. 
@@ -133,8 +133,8 @@ func (in *QueryParameterMatch) DeepCopyInterface() interface{} { // DeepCopyInto supports using HeaderMatch within kubernetes types, where deepcopy-gen is used. func (in *HeaderMatch) DeepCopyInto(out *HeaderMatch) { - p := proto.Clone(in).(*HeaderMatch) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HeaderMatch. Required by controller-gen. @@ -154,8 +154,8 @@ func (in *HeaderMatch) DeepCopyInterface() interface{} { // DeepCopyInto supports using RouteDestination within kubernetes types, where deepcopy-gen is used. func (in *RouteDestination) DeepCopyInto(out *RouteDestination) { - p := proto.Clone(in).(*RouteDestination) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RouteDestination. Required by controller-gen. @@ -175,8 +175,8 @@ func (in *RouteDestination) DeepCopyInterface() interface{} { // DeepCopyInto supports using DestinationConfiguration within kubernetes types, where deepcopy-gen is used. func (in *DestinationConfiguration) DeepCopyInto(out *DestinationConfiguration) { - p := proto.Clone(in).(*DestinationConfiguration) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DestinationConfiguration. Required by controller-gen. @@ -196,8 +196,8 @@ func (in *DestinationConfiguration) DeepCopyInterface() interface{} { // DeepCopyInto supports using RetryPolicy within kubernetes types, where deepcopy-gen is used. func (in *RetryPolicy) DeepCopyInto(out *RetryPolicy) { - p := proto.Clone(in).(*RetryPolicy) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RetryPolicy. Required by controller-gen. @@ -217,8 +217,8 @@ func (in *RetryPolicy) DeepCopyInterface() interface{} { // DeepCopyInto supports using TimeoutConfig within kubernetes types, where deepcopy-gen is used. func (in *TimeoutConfig) DeepCopyInto(out *TimeoutConfig) { - p := proto.Clone(in).(*TimeoutConfig) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TimeoutConfig. Required by controller-gen. @@ -238,8 +238,8 @@ func (in *TimeoutConfig) DeepCopyInterface() interface{} { // DeepCopyInto supports using LoadBalancerHashPolicy within kubernetes types, where deepcopy-gen is used. func (in *LoadBalancerHashPolicy) DeepCopyInto(out *LoadBalancerHashPolicy) { - p := proto.Clone(in).(*LoadBalancerHashPolicy) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoadBalancerHashPolicy. Required by controller-gen. @@ -259,8 +259,8 @@ func (in *LoadBalancerHashPolicy) DeepCopyInterface() interface{} { // DeepCopyInto supports using CookiePolicy within kubernetes types, where deepcopy-gen is used. func (in *CookiePolicy) DeepCopyInto(out *CookiePolicy) { - p := proto.Clone(in).(*CookiePolicy) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CookiePolicy. Required by controller-gen. 
@@ -280,8 +280,8 @@ func (in *CookiePolicy) DeepCopyInterface() interface{} { // DeepCopyInto supports using HeaderPolicy within kubernetes types, where deepcopy-gen is used. func (in *HeaderPolicy) DeepCopyInto(out *HeaderPolicy) { - p := proto.Clone(in).(*HeaderPolicy) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HeaderPolicy. Required by controller-gen. @@ -301,8 +301,8 @@ func (in *HeaderPolicy) DeepCopyInterface() interface{} { // DeepCopyInto supports using QueryParameterPolicy within kubernetes types, where deepcopy-gen is used. func (in *QueryParameterPolicy) DeepCopyInto(out *QueryParameterPolicy) { - p := proto.Clone(in).(*QueryParameterPolicy) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QueryParameterPolicy. Required by controller-gen. @@ -322,8 +322,8 @@ func (in *QueryParameterPolicy) DeepCopyInterface() interface{} { // DeepCopyInto supports using ConnectionPropertiesPolicy within kubernetes types, where deepcopy-gen is used. func (in *ConnectionPropertiesPolicy) DeepCopyInto(out *ConnectionPropertiesPolicy) { - p := proto.Clone(in).(*ConnectionPropertiesPolicy) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectionPropertiesPolicy. Required by controller-gen. diff --git a/proto-public/pbmesh/v2beta1/pbproxystate/traffic_permissions_deepcopy.gen.go b/proto-public/pbmesh/v2beta1/pbproxystate/traffic_permissions_deepcopy.gen.go index 316c185dfa34..b45d2a79261a 100644 --- a/proto-public/pbmesh/v2beta1/pbproxystate/traffic_permissions_deepcopy.gen.go +++ b/proto-public/pbmesh/v2beta1/pbproxystate/traffic_permissions_deepcopy.gen.go @@ -7,8 +7,8 @@ import ( // DeepCopyInto supports using TrafficPermissions within kubernetes types, where deepcopy-gen is used. func (in *TrafficPermissions) DeepCopyInto(out *TrafficPermissions) { - p := proto.Clone(in).(*TrafficPermissions) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TrafficPermissions. Required by controller-gen. @@ -28,8 +28,8 @@ func (in *TrafficPermissions) DeepCopyInterface() interface{} { // DeepCopyInto supports using Permission within kubernetes types, where deepcopy-gen is used. func (in *Permission) DeepCopyInto(out *Permission) { - p := proto.Clone(in).(*Permission) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Permission. Required by controller-gen. @@ -49,8 +49,8 @@ func (in *Permission) DeepCopyInterface() interface{} { // DeepCopyInto supports using Principal within kubernetes types, where deepcopy-gen is used. func (in *Principal) DeepCopyInto(out *Principal) { - p := proto.Clone(in).(*Principal) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Principal. Required by controller-gen. @@ -70,8 +70,8 @@ func (in *Principal) DeepCopyInterface() interface{} { // DeepCopyInto supports using Spiffe within kubernetes types, where deepcopy-gen is used. 
func (in *Spiffe) DeepCopyInto(out *Spiffe) { - p := proto.Clone(in).(*Spiffe) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Spiffe. Required by controller-gen. diff --git a/proto-public/pbmesh/v2beta1/pbproxystate/transport_socket_deepcopy.gen.go b/proto-public/pbmesh/v2beta1/pbproxystate/transport_socket_deepcopy.gen.go index f2ba600e77e4..0d0520e328b5 100644 --- a/proto-public/pbmesh/v2beta1/pbproxystate/transport_socket_deepcopy.gen.go +++ b/proto-public/pbmesh/v2beta1/pbproxystate/transport_socket_deepcopy.gen.go @@ -7,8 +7,8 @@ import ( // DeepCopyInto supports using TLS within kubernetes types, where deepcopy-gen is used. func (in *TLS) DeepCopyInto(out *TLS) { - p := proto.Clone(in).(*TLS) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TLS. Required by controller-gen. @@ -28,8 +28,8 @@ func (in *TLS) DeepCopyInterface() interface{} { // DeepCopyInto supports using TransportSocket within kubernetes types, where deepcopy-gen is used. func (in *TransportSocket) DeepCopyInto(out *TransportSocket) { - p := proto.Clone(in).(*TransportSocket) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TransportSocket. Required by controller-gen. @@ -49,8 +49,8 @@ func (in *TransportSocket) DeepCopyInterface() interface{} { // DeepCopyInto supports using InboundMeshMTLS within kubernetes types, where deepcopy-gen is used. func (in *InboundMeshMTLS) DeepCopyInto(out *InboundMeshMTLS) { - p := proto.Clone(in).(*InboundMeshMTLS) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InboundMeshMTLS. Required by controller-gen. @@ -70,8 +70,8 @@ func (in *InboundMeshMTLS) DeepCopyInterface() interface{} { // DeepCopyInto supports using OutboundMeshMTLS within kubernetes types, where deepcopy-gen is used. func (in *OutboundMeshMTLS) DeepCopyInto(out *OutboundMeshMTLS) { - p := proto.Clone(in).(*OutboundMeshMTLS) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OutboundMeshMTLS. Required by controller-gen. @@ -91,8 +91,8 @@ func (in *OutboundMeshMTLS) DeepCopyInterface() interface{} { // DeepCopyInto supports using InboundNonMeshTLS within kubernetes types, where deepcopy-gen is used. func (in *InboundNonMeshTLS) DeepCopyInto(out *InboundNonMeshTLS) { - p := proto.Clone(in).(*InboundNonMeshTLS) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InboundNonMeshTLS. Required by controller-gen. @@ -112,8 +112,8 @@ func (in *InboundNonMeshTLS) DeepCopyInterface() interface{} { // DeepCopyInto supports using OutboundNonMeshTLS within kubernetes types, where deepcopy-gen is used. func (in *OutboundNonMeshTLS) DeepCopyInto(out *OutboundNonMeshTLS) { - p := proto.Clone(in).(*OutboundNonMeshTLS) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OutboundNonMeshTLS. Required by controller-gen. 
@@ -133,8 +133,8 @@ func (in *OutboundNonMeshTLS) DeepCopyInterface() interface{} { // DeepCopyInto supports using MeshInboundValidationContext within kubernetes types, where deepcopy-gen is used. func (in *MeshInboundValidationContext) DeepCopyInto(out *MeshInboundValidationContext) { - p := proto.Clone(in).(*MeshInboundValidationContext) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MeshInboundValidationContext. Required by controller-gen. @@ -154,8 +154,8 @@ func (in *MeshInboundValidationContext) DeepCopyInterface() interface{} { // DeepCopyInto supports using MeshOutboundValidationContext within kubernetes types, where deepcopy-gen is used. func (in *MeshOutboundValidationContext) DeepCopyInto(out *MeshOutboundValidationContext) { - p := proto.Clone(in).(*MeshOutboundValidationContext) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MeshOutboundValidationContext. Required by controller-gen. @@ -175,8 +175,8 @@ func (in *MeshOutboundValidationContext) DeepCopyInterface() interface{} { // DeepCopyInto supports using NonMeshOutboundValidationContext within kubernetes types, where deepcopy-gen is used. func (in *NonMeshOutboundValidationContext) DeepCopyInto(out *NonMeshOutboundValidationContext) { - p := proto.Clone(in).(*NonMeshOutboundValidationContext) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NonMeshOutboundValidationContext. Required by controller-gen. @@ -196,8 +196,8 @@ func (in *NonMeshOutboundValidationContext) DeepCopyInterface() interface{} { // DeepCopyInto supports using SDSCertificate within kubernetes types, where deepcopy-gen is used. func (in *SDSCertificate) DeepCopyInto(out *SDSCertificate) { - p := proto.Clone(in).(*SDSCertificate) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SDSCertificate. Required by controller-gen. @@ -217,8 +217,8 @@ func (in *SDSCertificate) DeepCopyInterface() interface{} { // DeepCopyInto supports using TLSParameters within kubernetes types, where deepcopy-gen is used. func (in *TLSParameters) DeepCopyInto(out *TLSParameters) { - p := proto.Clone(in).(*TLSParameters) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TLSParameters. Required by controller-gen. @@ -238,8 +238,8 @@ func (in *TLSParameters) DeepCopyInterface() interface{} { // DeepCopyInto supports using LeafCertificate within kubernetes types, where deepcopy-gen is used. func (in *LeafCertificate) DeepCopyInto(out *LeafCertificate) { - p := proto.Clone(in).(*LeafCertificate) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LeafCertificate. Required by controller-gen. @@ -259,8 +259,8 @@ func (in *LeafCertificate) DeepCopyInterface() interface{} { // DeepCopyInto supports using TrustBundle within kubernetes types, where deepcopy-gen is used. 
func (in *TrustBundle) DeepCopyInto(out *TrustBundle) { - p := proto.Clone(in).(*TrustBundle) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TrustBundle. Required by controller-gen. diff --git a/proto-public/pbmesh/v2beta1/proxy_configuration_deepcopy.gen.go b/proto-public/pbmesh/v2beta1/proxy_configuration_deepcopy.gen.go index 8c185f882017..bc5b81a9a6fb 100644 --- a/proto-public/pbmesh/v2beta1/proxy_configuration_deepcopy.gen.go +++ b/proto-public/pbmesh/v2beta1/proxy_configuration_deepcopy.gen.go @@ -7,8 +7,8 @@ import ( // DeepCopyInto supports using ProxyConfiguration within kubernetes types, where deepcopy-gen is used. func (in *ProxyConfiguration) DeepCopyInto(out *ProxyConfiguration) { - p := proto.Clone(in).(*ProxyConfiguration) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProxyConfiguration. Required by controller-gen. @@ -28,8 +28,8 @@ func (in *ProxyConfiguration) DeepCopyInterface() interface{} { // DeepCopyInto supports using DynamicConfig within kubernetes types, where deepcopy-gen is used. func (in *DynamicConfig) DeepCopyInto(out *DynamicConfig) { - p := proto.Clone(in).(*DynamicConfig) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DynamicConfig. Required by controller-gen. @@ -49,8 +49,8 @@ func (in *DynamicConfig) DeepCopyInterface() interface{} { // DeepCopyInto supports using TransparentProxy within kubernetes types, where deepcopy-gen is used. func (in *TransparentProxy) DeepCopyInto(out *TransparentProxy) { - p := proto.Clone(in).(*TransparentProxy) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TransparentProxy. Required by controller-gen. @@ -70,8 +70,8 @@ func (in *TransparentProxy) DeepCopyInterface() interface{} { // DeepCopyInto supports using BootstrapConfig within kubernetes types, where deepcopy-gen is used. func (in *BootstrapConfig) DeepCopyInto(out *BootstrapConfig) { - p := proto.Clone(in).(*BootstrapConfig) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BootstrapConfig. Required by controller-gen. @@ -91,8 +91,8 @@ func (in *BootstrapConfig) DeepCopyInterface() interface{} { // DeepCopyInto supports using AccessLogsConfig within kubernetes types, where deepcopy-gen is used. func (in *AccessLogsConfig) DeepCopyInto(out *AccessLogsConfig) { - p := proto.Clone(in).(*AccessLogsConfig) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AccessLogsConfig. Required by controller-gen. @@ -112,8 +112,8 @@ func (in *AccessLogsConfig) DeepCopyInterface() interface{} { // DeepCopyInto supports using EnvoyExtension within kubernetes types, where deepcopy-gen is used. func (in *EnvoyExtension) DeepCopyInto(out *EnvoyExtension) { - p := proto.Clone(in).(*EnvoyExtension) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EnvoyExtension. Required by controller-gen. 
diff --git a/proto-public/pbmesh/v2beta1/proxy_state_deepcopy.gen.go b/proto-public/pbmesh/v2beta1/proxy_state_deepcopy.gen.go index c6d34360e0f9..fbc6ed842098 100644 --- a/proto-public/pbmesh/v2beta1/proxy_state_deepcopy.gen.go +++ b/proto-public/pbmesh/v2beta1/proxy_state_deepcopy.gen.go @@ -7,8 +7,8 @@ import ( // DeepCopyInto supports using ProxyStateTemplate within kubernetes types, where deepcopy-gen is used. func (in *ProxyStateTemplate) DeepCopyInto(out *ProxyStateTemplate) { - p := proto.Clone(in).(*ProxyStateTemplate) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProxyStateTemplate. Required by controller-gen. @@ -28,8 +28,8 @@ func (in *ProxyStateTemplate) DeepCopyInterface() interface{} { // DeepCopyInto supports using ProxyState within kubernetes types, where deepcopy-gen is used. func (in *ProxyState) DeepCopyInto(out *ProxyState) { - p := proto.Clone(in).(*ProxyState) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProxyState. Required by controller-gen. diff --git a/proto-public/pbmesh/v2beta1/tcp_route_deepcopy.gen.go b/proto-public/pbmesh/v2beta1/tcp_route_deepcopy.gen.go index c2bd4afc57f8..487209766f96 100644 --- a/proto-public/pbmesh/v2beta1/tcp_route_deepcopy.gen.go +++ b/proto-public/pbmesh/v2beta1/tcp_route_deepcopy.gen.go @@ -7,8 +7,8 @@ import ( // DeepCopyInto supports using TCPRoute within kubernetes types, where deepcopy-gen is used. func (in *TCPRoute) DeepCopyInto(out *TCPRoute) { - p := proto.Clone(in).(*TCPRoute) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TCPRoute. Required by controller-gen. @@ -28,8 +28,8 @@ func (in *TCPRoute) DeepCopyInterface() interface{} { // DeepCopyInto supports using TCPRouteRule within kubernetes types, where deepcopy-gen is used. func (in *TCPRouteRule) DeepCopyInto(out *TCPRouteRule) { - p := proto.Clone(in).(*TCPRouteRule) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TCPRouteRule. Required by controller-gen. @@ -49,8 +49,8 @@ func (in *TCPRouteRule) DeepCopyInterface() interface{} { // DeepCopyInto supports using TCPBackendRef within kubernetes types, where deepcopy-gen is used. func (in *TCPBackendRef) DeepCopyInto(out *TCPBackendRef) { - p := proto.Clone(in).(*TCPBackendRef) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TCPBackendRef. Required by controller-gen. diff --git a/proto-public/pbresource/annotations_deepcopy.gen.go b/proto-public/pbresource/annotations_deepcopy.gen.go index a4265c38505c..0693ee89e2e5 100644 --- a/proto-public/pbresource/annotations_deepcopy.gen.go +++ b/proto-public/pbresource/annotations_deepcopy.gen.go @@ -7,8 +7,8 @@ import ( // DeepCopyInto supports using ResourceTypeSpec within kubernetes types, where deepcopy-gen is used. func (in *ResourceTypeSpec) DeepCopyInto(out *ResourceTypeSpec) { - p := proto.Clone(in).(*ResourceTypeSpec) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceTypeSpec. Required by controller-gen. 
diff --git a/proto-public/pbresource/resource_deepcopy.gen.go b/proto-public/pbresource/resource_deepcopy.gen.go index 92fdb8da2f46..5b81e6f9dfed 100644 --- a/proto-public/pbresource/resource_deepcopy.gen.go +++ b/proto-public/pbresource/resource_deepcopy.gen.go @@ -7,8 +7,8 @@ import ( // DeepCopyInto supports using Type within kubernetes types, where deepcopy-gen is used. func (in *Type) DeepCopyInto(out *Type) { - p := proto.Clone(in).(*Type) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Type. Required by controller-gen. @@ -28,8 +28,8 @@ func (in *Type) DeepCopyInterface() interface{} { // DeepCopyInto supports using Tenancy within kubernetes types, where deepcopy-gen is used. func (in *Tenancy) DeepCopyInto(out *Tenancy) { - p := proto.Clone(in).(*Tenancy) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Tenancy. Required by controller-gen. @@ -49,8 +49,8 @@ func (in *Tenancy) DeepCopyInterface() interface{} { // DeepCopyInto supports using ID within kubernetes types, where deepcopy-gen is used. func (in *ID) DeepCopyInto(out *ID) { - p := proto.Clone(in).(*ID) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ID. Required by controller-gen. @@ -70,8 +70,8 @@ func (in *ID) DeepCopyInterface() interface{} { // DeepCopyInto supports using Resource within kubernetes types, where deepcopy-gen is used. func (in *Resource) DeepCopyInto(out *Resource) { - p := proto.Clone(in).(*Resource) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Resource. Required by controller-gen. @@ -91,8 +91,8 @@ func (in *Resource) DeepCopyInterface() interface{} { // DeepCopyInto supports using Status within kubernetes types, where deepcopy-gen is used. func (in *Status) DeepCopyInto(out *Status) { - p := proto.Clone(in).(*Status) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Status. Required by controller-gen. @@ -112,8 +112,8 @@ func (in *Status) DeepCopyInterface() interface{} { // DeepCopyInto supports using Condition within kubernetes types, where deepcopy-gen is used. func (in *Condition) DeepCopyInto(out *Condition) { - p := proto.Clone(in).(*Condition) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Condition. Required by controller-gen. @@ -133,8 +133,8 @@ func (in *Condition) DeepCopyInterface() interface{} { // DeepCopyInto supports using Reference within kubernetes types, where deepcopy-gen is used. func (in *Reference) DeepCopyInto(out *Reference) { - p := proto.Clone(in).(*Reference) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Reference. Required by controller-gen. @@ -154,8 +154,8 @@ func (in *Reference) DeepCopyInterface() interface{} { // DeepCopyInto supports using Tombstone within kubernetes types, where deepcopy-gen is used. 
func (in *Tombstone) DeepCopyInto(out *Tombstone) { - p := proto.Clone(in).(*Tombstone) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Tombstone. Required by controller-gen. @@ -175,8 +175,8 @@ func (in *Tombstone) DeepCopyInterface() interface{} { // DeepCopyInto supports using ReadRequest within kubernetes types, where deepcopy-gen is used. func (in *ReadRequest) DeepCopyInto(out *ReadRequest) { - p := proto.Clone(in).(*ReadRequest) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReadRequest. Required by controller-gen. @@ -196,8 +196,8 @@ func (in *ReadRequest) DeepCopyInterface() interface{} { // DeepCopyInto supports using ReadResponse within kubernetes types, where deepcopy-gen is used. func (in *ReadResponse) DeepCopyInto(out *ReadResponse) { - p := proto.Clone(in).(*ReadResponse) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReadResponse. Required by controller-gen. @@ -217,8 +217,8 @@ func (in *ReadResponse) DeepCopyInterface() interface{} { // DeepCopyInto supports using ListRequest within kubernetes types, where deepcopy-gen is used. func (in *ListRequest) DeepCopyInto(out *ListRequest) { - p := proto.Clone(in).(*ListRequest) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ListRequest. Required by controller-gen. @@ -238,8 +238,8 @@ func (in *ListRequest) DeepCopyInterface() interface{} { // DeepCopyInto supports using ListResponse within kubernetes types, where deepcopy-gen is used. func (in *ListResponse) DeepCopyInto(out *ListResponse) { - p := proto.Clone(in).(*ListResponse) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ListResponse. Required by controller-gen. @@ -259,8 +259,8 @@ func (in *ListResponse) DeepCopyInterface() interface{} { // DeepCopyInto supports using ListByOwnerRequest within kubernetes types, where deepcopy-gen is used. func (in *ListByOwnerRequest) DeepCopyInto(out *ListByOwnerRequest) { - p := proto.Clone(in).(*ListByOwnerRequest) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ListByOwnerRequest. Required by controller-gen. @@ -280,8 +280,8 @@ func (in *ListByOwnerRequest) DeepCopyInterface() interface{} { // DeepCopyInto supports using ListByOwnerResponse within kubernetes types, where deepcopy-gen is used. func (in *ListByOwnerResponse) DeepCopyInto(out *ListByOwnerResponse) { - p := proto.Clone(in).(*ListByOwnerResponse) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ListByOwnerResponse. Required by controller-gen. @@ -301,8 +301,8 @@ func (in *ListByOwnerResponse) DeepCopyInterface() interface{} { // DeepCopyInto supports using WriteRequest within kubernetes types, where deepcopy-gen is used. 
func (in *WriteRequest) DeepCopyInto(out *WriteRequest) { - p := proto.Clone(in).(*WriteRequest) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WriteRequest. Required by controller-gen. @@ -322,8 +322,8 @@ func (in *WriteRequest) DeepCopyInterface() interface{} { // DeepCopyInto supports using WriteResponse within kubernetes types, where deepcopy-gen is used. func (in *WriteResponse) DeepCopyInto(out *WriteResponse) { - p := proto.Clone(in).(*WriteResponse) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WriteResponse. Required by controller-gen. @@ -343,8 +343,8 @@ func (in *WriteResponse) DeepCopyInterface() interface{} { // DeepCopyInto supports using WriteStatusRequest within kubernetes types, where deepcopy-gen is used. func (in *WriteStatusRequest) DeepCopyInto(out *WriteStatusRequest) { - p := proto.Clone(in).(*WriteStatusRequest) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WriteStatusRequest. Required by controller-gen. @@ -364,8 +364,8 @@ func (in *WriteStatusRequest) DeepCopyInterface() interface{} { // DeepCopyInto supports using WriteStatusResponse within kubernetes types, where deepcopy-gen is used. func (in *WriteStatusResponse) DeepCopyInto(out *WriteStatusResponse) { - p := proto.Clone(in).(*WriteStatusResponse) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WriteStatusResponse. Required by controller-gen. @@ -385,8 +385,8 @@ func (in *WriteStatusResponse) DeepCopyInterface() interface{} { // DeepCopyInto supports using DeleteRequest within kubernetes types, where deepcopy-gen is used. func (in *DeleteRequest) DeepCopyInto(out *DeleteRequest) { - p := proto.Clone(in).(*DeleteRequest) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeleteRequest. Required by controller-gen. @@ -406,8 +406,8 @@ func (in *DeleteRequest) DeepCopyInterface() interface{} { // DeepCopyInto supports using DeleteResponse within kubernetes types, where deepcopy-gen is used. func (in *DeleteResponse) DeepCopyInto(out *DeleteResponse) { - p := proto.Clone(in).(*DeleteResponse) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeleteResponse. Required by controller-gen. @@ -427,8 +427,8 @@ func (in *DeleteResponse) DeepCopyInterface() interface{} { // DeepCopyInto supports using WatchListRequest within kubernetes types, where deepcopy-gen is used. func (in *WatchListRequest) DeepCopyInto(out *WatchListRequest) { - p := proto.Clone(in).(*WatchListRequest) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WatchListRequest. Required by controller-gen. @@ -448,8 +448,8 @@ func (in *WatchListRequest) DeepCopyInterface() interface{} { // DeepCopyInto supports using WatchEvent within kubernetes types, where deepcopy-gen is used. 
func (in *WatchEvent) DeepCopyInto(out *WatchEvent) { - p := proto.Clone(in).(*WatchEvent) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WatchEvent. Required by controller-gen. diff --git a/proto-public/pbserverdiscovery/serverdiscovery_deepcopy.gen.go b/proto-public/pbserverdiscovery/serverdiscovery_deepcopy.gen.go index 6d5016561427..2bd82a8a891a 100644 --- a/proto-public/pbserverdiscovery/serverdiscovery_deepcopy.gen.go +++ b/proto-public/pbserverdiscovery/serverdiscovery_deepcopy.gen.go @@ -7,8 +7,8 @@ import ( // DeepCopyInto supports using WatchServersRequest within kubernetes types, where deepcopy-gen is used. func (in *WatchServersRequest) DeepCopyInto(out *WatchServersRequest) { - p := proto.Clone(in).(*WatchServersRequest) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WatchServersRequest. Required by controller-gen. @@ -28,8 +28,8 @@ func (in *WatchServersRequest) DeepCopyInterface() interface{} { // DeepCopyInto supports using WatchServersResponse within kubernetes types, where deepcopy-gen is used. func (in *WatchServersResponse) DeepCopyInto(out *WatchServersResponse) { - p := proto.Clone(in).(*WatchServersResponse) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WatchServersResponse. Required by controller-gen. @@ -49,8 +49,8 @@ func (in *WatchServersResponse) DeepCopyInterface() interface{} { // DeepCopyInto supports using Server within kubernetes types, where deepcopy-gen is used. func (in *Server) DeepCopyInto(out *Server) { - p := proto.Clone(in).(*Server) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Server. Required by controller-gen. diff --git a/proto-public/pbtenancy/v1alpha1/namespace_deepcopy.gen.go b/proto-public/pbtenancy/v1alpha1/namespace_deepcopy.gen.go index 52ee3d7e7cc7..97af531ab333 100644 --- a/proto-public/pbtenancy/v1alpha1/namespace_deepcopy.gen.go +++ b/proto-public/pbtenancy/v1alpha1/namespace_deepcopy.gen.go @@ -7,8 +7,8 @@ import ( // DeepCopyInto supports using Namespace within kubernetes types, where deepcopy-gen is used. func (in *Namespace) DeepCopyInto(out *Namespace) { - p := proto.Clone(in).(*Namespace) - *out = *p + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Namespace. Required by controller-gen. From 9b0f4b7fc5e65390ef04a01edbf5b98f6ee0f95e Mon Sep 17 00:00:00 2001 From: Michael Zalimeni Date: Fri, 13 Oct 2023 17:12:36 -0400 Subject: [PATCH 003/130] chore: update version and nightly CI for 1.17 (#19208) Update version file to 1.18-dev, and replace 1.13 nightly test with 1.17. 
--- .../{nightly-test-1.13.x.yaml => nightly-test-1.17.x.yaml} | 6 +++--- version/VERSION | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) rename .github/workflows/{nightly-test-1.13.x.yaml => nightly-test-1.17.x.yaml} (98%) diff --git a/.github/workflows/nightly-test-1.13.x.yaml b/.github/workflows/nightly-test-1.17.x.yaml similarity index 98% rename from .github/workflows/nightly-test-1.13.x.yaml rename to .github/workflows/nightly-test-1.17.x.yaml index f314a475dfbd..df6f8d946f83 100644 --- a/.github/workflows/nightly-test-1.13.x.yaml +++ b/.github/workflows/nightly-test-1.17.x.yaml @@ -1,7 +1,7 @@ # Copyright (c) HashiCorp, Inc. # SPDX-License-Identifier: MPL-2.0 -name: Nightly Frontend Test 1.13.x +name: Nightly Frontend Test 1.17.x on: schedule: - cron: '0 4 * * *' @@ -9,8 +9,8 @@ on: env: EMBER_PARTITION_TOTAL: 4 # Has to be changed in tandem with the matrix.partition - BRANCH: "release/1.13.x" - BRANCH_NAME: "release-1.13.x" # Used for naming artifacts + BRANCH: "release/1.17.x" + BRANCH_NAME: "release-1.17.x" # Used for naming artifacts GOPRIVATE: github.com/hashicorp # Required for enterprise deps jobs: diff --git a/version/VERSION b/version/VERSION index ee8855caa4a7..ee017091ff37 100644 --- a/version/VERSION +++ b/version/VERSION @@ -1 +1 @@ -1.17.0-dev +1.18.0-dev From e94d6ceca6ba825101e6591b37410579c68ab2cb Mon Sep 17 00:00:00 2001 From: Iryna Shustava Date: Fri, 13 Oct 2023 15:21:39 -0600 Subject: [PATCH 004/130] mesh: add validation hook to proxy configuration (#19186) --- .../proxyconfiguration/sort_test.go | 3 +- internal/mesh/internal/types/errors.go | 14 + .../internal/types/proxy_configuration.go | 174 ++++++++- .../types/proxy_configuration_test.go | 193 +++++++++- internal/resource/errors.go | 19 +- .../pbmesh/v2beta1/proxy_configuration.pb.go | 329 +++++++++--------- .../pbmesh/v2beta1/proxy_configuration.proto | 14 +- proto-public/pbmesh/v2beta1/routing.pb.go | 2 +- proto-public/pbmesh/v2beta1/routing.proto | 2 +- 9 files changed, 555 insertions(+), 195 deletions(-) create mode 100644 internal/mesh/internal/types/errors.go diff --git a/internal/mesh/internal/controllers/proxyconfiguration/sort_test.go b/internal/mesh/internal/controllers/proxyconfiguration/sort_test.go index 36033a47e477..1fbf8254ee60 100644 --- a/internal/mesh/internal/controllers/proxyconfiguration/sort_test.go +++ b/internal/mesh/internal/controllers/proxyconfiguration/sort_test.go @@ -99,7 +99,8 @@ func TestSortProxyConfigurations(t *testing.T) { var decProxyCfgs []*types.DecodedProxyConfiguration for i, ws := range c.selectors { proxyCfg := &pbmesh.ProxyConfiguration{ - Workloads: ws, + Workloads: ws, + DynamicConfig: &pbmesh.DynamicConfig{}, } resName := fmt.Sprintf("cfg-%d", i) proxyCfgRes := resourcetest.Resource(pbmesh.ProxyConfigurationType, resName). diff --git a/internal/mesh/internal/types/errors.go b/internal/mesh/internal/types/errors.go new file mode 100644 index 000000000000..e38085b49fa2 --- /dev/null +++ b/internal/mesh/internal/types/errors.go @@ -0,0 +1,14 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + +package types + +import ( + "errors" +) + +var ( + errInvalidPort = errors.New("port number is outside the range 1 to 65535") + errInvalidExposePathProtocol = errors.New("invalid protocol: only HTTP and HTTP2 protocols are allowed") + errMissingProxyConfigData = errors.New("at least one of \"bootstrap_config\" or \"dynamic_config\" fields must be set") +) diff --git a/internal/mesh/internal/types/proxy_configuration.go b/internal/mesh/internal/types/proxy_configuration.go index 590ee27aeb1b..0c9ac05147e1 100644 --- a/internal/mesh/internal/types/proxy_configuration.go +++ b/internal/mesh/internal/types/proxy_configuration.go @@ -4,9 +4,12 @@ package types import ( + "math" + "github.com/hashicorp/go-multierror" "github.com/hashicorp/consul/internal/catalog" + "github.com/hashicorp/consul/internal/resource" pbmesh "github.com/hashicorp/consul/proto-public/pbmesh/v2beta1" "github.com/hashicorp/consul/proto-public/pbresource" @@ -53,23 +56,174 @@ func MutateProxyConfiguration(res *pbresource.Resource) error { } func ValidateProxyConfiguration(res *pbresource.Resource) error { - var cfg pbmesh.ProxyConfiguration - - if err := res.Data.UnmarshalTo(&cfg); err != nil { - return resource.NewErrDataParse(&cfg, err) + decodedProxyCfg, decodeErr := resource.Decode[*pbmesh.ProxyConfiguration](res) + if decodeErr != nil { + return resource.NewErrDataParse(decodedProxyCfg.GetData(), decodeErr) } + proxyCfg := decodedProxyCfg.GetData() - var merr error + var err error - // Validate the workload selector - if selErr := catalog.ValidateSelector(cfg.Workloads, false); selErr != nil { - merr = multierror.Append(merr, resource.ErrInvalidField{ + if selErr := catalog.ValidateSelector(proxyCfg.Workloads, false); selErr != nil { + err = multierror.Append(err, resource.ErrInvalidField{ Name: "workloads", Wrapped: selErr, }) } - // TODO(rb): add more validation for proxy configuration + if proxyCfg.GetDynamicConfig() == nil && proxyCfg.GetBootstrapConfig() == nil { + err = multierror.Append(err, resource.ErrInvalidFields{ + Names: []string{"dynamic_config", "bootstrap_config"}, + Wrapped: errMissingProxyConfigData, + }) + } + + // nolint:staticcheck + if proxyCfg.GetOpaqueConfig() != nil { + err = multierror.Append(err, resource.ErrInvalidField{ + Name: "opaque_config", + Wrapped: resource.ErrUnsupported, + }) + } + + if dynamicCfgErr := validateDynamicProxyConfiguration(proxyCfg.GetDynamicConfig()); dynamicCfgErr != nil { + err = multierror.Append(err, resource.ErrInvalidField{ + Name: "dynamic_config", + Wrapped: dynamicCfgErr, + }) + } + + return err +} + +func validateDynamicProxyConfiguration(cfg *pbmesh.DynamicConfig) error { + if cfg == nil { + return nil + } + + var err error + + // Error if any of the currently unsupported fields is set. 
+ if cfg.GetMutualTlsMode() != pbmesh.MutualTLSMode_MUTUAL_TLS_MODE_DEFAULT { + err = multierror.Append(err, resource.ErrInvalidField{ + Name: "mutual_tls_mode", + Wrapped: resource.ErrUnsupported, + }) + } + + if cfg.GetMeshGatewayMode() != pbmesh.MeshGatewayMode_MESH_GATEWAY_MODE_UNSPECIFIED { + err = multierror.Append(err, resource.ErrInvalidField{ + Name: "mesh_gateway_mode", + Wrapped: resource.ErrUnsupported, + }) + } - return merr + if cfg.GetAccessLogs() != nil { + err = multierror.Append(err, resource.ErrInvalidField{ + Name: "access_logs", + Wrapped: resource.ErrUnsupported, + }) + } + + if cfg.GetPublicListenerJson() != "" { + err = multierror.Append(err, resource.ErrInvalidField{ + Name: "public_listener_json", + Wrapped: resource.ErrUnsupported, + }) + } + + if cfg.GetListenerTracingJson() != "" { + err = multierror.Append(err, resource.ErrInvalidField{ + Name: "listener_tracing_json", + Wrapped: resource.ErrUnsupported, + }) + } + + if cfg.GetLocalClusterJson() != "" { + err = multierror.Append(err, resource.ErrInvalidField{ + Name: "local_cluster_json", + Wrapped: resource.ErrUnsupported, + }) + } + + // nolint:staticcheck + if cfg.GetLocalWorkloadAddress() != "" { + err = multierror.Append(err, resource.ErrInvalidField{ + Name: "local_workload_address", + Wrapped: resource.ErrUnsupported, + }) + } + + // nolint:staticcheck + if cfg.GetLocalWorkloadPort() != 0 { + err = multierror.Append(err, resource.ErrInvalidField{ + Name: "local_workload_port", + Wrapped: resource.ErrUnsupported, + }) + } + + // nolint:staticcheck + if cfg.GetLocalWorkloadSocketPath() != "" { + err = multierror.Append(err, resource.ErrInvalidField{ + Name: "local_workload_socket_path", + Wrapped: resource.ErrUnsupported, + }) + } + + if tproxyCfg := cfg.GetTransparentProxy(); tproxyCfg != nil { + if tproxyCfg.DialedDirectly { + err = multierror.Append(err, resource.ErrInvalidField{ + Name: "transparent_proxy", + Wrapped: resource.ErrInvalidField{ + Name: "dialed_directly", + Wrapped: resource.ErrUnsupported, + }, + }) + } + + if outboundListenerPortErr := validatePort(tproxyCfg.OutboundListenerPort, "outbound_listener_port"); outboundListenerPortErr != nil { + err = multierror.Append(err, resource.ErrInvalidField{ + Name: "transparent_proxy", + Wrapped: outboundListenerPortErr, + }) + } + } + + if exposeCfg := cfg.GetExposeConfig(); exposeCfg != nil { + for i, path := range exposeCfg.GetExposePaths() { + if listenerPortErr := validatePort(path.ListenerPort, "listener_port"); listenerPortErr != nil { + err = multierror.Append(err, resource.ErrInvalidField{ + Name: "expose_config", + Wrapped: resource.ErrInvalidListElement{ + Name: "expose_paths", + Index: i, + Wrapped: listenerPortErr, + }, + }) + } + + if localPathPortErr := validatePort(path.LocalPathPort, "local_path_port"); localPathPortErr != nil { + err = multierror.Append(err, resource.ErrInvalidField{ + Name: "expose_config", + Wrapped: resource.ErrInvalidListElement{ + Name: "expose_paths", + Index: i, + Wrapped: localPathPortErr, + }, + }) + } + } + } + + return err +} + +func validatePort(port uint32, fieldName string) error { + if port < 1 || port > math.MaxUint16 { + return resource.ErrInvalidField{ + Name: fieldName, + Wrapped: errInvalidPort, + } + } + return nil } diff --git a/internal/mesh/internal/types/proxy_configuration_test.go b/internal/mesh/internal/types/proxy_configuration_test.go index 44d8b8820af4..c504ff2bd03d 100644 --- a/internal/mesh/internal/types/proxy_configuration_test.go +++ 
b/internal/mesh/internal/types/proxy_configuration_test.go @@ -4,9 +4,12 @@ package types import ( + "math" "testing" + "github.com/hashicorp/go-multierror" "github.com/stretchr/testify/require" + "google.golang.org/protobuf/types/known/structpb" "github.com/hashicorp/consul/internal/resource" "github.com/hashicorp/consul/internal/resource/resourcetest" @@ -86,7 +89,194 @@ func TestMutateProxyConfiguration(t *testing.T) { } } -func TestValidateProxyConfiguration(t *testing.T) { +func TestValidateProxyConfiguration_MissingBothDynamicAndBootstrapConfig(t *testing.T) { + proxyCfg := &pbmesh.ProxyConfiguration{ + Workloads: &pbcatalog.WorkloadSelector{Names: []string{"foo"}}, + } + + res := resourcetest.Resource(pbmesh.ProxyConfigurationType, "test"). + WithData(t, proxyCfg). + Build() + + err := ValidateProxyConfiguration(res) + + var expError error + expError = multierror.Append(expError, + resource.ErrInvalidFields{ + Names: []string{"dynamic_config", "bootstrap_config"}, + Wrapped: errMissingProxyConfigData, + }, + ) + require.Equal(t, err, expError) +} + +func TestValidateProxyConfiguration_AllFieldsInvalid(t *testing.T) { + proxyCfg := &pbmesh.ProxyConfiguration{ + // Omit workload selector. + + DynamicConfig: &pbmesh.DynamicConfig{ + // Set unsupported fields. + MutualTlsMode: pbmesh.MutualTLSMode_MUTUAL_TLS_MODE_PERMISSIVE, + MeshGatewayMode: pbmesh.MeshGatewayMode_MESH_GATEWAY_MODE_LOCAL, + AccessLogs: &pbmesh.AccessLogsConfig{}, + PublicListenerJson: "listener-json", + ListenerTracingJson: "tracing-json", + LocalClusterJson: "cluster-json", + LocalWorkloadAddress: "1.1.1.1", + LocalWorkloadPort: 1234, + LocalWorkloadSocketPath: "/foo/bar", + + TransparentProxy: &pbmesh.TransparentProxy{ + DialedDirectly: true, // unsupported + OutboundListenerPort: math.MaxUint16 + 1, // invalid + }, + + // Create invalid expose paths config. + ExposeConfig: &pbmesh.ExposeConfig{ + ExposePaths: []*pbmesh.ExposePath{ + { + ListenerPort: 0, + LocalPathPort: math.MaxUint16 + 1, + }, + }, + }, + }, + + OpaqueConfig: &structpb.Struct{}, + } + + res := resourcetest.Resource(pbmesh.ProxyConfigurationType, "test"). + WithData(t, proxyCfg). 
+ Build() + + err := ValidateProxyConfiguration(res) + + var dynamicCfgErr error + unsupportedFields := []string{ + "mutual_tls_mode", + "mesh_gateway_mode", + "access_logs", + "public_listener_json", + "listener_tracing_json", + "local_cluster_json", + "local_workload_address", + "local_workload_port", + "local_workload_socket_path", + } + for _, f := range unsupportedFields { + dynamicCfgErr = multierror.Append(dynamicCfgErr, + resource.ErrInvalidField{ + Name: f, + Wrapped: resource.ErrUnsupported, + }, + ) + } + dynamicCfgErr = multierror.Append(dynamicCfgErr, + resource.ErrInvalidField{ + Name: "transparent_proxy", + Wrapped: resource.ErrInvalidField{ + Name: "dialed_directly", + Wrapped: resource.ErrUnsupported, + }, + }, + resource.ErrInvalidField{ + Name: "transparent_proxy", + Wrapped: resource.ErrInvalidField{ + Name: "outbound_listener_port", + Wrapped: errInvalidPort, + }, + }, + resource.ErrInvalidField{ + Name: "expose_config", + Wrapped: resource.ErrInvalidListElement{ + Name: "expose_paths", + Wrapped: resource.ErrInvalidField{ + Name: "listener_port", + Wrapped: errInvalidPort, + }, + }, + }, + resource.ErrInvalidField{ + Name: "expose_config", + Wrapped: resource.ErrInvalidListElement{ + Name: "expose_paths", + Wrapped: resource.ErrInvalidField{ + Name: "local_path_port", + Wrapped: errInvalidPort, + }, + }, + }, + ) + + var expError error + expError = multierror.Append(expError, + resource.ErrInvalidField{ + Name: "workloads", + Wrapped: resource.ErrEmpty, + }, + resource.ErrInvalidField{ + Name: "opaque_config", + Wrapped: resource.ErrUnsupported, + }, + resource.ErrInvalidField{ + Name: "dynamic_config", + Wrapped: dynamicCfgErr, + }, + ) + + require.Equal(t, err, expError) +} + +func TestValidateProxyConfiguration_AllFieldsValid(t *testing.T) { + proxyCfg := &pbmesh.ProxyConfiguration{ + Workloads: &pbcatalog.WorkloadSelector{Names: []string{"foo"}}, + + DynamicConfig: &pbmesh.DynamicConfig{ + MutualTlsMode: pbmesh.MutualTLSMode_MUTUAL_TLS_MODE_DEFAULT, + MeshGatewayMode: pbmesh.MeshGatewayMode_MESH_GATEWAY_MODE_UNSPECIFIED, + + TransparentProxy: &pbmesh.TransparentProxy{ + DialedDirectly: false, + OutboundListenerPort: 15500, + }, + + ExposeConfig: &pbmesh.ExposeConfig{ + ExposePaths: []*pbmesh.ExposePath{ + { + ListenerPort: 1234, + LocalPathPort: 1235, + }, + }, + }, + }, + + BootstrapConfig: &pbmesh.BootstrapConfig{ + StatsdUrl: "stats-url", + DogstatsdUrl: "dogstats-url", + StatsTags: []string{"tags"}, + PrometheusBindAddr: "prom-bind-addr", + StatsBindAddr: "stats-bind-addr", + ReadyBindAddr: "ready-bind-addr", + OverrideJsonTpl: "override-json-tpl", + StaticClustersJson: "static-clusters-json", + StaticListenersJson: "static-listeners-json", + StatsSinksJson: "stats-sinks-json", + StatsConfigJson: "stats-config-json", + StatsFlushInterval: "stats-flush-interval", + TracingConfigJson: "tracing-config-json", + TelemetryCollectorBindSocketDir: "telemetry-collector-bind-socket-dir", + }, + } + + res := resourcetest.Resource(pbmesh.ProxyConfigurationType, "test"). + WithData(t, proxyCfg). 
+ Build() + + err := ValidateProxyConfiguration(res) + require.NoError(t, err) +} + +func TestValidateProxyConfiguration_WorkloadSelector(t *testing.T) { type testcase struct { data *pbmesh.ProxyConfiguration expectErr string @@ -146,6 +336,7 @@ func TestValidateProxyConfiguration(t *testing.T) { Names: []string{"blah"}, Filter: "metadata.foo == bar", }, + DynamicConfig: &pbmesh.DynamicConfig{}, }, }, } diff --git a/internal/resource/errors.go b/internal/resource/errors.go index dbb624c8d004..2003d86cbf71 100644 --- a/internal/resource/errors.go +++ b/internal/resource/errors.go @@ -5,15 +5,18 @@ package resource import ( "fmt" + "strings" - "github.com/hashicorp/consul/proto-public/pbresource" "google.golang.org/protobuf/reflect/protoreflect" + + "github.com/hashicorp/consul/proto-public/pbresource" ) var ( ErrMissing = NewConstError("missing required field") ErrEmpty = NewConstError("cannot be empty") ErrReferenceTenancyNotEqual = NewConstError("resource tenancy and reference tenancy differ") + ErrUnsupported = NewConstError("field is currently not supported") ) // ConstError is more or less equivalent to the stdlib errors.errorstring. However, having @@ -147,3 +150,17 @@ type ErrInvalidReferenceType struct { func (err ErrInvalidReferenceType) Error() string { return fmt.Sprintf("reference must have type %s", ToGVK(err.AllowedType)) } + +type ErrInvalidFields struct { + Names []string + Wrapped error +} + +func (err ErrInvalidFields) Error() string { + allFields := strings.Join(err.Names, ",") + return fmt.Sprintf("invalid %q fields: %v", allFields, err.Wrapped) +} + +func (err ErrInvalidFields) Unwrap() error { + return err.Wrapped +} diff --git a/proto-public/pbmesh/v2beta1/proxy_configuration.pb.go b/proto-public/pbmesh/v2beta1/proxy_configuration.pb.go index 1b09926b796f..a374848fd20e 100644 --- a/proto-public/pbmesh/v2beta1/proxy_configuration.pb.go +++ b/proto-public/pbmesh/v2beta1/proxy_configuration.pb.go @@ -298,20 +298,19 @@ type DynamicConfig struct { ExposeConfig *ExposeConfig `protobuf:"bytes,7,opt,name=expose_config,json=exposeConfig,proto3" json:"expose_config,omitempty"` // AccessLogs configures the output and format of Envoy access logs AccessLogs *AccessLogsConfig `protobuf:"bytes,8,opt,name=access_logs,json=accessLogs,proto3" json:"access_logs,omitempty"` - EnvoyExtensions []*EnvoyExtension `protobuf:"bytes,9,rep,name=envoy_extensions,json=envoyExtensions,proto3" json:"envoy_extensions,omitempty"` - PublicListenerJson string `protobuf:"bytes,10,opt,name=public_listener_json,json=publicListenerJson,proto3" json:"public_listener_json,omitempty"` - ListenerTracingJson string `protobuf:"bytes,11,opt,name=listener_tracing_json,json=listenerTracingJson,proto3" json:"listener_tracing_json,omitempty"` - LocalClusterJson string `protobuf:"bytes,12,opt,name=local_cluster_json,json=localClusterJson,proto3" json:"local_cluster_json,omitempty"` + PublicListenerJson string `protobuf:"bytes,9,opt,name=public_listener_json,json=publicListenerJson,proto3" json:"public_listener_json,omitempty"` + ListenerTracingJson string `protobuf:"bytes,10,opt,name=listener_tracing_json,json=listenerTracingJson,proto3" json:"listener_tracing_json,omitempty"` + LocalClusterJson string `protobuf:"bytes,11,opt,name=local_cluster_json,json=localClusterJson,proto3" json:"local_cluster_json,omitempty"` // deprecated: // local_workload_address, local_workload_port, and local_workload_socket_path // are deprecated and are only needed for migration of existing resources. 
// // Deprecated: Marked as deprecated in pbmesh/v2beta1/proxy_configuration.proto. - LocalWorkloadAddress string `protobuf:"bytes,13,opt,name=local_workload_address,json=localWorkloadAddress,proto3" json:"local_workload_address,omitempty"` + LocalWorkloadAddress string `protobuf:"bytes,12,opt,name=local_workload_address,json=localWorkloadAddress,proto3" json:"local_workload_address,omitempty"` // Deprecated: Marked as deprecated in pbmesh/v2beta1/proxy_configuration.proto. - LocalWorkloadPort uint32 `protobuf:"varint,14,opt,name=local_workload_port,json=localWorkloadPort,proto3" json:"local_workload_port,omitempty"` + LocalWorkloadPort uint32 `protobuf:"varint,13,opt,name=local_workload_port,json=localWorkloadPort,proto3" json:"local_workload_port,omitempty"` // Deprecated: Marked as deprecated in pbmesh/v2beta1/proxy_configuration.proto. - LocalWorkloadSocketPath string `protobuf:"bytes,15,opt,name=local_workload_socket_path,json=localWorkloadSocketPath,proto3" json:"local_workload_socket_path,omitempty"` + LocalWorkloadSocketPath string `protobuf:"bytes,14,opt,name=local_workload_socket_path,json=localWorkloadSocketPath,proto3" json:"local_workload_socket_path,omitempty"` } func (x *DynamicConfig) Reset() { @@ -402,13 +401,6 @@ func (x *DynamicConfig) GetAccessLogs() *AccessLogsConfig { return nil } -func (x *DynamicConfig) GetEnvoyExtensions() []*EnvoyExtension { - if x != nil { - return x.EnvoyExtensions - } - return nil -} - func (x *DynamicConfig) GetPublicListenerJson() string { if x != nil { return x.PublicListenerJson @@ -886,7 +878,7 @@ var file_pbmesh_v2beta1_proxy_configuration_proto_rawDesc = []byte{ 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x42, 0x02, 0x18, 0x01, 0x52, 0x0c, 0x6f, 0x70, 0x61, 0x71, 0x75, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x3a, 0x06, - 0xa2, 0x93, 0x04, 0x02, 0x08, 0x03, 0x22, 0xeb, 0x09, 0x0a, 0x0d, 0x44, 0x79, 0x6e, 0x61, 0x6d, + 0xa2, 0x93, 0x04, 0x02, 0x08, 0x03, 0x22, 0x91, 0x09, 0x0a, 0x0d, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x3c, 0x0a, 0x04, 0x6d, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x28, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x6d, 0x65, 0x73, 0x68, 0x2e, 0x76, @@ -932,154 +924,148 @@ var file_pbmesh_v2beta1_proxy_configuration_proto_rawDesc = []byte{ 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x6d, 0x65, 0x73, 0x68, 0x2e, 0x76, 0x32, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4c, 0x6f, 0x67, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0a, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4c, 0x6f, - 0x67, 0x73, 0x12, 0x58, 0x0a, 0x10, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x5f, 0x65, 0x78, 0x74, 0x65, - 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x68, - 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, - 0x6d, 0x65, 0x73, 0x68, 0x2e, 0x76, 0x32, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x45, 0x6e, 0x76, - 0x6f, 0x79, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x0f, 0x65, 0x6e, 0x76, - 0x6f, 0x79, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x30, 0x0a, 0x14, - 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x5f, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x5f, - 0x6a, 0x73, 0x6f, 0x6e, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 
0x70, 0x75, 0x62, 0x6c, - 0x69, 0x63, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x4a, 0x73, 0x6f, 0x6e, 0x12, 0x32, - 0x0a, 0x15, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x5f, 0x74, 0x72, 0x61, 0x63, 0x69, - 0x6e, 0x67, 0x5f, 0x6a, 0x73, 0x6f, 0x6e, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, 0x6c, - 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x54, 0x72, 0x61, 0x63, 0x69, 0x6e, 0x67, 0x4a, 0x73, - 0x6f, 0x6e, 0x12, 0x2c, 0x0a, 0x12, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x63, 0x6c, 0x75, 0x73, - 0x74, 0x65, 0x72, 0x5f, 0x6a, 0x73, 0x6f, 0x6e, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, - 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x4a, 0x73, 0x6f, 0x6e, - 0x12, 0x38, 0x0a, 0x16, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x77, 0x6f, 0x72, 0x6b, 0x6c, 0x6f, - 0x61, 0x64, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x09, - 0x42, 0x02, 0x18, 0x01, 0x52, 0x14, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x57, 0x6f, 0x72, 0x6b, 0x6c, - 0x6f, 0x61, 0x64, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x32, 0x0a, 0x13, 0x6c, 0x6f, - 0x63, 0x61, 0x6c, 0x5f, 0x77, 0x6f, 0x72, 0x6b, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x70, 0x6f, 0x72, - 0x74, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0d, 0x42, 0x02, 0x18, 0x01, 0x52, 0x11, 0x6c, 0x6f, 0x63, - 0x61, 0x6c, 0x57, 0x6f, 0x72, 0x6b, 0x6c, 0x6f, 0x61, 0x64, 0x50, 0x6f, 0x72, 0x74, 0x12, 0x3f, - 0x0a, 0x1a, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x77, 0x6f, 0x72, 0x6b, 0x6c, 0x6f, 0x61, 0x64, - 0x5f, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x18, 0x0f, 0x20, 0x01, - 0x28, 0x09, 0x42, 0x02, 0x18, 0x01, 0x52, 0x17, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x57, 0x6f, 0x72, - 0x6b, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x50, 0x61, 0x74, 0x68, 0x1a, - 0x73, 0x0a, 0x14, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, - 0x6f, 0x6e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x45, 0x0a, 0x05, 0x76, 0x61, 0x6c, - 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, - 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x6d, 0x65, 0x73, 0x68, - 0x2e, 0x76, 0x32, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, - 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, - 0x3a, 0x02, 0x38, 0x01, 0x22, 0x71, 0x0a, 0x10, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x61, 0x72, - 0x65, 0x6e, 0x74, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x12, 0x34, 0x0a, 0x16, 0x6f, 0x75, 0x74, 0x62, - 0x6f, 0x75, 0x6e, 0x64, 0x5f, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x5f, 0x70, 0x6f, - 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x14, 0x6f, 0x75, 0x74, 0x62, 0x6f, 0x75, - 0x6e, 0x64, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x50, 0x6f, 0x72, 0x74, 0x12, 0x27, - 0x0a, 0x0f, 0x64, 0x69, 0x61, 0x6c, 0x65, 0x64, 0x5f, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6c, - 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x64, 0x69, 0x61, 0x6c, 0x65, 0x64, 0x44, - 0x69, 0x72, 0x65, 0x63, 0x74, 0x6c, 0x79, 0x22, 0x8e, 0x05, 0x0a, 0x0f, 0x42, 0x6f, 0x6f, 0x74, - 0x73, 0x74, 0x72, 0x61, 0x70, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x1d, 0x0a, 0x0a, 0x73, - 0x74, 0x61, 0x74, 0x73, 0x64, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x09, 0x73, 0x74, 0x61, 0x74, 0x73, 0x64, 0x55, 0x72, 0x6c, 0x12, 0x23, 0x0a, 0x0d, 0x64, 0x6f, - 
0x67, 0x73, 0x74, 0x61, 0x74, 0x73, 0x64, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x0c, 0x64, 0x6f, 0x67, 0x73, 0x74, 0x61, 0x74, 0x73, 0x64, 0x55, 0x72, 0x6c, 0x12, - 0x1d, 0x0a, 0x0a, 0x73, 0x74, 0x61, 0x74, 0x73, 0x5f, 0x74, 0x61, 0x67, 0x73, 0x18, 0x03, 0x20, - 0x03, 0x28, 0x09, 0x52, 0x09, 0x73, 0x74, 0x61, 0x74, 0x73, 0x54, 0x61, 0x67, 0x73, 0x12, 0x30, - 0x0a, 0x14, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x5f, 0x62, 0x69, 0x6e, - 0x64, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x70, 0x72, - 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x42, 0x69, 0x6e, 0x64, 0x41, 0x64, 0x64, 0x72, - 0x12, 0x26, 0x0a, 0x0f, 0x73, 0x74, 0x61, 0x74, 0x73, 0x5f, 0x62, 0x69, 0x6e, 0x64, 0x5f, 0x61, - 0x64, 0x64, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x73, 0x74, 0x61, 0x74, 0x73, - 0x42, 0x69, 0x6e, 0x64, 0x41, 0x64, 0x64, 0x72, 0x12, 0x26, 0x0a, 0x0f, 0x72, 0x65, 0x61, 0x64, - 0x79, 0x5f, 0x62, 0x69, 0x6e, 0x64, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x18, 0x06, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x0d, 0x72, 0x65, 0x61, 0x64, 0x79, 0x42, 0x69, 0x6e, 0x64, 0x41, 0x64, 0x64, 0x72, - 0x12, 0x2a, 0x0a, 0x11, 0x6f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x5f, 0x6a, 0x73, 0x6f, - 0x6e, 0x5f, 0x74, 0x70, 0x6c, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x6f, 0x76, 0x65, - 0x72, 0x72, 0x69, 0x64, 0x65, 0x4a, 0x73, 0x6f, 0x6e, 0x54, 0x70, 0x6c, 0x12, 0x30, 0x0a, 0x14, - 0x73, 0x74, 0x61, 0x74, 0x69, 0x63, 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x5f, - 0x6a, 0x73, 0x6f, 0x6e, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x73, 0x74, 0x61, 0x74, - 0x69, 0x63, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x4a, 0x73, 0x6f, 0x6e, 0x12, 0x32, - 0x0a, 0x15, 0x73, 0x74, 0x61, 0x74, 0x69, 0x63, 0x5f, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, - 0x72, 0x73, 0x5f, 0x6a, 0x73, 0x6f, 0x6e, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, 0x73, - 0x74, 0x61, 0x74, 0x69, 0x63, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x73, 0x4a, 0x73, - 0x6f, 0x6e, 0x12, 0x28, 0x0a, 0x10, 0x73, 0x74, 0x61, 0x74, 0x73, 0x5f, 0x73, 0x69, 0x6e, 0x6b, - 0x73, 0x5f, 0x6a, 0x73, 0x6f, 0x6e, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x73, 0x74, - 0x61, 0x74, 0x73, 0x53, 0x69, 0x6e, 0x6b, 0x73, 0x4a, 0x73, 0x6f, 0x6e, 0x12, 0x2a, 0x0a, 0x11, - 0x73, 0x74, 0x61, 0x74, 0x73, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x6a, 0x73, 0x6f, - 0x6e, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x73, 0x74, 0x61, 0x74, 0x73, 0x43, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x4a, 0x73, 0x6f, 0x6e, 0x12, 0x30, 0x0a, 0x14, 0x73, 0x74, 0x61, 0x74, - 0x73, 0x5f, 0x66, 0x6c, 0x75, 0x73, 0x68, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, - 0x18, 0x0c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x73, 0x74, 0x61, 0x74, 0x73, 0x46, 0x6c, 0x75, - 0x73, 0x68, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x12, 0x2e, 0x0a, 0x13, 0x74, 0x72, - 0x61, 0x63, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x6a, 0x73, 0x6f, - 0x6e, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x74, 0x72, 0x61, 0x63, 0x69, 0x6e, 0x67, - 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x4a, 0x73, 0x6f, 0x6e, 0x12, 0x4c, 0x0a, 0x23, 0x74, 0x65, - 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x5f, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x6f, - 0x72, 0x5f, 0x62, 0x69, 0x6e, 0x64, 0x5f, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x64, 0x69, - 0x72, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x09, 0x52, 0x1f, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, - 0x72, 0x79, 0x43, 0x6f, 
0x6c, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x42, 0x69, 0x6e, 0x64, 0x53, - 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x44, 0x69, 0x72, 0x22, 0xf6, 0x01, 0x0a, 0x10, 0x41, 0x63, 0x63, - 0x65, 0x73, 0x73, 0x4c, 0x6f, 0x67, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x18, 0x0a, - 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, - 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x32, 0x0a, 0x15, 0x64, 0x69, 0x73, 0x61, 0x62, - 0x6c, 0x65, 0x5f, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x5f, 0x6c, 0x6f, 0x67, 0x73, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x13, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x4c, - 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x4c, 0x6f, 0x67, 0x73, 0x12, 0x3e, 0x0a, 0x04, 0x74, - 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2a, 0x2e, 0x68, 0x61, 0x73, 0x68, - 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x6d, 0x65, 0x73, - 0x68, 0x2e, 0x76, 0x32, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x4c, 0x6f, 0x67, 0x53, 0x69, 0x6e, - 0x6b, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x70, - 0x61, 0x74, 0x68, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, - 0x1f, 0x0a, 0x0b, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x05, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x6a, 0x73, 0x6f, 0x6e, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, - 0x12, 0x1f, 0x0a, 0x0b, 0x74, 0x65, 0x78, 0x74, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, - 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x74, 0x65, 0x78, 0x74, 0x46, 0x6f, 0x72, 0x6d, 0x61, - 0x74, 0x22, 0xc3, 0x01, 0x0a, 0x0e, 0x45, 0x6e, 0x76, 0x6f, 0x79, 0x45, 0x78, 0x74, 0x65, 0x6e, - 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x71, 0x75, - 0x69, 0x72, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x71, 0x75, - 0x69, 0x72, 0x65, 0x64, 0x12, 0x35, 0x0a, 0x09, 0x61, 0x72, 0x67, 0x75, 0x6d, 0x65, 0x6e, 0x74, - 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, - 0x52, 0x09, 0x61, 0x72, 0x67, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x63, - 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x0d, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x56, 0x65, 0x72, 0x73, 0x69, - 0x6f, 0x6e, 0x12, 0x23, 0x0a, 0x0d, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x5f, 0x76, 0x65, 0x72, 0x73, - 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x65, 0x6e, 0x76, 0x6f, 0x79, - 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x2a, 0x56, 0x0a, 0x09, 0x50, 0x72, 0x6f, 0x78, 0x79, - 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x16, 0x0a, 0x12, 0x50, 0x52, 0x4f, 0x58, 0x59, 0x5f, 0x4d, 0x4f, - 0x44, 0x45, 0x5f, 0x44, 0x45, 0x46, 0x41, 0x55, 0x4c, 0x54, 0x10, 0x00, 0x12, 0x1a, 0x0a, 0x16, - 0x50, 0x52, 0x4f, 0x58, 0x59, 0x5f, 0x4d, 0x4f, 0x44, 0x45, 0x5f, 0x54, 0x52, 0x41, 0x4e, 0x53, - 0x50, 0x41, 0x52, 0x45, 0x4e, 0x54, 0x10, 0x01, 0x12, 0x15, 0x0a, 0x11, 0x50, 0x52, 0x4f, 0x58, - 0x59, 0x5f, 0x4d, 0x4f, 0x44, 0x45, 0x5f, 0x44, 0x49, 0x52, 0x45, 0x43, 0x54, 0x10, 0x02, 0x2a, - 0x74, 0x0a, 0x0b, 0x4c, 0x6f, 0x67, 0x53, 0x69, 0x6e, 0x6b, 0x54, 0x79, 0x70, 0x65, 0x12, 0x19, - 0x0a, 0x15, 0x4c, 0x4f, 0x47, 0x5f, 0x53, 0x49, 
0x4e, 0x4b, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, - 0x44, 0x45, 0x46, 0x41, 0x55, 0x4c, 0x54, 0x10, 0x00, 0x12, 0x16, 0x0a, 0x12, 0x4c, 0x4f, 0x47, - 0x5f, 0x53, 0x49, 0x4e, 0x4b, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 0x49, 0x4c, 0x45, 0x10, - 0x01, 0x12, 0x18, 0x0a, 0x14, 0x4c, 0x4f, 0x47, 0x5f, 0x53, 0x49, 0x4e, 0x4b, 0x5f, 0x54, 0x59, - 0x50, 0x45, 0x5f, 0x53, 0x54, 0x44, 0x45, 0x52, 0x52, 0x10, 0x02, 0x12, 0x18, 0x0a, 0x14, 0x4c, - 0x4f, 0x47, 0x5f, 0x53, 0x49, 0x4e, 0x4b, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x54, 0x44, - 0x4f, 0x55, 0x54, 0x10, 0x03, 0x2a, 0x68, 0x0a, 0x0d, 0x4d, 0x75, 0x74, 0x75, 0x61, 0x6c, 0x54, - 0x4c, 0x53, 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x1b, 0x0a, 0x17, 0x4d, 0x55, 0x54, 0x55, 0x41, 0x4c, - 0x5f, 0x54, 0x4c, 0x53, 0x5f, 0x4d, 0x4f, 0x44, 0x45, 0x5f, 0x44, 0x45, 0x46, 0x41, 0x55, 0x4c, - 0x54, 0x10, 0x00, 0x12, 0x1a, 0x0a, 0x16, 0x4d, 0x55, 0x54, 0x55, 0x41, 0x4c, 0x5f, 0x54, 0x4c, - 0x53, 0x5f, 0x4d, 0x4f, 0x44, 0x45, 0x5f, 0x53, 0x54, 0x52, 0x49, 0x43, 0x54, 0x10, 0x01, 0x12, - 0x1e, 0x0a, 0x1a, 0x4d, 0x55, 0x54, 0x55, 0x41, 0x4c, 0x5f, 0x54, 0x4c, 0x53, 0x5f, 0x4d, 0x4f, - 0x44, 0x45, 0x5f, 0x50, 0x45, 0x52, 0x4d, 0x49, 0x53, 0x53, 0x49, 0x56, 0x45, 0x10, 0x02, 0x42, - 0x98, 0x02, 0x0a, 0x21, 0x63, 0x6f, 0x6d, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, - 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x6d, 0x65, 0x73, 0x68, 0x2e, 0x76, 0x32, - 0x62, 0x65, 0x74, 0x61, 0x31, 0x42, 0x17, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x43, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, - 0x5a, 0x43, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x68, 0x61, 0x73, - 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2f, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2f, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x2d, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x2f, 0x70, 0x62, 0x6d, 0x65, 0x73, - 0x68, 0x2f, 0x76, 0x32, 0x62, 0x65, 0x74, 0x61, 0x31, 0x3b, 0x6d, 0x65, 0x73, 0x68, 0x76, 0x32, - 0x62, 0x65, 0x74, 0x61, 0x31, 0xa2, 0x02, 0x03, 0x48, 0x43, 0x4d, 0xaa, 0x02, 0x1d, 0x48, 0x61, - 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x4d, - 0x65, 0x73, 0x68, 0x2e, 0x56, 0x32, 0x62, 0x65, 0x74, 0x61, 0x31, 0xca, 0x02, 0x1d, 0x48, 0x61, - 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x5c, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x5c, 0x4d, - 0x65, 0x73, 0x68, 0x5c, 0x56, 0x32, 0x62, 0x65, 0x74, 0x61, 0x31, 0xe2, 0x02, 0x29, 0x48, 0x61, - 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x5c, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x5c, 0x4d, - 0x65, 0x73, 0x68, 0x5c, 0x56, 0x32, 0x62, 0x65, 0x74, 0x61, 0x31, 0x5c, 0x47, 0x50, 0x42, 0x4d, - 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, 0x02, 0x20, 0x48, 0x61, 0x73, 0x68, 0x69, 0x63, - 0x6f, 0x72, 0x70, 0x3a, 0x3a, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x3a, 0x3a, 0x4d, 0x65, 0x73, - 0x68, 0x3a, 0x3a, 0x56, 0x32, 0x62, 0x65, 0x74, 0x61, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x33, + 0x67, 0x73, 0x12, 0x30, 0x0a, 0x14, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x5f, 0x6c, 0x69, 0x73, + 0x74, 0x65, 0x6e, 0x65, 0x72, 0x5f, 0x6a, 0x73, 0x6f, 0x6e, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x12, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, + 0x4a, 0x73, 0x6f, 0x6e, 0x12, 0x32, 0x0a, 0x15, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, + 0x5f, 0x74, 0x72, 0x61, 0x63, 0x69, 0x6e, 0x67, 0x5f, 0x6a, 0x73, 0x6f, 0x6e, 0x18, 0x0a, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x13, 0x6c, 0x69, 0x73, 0x74, 0x65, 
0x6e, 0x65, 0x72, 0x54, 0x72, 0x61, + 0x63, 0x69, 0x6e, 0x67, 0x4a, 0x73, 0x6f, 0x6e, 0x12, 0x2c, 0x0a, 0x12, 0x6c, 0x6f, 0x63, 0x61, + 0x6c, 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x6a, 0x73, 0x6f, 0x6e, 0x18, 0x0b, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x43, 0x6c, 0x75, 0x73, 0x74, + 0x65, 0x72, 0x4a, 0x73, 0x6f, 0x6e, 0x12, 0x38, 0x0a, 0x16, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, + 0x77, 0x6f, 0x72, 0x6b, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, + 0x18, 0x0c, 0x20, 0x01, 0x28, 0x09, 0x42, 0x02, 0x18, 0x01, 0x52, 0x14, 0x6c, 0x6f, 0x63, 0x61, + 0x6c, 0x57, 0x6f, 0x72, 0x6b, 0x6c, 0x6f, 0x61, 0x64, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, + 0x12, 0x32, 0x0a, 0x13, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x77, 0x6f, 0x72, 0x6b, 0x6c, 0x6f, + 0x61, 0x64, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x0d, 0x42, 0x02, 0x18, + 0x01, 0x52, 0x11, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x57, 0x6f, 0x72, 0x6b, 0x6c, 0x6f, 0x61, 0x64, + 0x50, 0x6f, 0x72, 0x74, 0x12, 0x3f, 0x0a, 0x1a, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x77, 0x6f, + 0x72, 0x6b, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x70, 0x61, + 0x74, 0x68, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x09, 0x42, 0x02, 0x18, 0x01, 0x52, 0x17, 0x6c, 0x6f, + 0x63, 0x61, 0x6c, 0x57, 0x6f, 0x72, 0x6b, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x6f, 0x63, 0x6b, 0x65, + 0x74, 0x50, 0x61, 0x74, 0x68, 0x1a, 0x73, 0x0a, 0x14, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x43, 0x6f, + 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, + 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, + 0x45, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, + 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, + 0x6c, 0x2e, 0x6d, 0x65, 0x73, 0x68, 0x2e, 0x76, 0x32, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x43, + 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, + 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x71, 0x0a, 0x10, 0x54, 0x72, + 0x61, 0x6e, 0x73, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x12, 0x34, + 0x0a, 0x16, 0x6f, 0x75, 0x74, 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x5f, 0x6c, 0x69, 0x73, 0x74, 0x65, + 0x6e, 0x65, 0x72, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x14, + 0x6f, 0x75, 0x74, 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, + 0x50, 0x6f, 0x72, 0x74, 0x12, 0x27, 0x0a, 0x0f, 0x64, 0x69, 0x61, 0x6c, 0x65, 0x64, 0x5f, 0x64, + 0x69, 0x72, 0x65, 0x63, 0x74, 0x6c, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x64, + 0x69, 0x61, 0x6c, 0x65, 0x64, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6c, 0x79, 0x22, 0x8e, 0x05, + 0x0a, 0x0f, 0x42, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x74, 0x61, 0x74, 0x73, 0x64, 0x5f, 0x75, 0x72, 0x6c, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x74, 0x61, 0x74, 0x73, 0x64, 0x55, 0x72, 0x6c, + 0x12, 0x23, 0x0a, 0x0d, 0x64, 0x6f, 0x67, 0x73, 0x74, 0x61, 0x74, 0x73, 0x64, 0x5f, 0x75, 0x72, + 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x64, 0x6f, 0x67, 0x73, 0x74, 0x61, 0x74, + 0x73, 0x64, 0x55, 0x72, 0x6c, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x74, 0x61, 0x74, 0x73, 0x5f, 0x74, + 0x61, 0x67, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x09, 0x73, 0x74, 0x61, 
0x74, 0x73, + 0x54, 0x61, 0x67, 0x73, 0x12, 0x30, 0x0a, 0x14, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, + 0x75, 0x73, 0x5f, 0x62, 0x69, 0x6e, 0x64, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x12, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x42, 0x69, + 0x6e, 0x64, 0x41, 0x64, 0x64, 0x72, 0x12, 0x26, 0x0a, 0x0f, 0x73, 0x74, 0x61, 0x74, 0x73, 0x5f, + 0x62, 0x69, 0x6e, 0x64, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0d, 0x73, 0x74, 0x61, 0x74, 0x73, 0x42, 0x69, 0x6e, 0x64, 0x41, 0x64, 0x64, 0x72, 0x12, 0x26, + 0x0a, 0x0f, 0x72, 0x65, 0x61, 0x64, 0x79, 0x5f, 0x62, 0x69, 0x6e, 0x64, 0x5f, 0x61, 0x64, 0x64, + 0x72, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x72, 0x65, 0x61, 0x64, 0x79, 0x42, 0x69, + 0x6e, 0x64, 0x41, 0x64, 0x64, 0x72, 0x12, 0x2a, 0x0a, 0x11, 0x6f, 0x76, 0x65, 0x72, 0x72, 0x69, + 0x64, 0x65, 0x5f, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x74, 0x70, 0x6c, 0x18, 0x07, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0f, 0x6f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x4a, 0x73, 0x6f, 0x6e, 0x54, + 0x70, 0x6c, 0x12, 0x30, 0x0a, 0x14, 0x73, 0x74, 0x61, 0x74, 0x69, 0x63, 0x5f, 0x63, 0x6c, 0x75, + 0x73, 0x74, 0x65, 0x72, 0x73, 0x5f, 0x6a, 0x73, 0x6f, 0x6e, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x12, 0x73, 0x74, 0x61, 0x74, 0x69, 0x63, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, + 0x4a, 0x73, 0x6f, 0x6e, 0x12, 0x32, 0x0a, 0x15, 0x73, 0x74, 0x61, 0x74, 0x69, 0x63, 0x5f, 0x6c, + 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x73, 0x5f, 0x6a, 0x73, 0x6f, 0x6e, 0x18, 0x09, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x13, 0x73, 0x74, 0x61, 0x74, 0x69, 0x63, 0x4c, 0x69, 0x73, 0x74, 0x65, + 0x6e, 0x65, 0x72, 0x73, 0x4a, 0x73, 0x6f, 0x6e, 0x12, 0x28, 0x0a, 0x10, 0x73, 0x74, 0x61, 0x74, + 0x73, 0x5f, 0x73, 0x69, 0x6e, 0x6b, 0x73, 0x5f, 0x6a, 0x73, 0x6f, 0x6e, 0x18, 0x0a, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0e, 0x73, 0x74, 0x61, 0x74, 0x73, 0x53, 0x69, 0x6e, 0x6b, 0x73, 0x4a, 0x73, + 0x6f, 0x6e, 0x12, 0x2a, 0x0a, 0x11, 0x73, 0x74, 0x61, 0x74, 0x73, 0x5f, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x5f, 0x6a, 0x73, 0x6f, 0x6e, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x73, + 0x74, 0x61, 0x74, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x4a, 0x73, 0x6f, 0x6e, 0x12, 0x30, + 0x0a, 0x14, 0x73, 0x74, 0x61, 0x74, 0x73, 0x5f, 0x66, 0x6c, 0x75, 0x73, 0x68, 0x5f, 0x69, 0x6e, + 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x73, 0x74, + 0x61, 0x74, 0x73, 0x46, 0x6c, 0x75, 0x73, 0x68, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, + 0x12, 0x2e, 0x0a, 0x13, 0x74, 0x72, 0x61, 0x63, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x5f, 0x6a, 0x73, 0x6f, 0x6e, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x74, + 0x72, 0x61, 0x63, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x4a, 0x73, 0x6f, 0x6e, + 0x12, 0x4c, 0x0a, 0x23, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x5f, 0x63, 0x6f, + 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x5f, 0x62, 0x69, 0x6e, 0x64, 0x5f, 0x73, 0x6f, 0x63, + 0x6b, 0x65, 0x74, 0x5f, 0x64, 0x69, 0x72, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x09, 0x52, 0x1f, 0x74, + 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x6f, + 0x72, 0x42, 0x69, 0x6e, 0x64, 0x53, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x44, 0x69, 0x72, 0x22, 0xf6, + 0x01, 0x0a, 0x10, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4c, 0x6f, 0x67, 0x73, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x01, + 0x20, 0x01, 
0x28, 0x08, 0x52, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x32, 0x0a, + 0x15, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, + 0x72, 0x5f, 0x6c, 0x6f, 0x67, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x13, 0x64, 0x69, + 0x73, 0x61, 0x62, 0x6c, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x4c, 0x6f, 0x67, + 0x73, 0x12, 0x3e, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, + 0x2a, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, + 0x75, 0x6c, 0x2e, 0x6d, 0x65, 0x73, 0x68, 0x2e, 0x76, 0x32, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, + 0x4c, 0x6f, 0x67, 0x53, 0x69, 0x6e, 0x6b, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, + 0x65, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x1f, 0x0a, 0x0b, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x66, 0x6f, + 0x72, 0x6d, 0x61, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x6a, 0x73, 0x6f, 0x6e, + 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x74, 0x65, 0x78, 0x74, 0x5f, 0x66, + 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x74, 0x65, 0x78, + 0x74, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x22, 0xc3, 0x01, 0x0a, 0x0e, 0x45, 0x6e, 0x76, 0x6f, + 0x79, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, + 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1a, + 0x0a, 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x12, 0x35, 0x0a, 0x09, 0x61, 0x72, + 0x67, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, 0x09, 0x61, 0x72, 0x67, 0x75, 0x6d, 0x65, 0x6e, 0x74, + 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x5f, 0x76, 0x65, 0x72, 0x73, + 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x63, 0x6f, 0x6e, 0x73, 0x75, + 0x6c, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x23, 0x0a, 0x0d, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0c, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x2a, 0x56, 0x0a, + 0x09, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x16, 0x0a, 0x12, 0x50, 0x52, + 0x4f, 0x58, 0x59, 0x5f, 0x4d, 0x4f, 0x44, 0x45, 0x5f, 0x44, 0x45, 0x46, 0x41, 0x55, 0x4c, 0x54, + 0x10, 0x00, 0x12, 0x1a, 0x0a, 0x16, 0x50, 0x52, 0x4f, 0x58, 0x59, 0x5f, 0x4d, 0x4f, 0x44, 0x45, + 0x5f, 0x54, 0x52, 0x41, 0x4e, 0x53, 0x50, 0x41, 0x52, 0x45, 0x4e, 0x54, 0x10, 0x01, 0x12, 0x15, + 0x0a, 0x11, 0x50, 0x52, 0x4f, 0x58, 0x59, 0x5f, 0x4d, 0x4f, 0x44, 0x45, 0x5f, 0x44, 0x49, 0x52, + 0x45, 0x43, 0x54, 0x10, 0x02, 0x2a, 0x74, 0x0a, 0x0b, 0x4c, 0x6f, 0x67, 0x53, 0x69, 0x6e, 0x6b, + 0x54, 0x79, 0x70, 0x65, 0x12, 0x19, 0x0a, 0x15, 0x4c, 0x4f, 0x47, 0x5f, 0x53, 0x49, 0x4e, 0x4b, + 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x44, 0x45, 0x46, 0x41, 0x55, 0x4c, 0x54, 0x10, 0x00, 0x12, + 0x16, 0x0a, 0x12, 0x4c, 0x4f, 0x47, 0x5f, 0x53, 0x49, 0x4e, 0x4b, 0x5f, 0x54, 0x59, 0x50, 0x45, + 0x5f, 0x46, 0x49, 0x4c, 0x45, 0x10, 0x01, 0x12, 0x18, 0x0a, 0x14, 0x4c, 0x4f, 0x47, 0x5f, 0x53, + 0x49, 0x4e, 0x4b, 0x5f, 0x54, 0x59, 
0x50, 0x45, 0x5f, 0x53, 0x54, 0x44, 0x45, 0x52, 0x52, 0x10, + 0x02, 0x12, 0x18, 0x0a, 0x14, 0x4c, 0x4f, 0x47, 0x5f, 0x53, 0x49, 0x4e, 0x4b, 0x5f, 0x54, 0x59, + 0x50, 0x45, 0x5f, 0x53, 0x54, 0x44, 0x4f, 0x55, 0x54, 0x10, 0x03, 0x2a, 0x68, 0x0a, 0x0d, 0x4d, + 0x75, 0x74, 0x75, 0x61, 0x6c, 0x54, 0x4c, 0x53, 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x1b, 0x0a, 0x17, + 0x4d, 0x55, 0x54, 0x55, 0x41, 0x4c, 0x5f, 0x54, 0x4c, 0x53, 0x5f, 0x4d, 0x4f, 0x44, 0x45, 0x5f, + 0x44, 0x45, 0x46, 0x41, 0x55, 0x4c, 0x54, 0x10, 0x00, 0x12, 0x1a, 0x0a, 0x16, 0x4d, 0x55, 0x54, + 0x55, 0x41, 0x4c, 0x5f, 0x54, 0x4c, 0x53, 0x5f, 0x4d, 0x4f, 0x44, 0x45, 0x5f, 0x53, 0x54, 0x52, + 0x49, 0x43, 0x54, 0x10, 0x01, 0x12, 0x1e, 0x0a, 0x1a, 0x4d, 0x55, 0x54, 0x55, 0x41, 0x4c, 0x5f, + 0x54, 0x4c, 0x53, 0x5f, 0x4d, 0x4f, 0x44, 0x45, 0x5f, 0x50, 0x45, 0x52, 0x4d, 0x49, 0x53, 0x53, + 0x49, 0x56, 0x45, 0x10, 0x02, 0x42, 0x98, 0x02, 0x0a, 0x21, 0x63, 0x6f, 0x6d, 0x2e, 0x68, 0x61, + 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x6d, + 0x65, 0x73, 0x68, 0x2e, 0x76, 0x32, 0x62, 0x65, 0x74, 0x61, 0x31, 0x42, 0x17, 0x50, 0x72, 0x6f, + 0x78, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, + 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x43, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, + 0x6f, 0x6d, 0x2f, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2f, 0x63, 0x6f, 0x6e, + 0x73, 0x75, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2d, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, + 0x2f, 0x70, 0x62, 0x6d, 0x65, 0x73, 0x68, 0x2f, 0x76, 0x32, 0x62, 0x65, 0x74, 0x61, 0x31, 0x3b, + 0x6d, 0x65, 0x73, 0x68, 0x76, 0x32, 0x62, 0x65, 0x74, 0x61, 0x31, 0xa2, 0x02, 0x03, 0x48, 0x43, + 0x4d, 0xaa, 0x02, 0x1d, 0x48, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x43, 0x6f, + 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x4d, 0x65, 0x73, 0x68, 0x2e, 0x56, 0x32, 0x62, 0x65, 0x74, 0x61, + 0x31, 0xca, 0x02, 0x1d, 0x48, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x5c, 0x43, 0x6f, + 0x6e, 0x73, 0x75, 0x6c, 0x5c, 0x4d, 0x65, 0x73, 0x68, 0x5c, 0x56, 0x32, 0x62, 0x65, 0x74, 0x61, + 0x31, 0xe2, 0x02, 0x29, 0x48, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x5c, 0x43, 0x6f, + 0x6e, 0x73, 0x75, 0x6c, 0x5c, 0x4d, 0x65, 0x73, 0x68, 0x5c, 0x56, 0x32, 0x62, 0x65, 0x74, 0x61, + 0x31, 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, 0x02, 0x20, + 0x48, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x3a, 0x3a, 0x43, 0x6f, 0x6e, 0x73, 0x75, + 0x6c, 0x3a, 0x3a, 0x4d, 0x65, 0x73, 0x68, 0x3a, 0x3a, 0x56, 0x32, 0x62, 0x65, 0x74, 0x61, 0x31, + 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -1127,15 +1113,14 @@ var file_pbmesh_v2beta1_proxy_configuration_proto_depIdxs = []int32{ 13, // 9: hashicorp.consul.mesh.v2beta1.DynamicConfig.mesh_gateway_mode:type_name -> hashicorp.consul.mesh.v2beta1.MeshGatewayMode 14, // 10: hashicorp.consul.mesh.v2beta1.DynamicConfig.expose_config:type_name -> hashicorp.consul.mesh.v2beta1.ExposeConfig 7, // 11: hashicorp.consul.mesh.v2beta1.DynamicConfig.access_logs:type_name -> hashicorp.consul.mesh.v2beta1.AccessLogsConfig - 8, // 12: hashicorp.consul.mesh.v2beta1.DynamicConfig.envoy_extensions:type_name -> hashicorp.consul.mesh.v2beta1.EnvoyExtension - 1, // 13: hashicorp.consul.mesh.v2beta1.AccessLogsConfig.type:type_name -> hashicorp.consul.mesh.v2beta1.LogSinkType - 11, // 14: hashicorp.consul.mesh.v2beta1.EnvoyExtension.arguments:type_name -> google.protobuf.Struct - 15, // 15: 
hashicorp.consul.mesh.v2beta1.DynamicConfig.LocalConnectionEntry.value:type_name -> hashicorp.consul.mesh.v2beta1.ConnectionConfig - 16, // [16:16] is the sub-list for method output_type - 16, // [16:16] is the sub-list for method input_type - 16, // [16:16] is the sub-list for extension type_name - 16, // [16:16] is the sub-list for extension extendee - 0, // [0:16] is the sub-list for field type_name + 1, // 12: hashicorp.consul.mesh.v2beta1.AccessLogsConfig.type:type_name -> hashicorp.consul.mesh.v2beta1.LogSinkType + 11, // 13: hashicorp.consul.mesh.v2beta1.EnvoyExtension.arguments:type_name -> google.protobuf.Struct + 15, // 14: hashicorp.consul.mesh.v2beta1.DynamicConfig.LocalConnectionEntry.value:type_name -> hashicorp.consul.mesh.v2beta1.ConnectionConfig + 15, // [15:15] is the sub-list for method output_type + 15, // [15:15] is the sub-list for method input_type + 15, // [15:15] is the sub-list for extension type_name + 15, // [15:15] is the sub-list for extension extendee + 0, // [0:15] is the sub-list for field type_name } func init() { file_pbmesh_v2beta1_proxy_configuration_proto_init() } diff --git a/proto-public/pbmesh/v2beta1/proxy_configuration.proto b/proto-public/pbmesh/v2beta1/proxy_configuration.proto index eb47e16e2282..9a2d410d868d 100644 --- a/proto-public/pbmesh/v2beta1/proxy_configuration.proto +++ b/proto-public/pbmesh/v2beta1/proxy_configuration.proto @@ -60,18 +60,16 @@ message DynamicConfig { // AccessLogs configures the output and format of Envoy access logs AccessLogsConfig access_logs = 8; - repeated EnvoyExtension envoy_extensions = 9; - - string public_listener_json = 10; - string listener_tracing_json = 11; - string local_cluster_json = 12; + string public_listener_json = 9; + string listener_tracing_json = 10; + string local_cluster_json = 11; // deprecated: // local_workload_address, local_workload_port, and local_workload_socket_path // are deprecated and are only needed for migration of existing resources. - string local_workload_address = 13 [deprecated = true]; - uint32 local_workload_port = 14 [deprecated = true]; - string local_workload_socket_path = 15 [deprecated = true]; + string local_workload_address = 12 [deprecated = true]; + uint32 local_workload_port = 13 [deprecated = true]; + string local_workload_socket_path = 14 [deprecated = true]; } message TransparentProxy { diff --git a/proto-public/pbmesh/v2beta1/routing.pb.go b/proto-public/pbmesh/v2beta1/routing.pb.go index 163632eb3054..4b1f64877431 100644 --- a/proto-public/pbmesh/v2beta1/routing.pb.go +++ b/proto-public/pbmesh/v2beta1/routing.pb.go @@ -29,7 +29,7 @@ type MeshGatewayMode int32 const ( // MESH_GATEWAY_MODE_UNSPECIFIED represents no specific mode and should be - // used to indicate that a the decision on the mode will be made by other + // used to indicate that the decision on the mode will be made by other // configuration or default settings. 
MeshGatewayMode_MESH_GATEWAY_MODE_UNSPECIFIED MeshGatewayMode = 0 // MESH_GATEWAY_MODE_NONE is the mode to use when traffic should not be diff --git a/proto-public/pbmesh/v2beta1/routing.proto b/proto-public/pbmesh/v2beta1/routing.proto index 5c28840a72dc..b09d28a6458a 100644 --- a/proto-public/pbmesh/v2beta1/routing.proto +++ b/proto-public/pbmesh/v2beta1/routing.proto @@ -9,7 +9,7 @@ package hashicorp.consul.mesh.v2beta1; // +kubebuilder:validation:Type=string enum MeshGatewayMode { // MESH_GATEWAY_MODE_UNSPECIFIED represents no specific mode and should be - // used to indicate that a the decision on the mode will be made by other + // used to indicate that the decision on the mode will be made by other // configuration or default settings. MESH_GATEWAY_MODE_UNSPECIFIED = 0; From 2ea33e9b865330b7b1a77755244b7d16b1afe222 Mon Sep 17 00:00:00 2001 From: Iryna Shustava Date: Fri, 13 Oct 2023 16:52:20 -0600 Subject: [PATCH 005/130] mesh: add more validations to Destinations resource (#19202) --- command/resource/testdata/nested_data.hcl | 1 + internal/catalog/exports.go | 8 + .../catalog/internal/types/failover_policy.go | 4 +- internal/catalog/internal/types/service.go | 2 +- .../internal/types/service_endpoints.go | 2 +- internal/catalog/internal/types/validators.go | 8 +- .../catalog/internal/types/validators_test.go | 8 +- internal/catalog/internal/types/workload.go | 2 +- .../sidecarproxy/controller_test.go | 4 +- internal/mesh/internal/types/destinations.go | 81 ++++++- .../mesh/internal/types/destinations_test.go | 201 ++++++++++++++++-- internal/mesh/internal/types/errors.go | 2 + internal/resource/errors.go | 1 + 13 files changed, 288 insertions(+), 36 deletions(-) diff --git a/command/resource/testdata/nested_data.hcl b/command/resource/testdata/nested_data.hcl index aab8aa401ee9..b62875c732b4 100644 --- a/command/resource/testdata/nested_data.hcl +++ b/command/resource/testdata/nested_data.hcl @@ -21,6 +21,7 @@ Data { DestinationPort = "tcp" IpPort = { + Ip = "127.0.0.1" Port = 1234 } } diff --git a/internal/catalog/exports.go b/internal/catalog/exports.go index c4e70ffbefe1..b74149d2aec4 100644 --- a/internal/catalog/exports.go +++ b/internal/catalog/exports.go @@ -110,3 +110,11 @@ func ValidateLocalServiceRefNoSection(ref *pbresource.Reference, wrapErr func(er func ValidateSelector(sel *pbcatalog.WorkloadSelector, allowEmpty bool) error { return types.ValidateSelector(sel, allowEmpty) } + +func ValidatePortName(name string) error { + return types.ValidatePortName(name) +} + +func IsValidUnixSocketPath(host string) bool { + return types.IsValidUnixSocketPath(host) +} diff --git a/internal/catalog/internal/types/failover_policy.go b/internal/catalog/internal/types/failover_policy.go index 4dc8b1bd8eb0..620c3f590e97 100644 --- a/internal/catalog/internal/types/failover_policy.go +++ b/internal/catalog/internal/types/failover_policy.go @@ -145,7 +145,7 @@ func ValidateFailoverPolicy(res *pbresource.Resource) error { Wrapped: err, } } - if portNameErr := validatePortName(portName); portNameErr != nil { + if portNameErr := ValidatePortName(portName); portNameErr != nil { merr = multierror.Append(merr, resource.ErrInvalidMapKey{ Map: "port_configs", Key: portName, @@ -245,7 +245,7 @@ func validateFailoverPolicyDestination(dest *pbcatalog.FailoverDestination, port // assumed and will be reconciled. 
if dest.Port != "" { if ported { - if portNameErr := validatePortName(dest.Port); portNameErr != nil { + if portNameErr := ValidatePortName(dest.Port); portNameErr != nil { merr = multierror.Append(merr, wrapErr(resource.ErrInvalidField{ Name: "port", Wrapped: portNameErr, diff --git a/internal/catalog/internal/types/service.go b/internal/catalog/internal/types/service.go index 4cefb362e78f..9fa703641c7f 100644 --- a/internal/catalog/internal/types/service.go +++ b/internal/catalog/internal/types/service.go @@ -89,7 +89,7 @@ func ValidateService(res *pbresource.Resource) error { } // validate the target port - if nameErr := validatePortName(port.TargetPort); nameErr != nil { + if nameErr := ValidatePortName(port.TargetPort); nameErr != nil { err = multierror.Append(err, resource.ErrInvalidListElement{ Name: "ports", Index: idx, diff --git a/internal/catalog/internal/types/service_endpoints.go b/internal/catalog/internal/types/service_endpoints.go index 8008ada845b4..938d92b79239 100644 --- a/internal/catalog/internal/types/service_endpoints.go +++ b/internal/catalog/internal/types/service_endpoints.go @@ -126,7 +126,7 @@ func validateEndpoint(endpoint *pbcatalog.Endpoint, res *pbresource.Resource) er // Validate the endpoints ports for portName, port := range endpoint.Ports { // Port names must be DNS labels - if portNameErr := validatePortName(portName); portNameErr != nil { + if portNameErr := ValidatePortName(portName); portNameErr != nil { err = multierror.Append(err, resource.ErrInvalidMapKey{ Map: "ports", Key: portName, diff --git a/internal/catalog/internal/types/validators.go b/internal/catalog/internal/types/validators.go index 542bb705b5aa..f3b436329941 100644 --- a/internal/catalog/internal/types/validators.go +++ b/internal/catalog/internal/types/validators.go @@ -56,7 +56,7 @@ func isValidDNSLabel(label string) bool { return dnsLabelMatcher.Match([]byte(label)) } -func isValidUnixSocketPath(host string) bool { +func IsValidUnixSocketPath(host string) bool { if len(host) > maxUnixSocketPathLen || !strings.HasPrefix(host, "unix://") || strings.Contains(host, "\000") { return false } @@ -71,7 +71,7 @@ func validateWorkloadHost(host string) error { } // Check if the host represents an IP address, unix socket path or a DNS name - if !isValidIPAddress(host) && !isValidUnixSocketPath(host) && !isValidDNSName(host) { + if !isValidIPAddress(host) && !IsValidUnixSocketPath(host) && !isValidDNSName(host) { return errInvalidWorkloadHostFormat{Host: host} } @@ -139,7 +139,7 @@ func validateIPAddress(ip string) error { return nil } -func validatePortName(name string) error { +func ValidatePortName(name string) error { if name == "" { return resource.ErrEmpty } @@ -184,7 +184,7 @@ func validateWorkloadAddress(addr *pbcatalog.WorkloadAddress, ports map[string]* // Ensure that unix sockets reference exactly 1 port. They may also indirectly reference 1 port // by the workload having only a single port and omitting any explicit port assignment. 
- if isValidUnixSocketPath(addr.Host) && + if IsValidUnixSocketPath(addr.Host) && (len(addr.Ports) > 1 || (len(addr.Ports) == 0 && len(ports) > 1)) { err = multierror.Append(err, errUnixSocketMultiport) } diff --git a/internal/catalog/internal/types/validators_test.go b/internal/catalog/internal/types/validators_test.go index a8a8f740394d..ea3d2fa38ffe 100644 --- a/internal/catalog/internal/types/validators_test.go +++ b/internal/catalog/internal/types/validators_test.go @@ -178,7 +178,7 @@ func TestIsValidUnixSocketPath(t *testing.T) { for name, tcase := range cases { t.Run(name, func(t *testing.T) { - require.Equal(t, tcase.valid, isValidUnixSocketPath(tcase.name)) + require.Equal(t, tcase.valid, IsValidUnixSocketPath(tcase.name)) }) } } @@ -361,15 +361,15 @@ func TestValidatePortName(t *testing.T) { // test for the isValidDNSLabel function. t.Run("empty", func(t *testing.T) { - require.Equal(t, resource.ErrEmpty, validatePortName("")) + require.Equal(t, resource.ErrEmpty, ValidatePortName("")) }) t.Run("invalid", func(t *testing.T) { - require.Equal(t, errNotDNSLabel, validatePortName("foo.com")) + require.Equal(t, errNotDNSLabel, ValidatePortName("foo.com")) }) t.Run("ok", func(t *testing.T) { - require.NoError(t, validatePortName("http")) + require.NoError(t, ValidatePortName("http")) }) } diff --git a/internal/catalog/internal/types/workload.go b/internal/catalog/internal/types/workload.go index 961a85346c4f..89308667d2c1 100644 --- a/internal/catalog/internal/types/workload.go +++ b/internal/catalog/internal/types/workload.go @@ -44,7 +44,7 @@ func ValidateWorkload(res *pbresource.Resource) error { // Validate the Workload Ports for portName, port := range workload.Ports { - if portNameErr := validatePortName(portName); portNameErr != nil { + if portNameErr := ValidatePortName(portName); portNameErr != nil { err = multierror.Append(err, resource.ErrInvalidMapKey{ Map: "ports", Key: portName, diff --git a/internal/mesh/internal/controllers/sidecarproxy/controller_test.go b/internal/mesh/internal/controllers/sidecarproxy/controller_test.go index 883984f1615d..0d6684f0ef54 100644 --- a/internal/mesh/internal/controllers/sidecarproxy/controller_test.go +++ b/internal/mesh/internal/controllers/sidecarproxy/controller_test.go @@ -553,7 +553,7 @@ func (suite *controllerTestSuite) TestController() { }).Write(suite.T(), suite.client) testutil.RunStep(suite.T(), "add explicit destinations and check that new proxy state is generated", func(t *testing.T) { - webProxyStateTemplate = suite.client.WaitForNewVersion(t, webProxyStateTemplateID, webProxyStateTemplate.Version) + webProxyStateTemplate = suite.client.WaitForNewVersion(suite.T(), webProxyStateTemplateID, webProxyStateTemplate.Version) requireExplicitDestinationsFound(t, "api", webProxyStateTemplate) }) @@ -613,7 +613,7 @@ func (suite *controllerTestSuite) TestController() { }) // We should get a new web proxy template resource because this destination should be removed. 
- webProxyStateTemplate = suite.client.WaitForNewVersion(t, webProxyStateTemplateID, webProxyStateTemplate.Version) + webProxyStateTemplate = suite.client.WaitForNewVersion(suite.T(), webProxyStateTemplateID, webProxyStateTemplate.Version) requireExplicitDestinationsNotFound(t, "api", webProxyStateTemplate) }) diff --git a/internal/mesh/internal/types/destinations.go b/internal/mesh/internal/types/destinations.go index 657aa33cb0a0..e4da2997e1da 100644 --- a/internal/mesh/internal/types/destinations.go +++ b/internal/mesh/internal/types/destinations.go @@ -4,6 +4,8 @@ package types import ( + "net" + "github.com/hashicorp/go-multierror" "google.golang.org/protobuf/proto" @@ -73,7 +75,6 @@ func ValidateDestinations(res *pbresource.Resource) error { var merr error - // Validate the workload selector if selErr := catalog.ValidateSelector(destinations.Workloads, false); selErr != nil { merr = multierror.Append(merr, resource.ErrInvalidField{ Name: "workloads", @@ -81,10 +82,17 @@ func ValidateDestinations(res *pbresource.Resource) error { }) } + if destinations.GetPqDestinations() != nil { + merr = multierror.Append(merr, resource.ErrInvalidField{ + Name: "pq_destinations", + Wrapped: resource.ErrUnsupported, + }) + } + for i, dest := range destinations.Destinations { wrapDestErr := func(err error) error { return resource.ErrInvalidListElement{ - Name: "upstreams", + Name: "destinations", Index: i, Wrapped: err, } @@ -101,8 +109,73 @@ func ValidateDestinations(res *pbresource.Resource) error { merr = multierror.Append(merr, refErr) } - // TODO(v2): validate port name using catalog validator - // TODO(v2): validate ListenAddr + if portErr := catalog.ValidatePortName(dest.DestinationPort); portErr != nil { + merr = multierror.Append(merr, wrapDestErr(resource.ErrInvalidField{ + Name: "destination_port", + Wrapped: portErr, + })) + } + + if dest.GetDatacenter() != "" { + merr = multierror.Append(merr, wrapDestErr(resource.ErrInvalidField{ + Name: "datacenter", + Wrapped: resource.ErrUnsupported, + })) + } + + if listenAddrErr := validateListenAddr(dest); listenAddrErr != nil { + merr = multierror.Append(merr, wrapDestErr(listenAddrErr)) + } + } + + return merr +} + +func validateListenAddr(dest *pbmesh.Destination) error { + var merr error + + if dest.GetListenAddr() == nil { + return multierror.Append(merr, resource.ErrInvalidFields{ + Names: []string{"ip_port", "unix"}, + Wrapped: resource.ErrMissingOneOf, + }) + } + + switch listenAddr := dest.GetListenAddr().(type) { + case *pbmesh.Destination_IpPort: + if ipPortErr := validateIPPort(listenAddr.IpPort); ipPortErr != nil { + merr = multierror.Append(merr, resource.ErrInvalidField{ + Name: "ip_port", + Wrapped: ipPortErr, + }) + } + case *pbmesh.Destination_Unix: + if !catalog.IsValidUnixSocketPath(listenAddr.Unix.GetPath()) { + merr = multierror.Append(merr, resource.ErrInvalidField{ + Name: "unix", + Wrapped: resource.ErrInvalidField{ + Name: "path", + Wrapped: errInvalidUnixSocketPath, + }, + }) + } + } + + return merr +} + +func validateIPPort(ipPort *pbmesh.IPPortAddress) error { + var merr error + + if listenPortErr := validatePort(ipPort.GetPort(), "port"); listenPortErr != nil { + merr = multierror.Append(merr, listenPortErr) + } + + if net.ParseIP(ipPort.GetIp()) == nil { + merr = multierror.Append(merr, resource.ErrInvalidField{ + Name: "ip", + Wrapped: errInvalidIP, + }) } return merr diff --git a/internal/mesh/internal/types/destinations_test.go b/internal/mesh/internal/types/destinations_test.go index 2601e884df07..ca0150a60c3d 
100644 --- a/internal/mesh/internal/types/destinations_test.go +++ b/internal/mesh/internal/types/destinations_test.go @@ -17,7 +17,7 @@ import ( "github.com/hashicorp/consul/sdk/testutil" ) -func TestMutateUpstreams(t *testing.T) { +func TestMutateDestinations(t *testing.T) { type testcase struct { tenancy *pbresource.Tenancy data *pbmesh.Destinations @@ -86,7 +86,7 @@ func TestMutateUpstreams(t *testing.T) { } } -func TestValidateUpstreams(t *testing.T) { +func TestValidateDestinations(t *testing.T) { type testcase struct { data *pbmesh.Destinations skipMutate bool @@ -151,7 +151,7 @@ func TestValidateUpstreams(t *testing.T) { {DestinationRef: nil}, }, }, - expectErr: `invalid element at index 0 of list "upstreams": invalid "destination_ref" field: missing required field`, + expectErr: `invalid element at index 0 of list "destinations": invalid "destination_ref" field: missing required field`, }, "dest/bad type": { skipMutate: true, @@ -163,7 +163,7 @@ func TestValidateUpstreams(t *testing.T) { {DestinationRef: newRefWithTenancy(pbcatalog.WorkloadType, "default.default", "api")}, }, }, - expectErr: `invalid element at index 0 of list "upstreams": invalid "destination_ref" field: invalid "type" field: reference must have type catalog.v2beta1.Service`, + expectErr: `invalid element at index 0 of list "destinations": invalid "destination_ref" field: invalid "type" field: reference must have type catalog.v2beta1.Service`, }, "dest/nil tenancy": { skipMutate: true, @@ -175,7 +175,7 @@ func TestValidateUpstreams(t *testing.T) { {DestinationRef: &pbresource.Reference{Type: pbcatalog.ServiceType, Name: "api"}}, }, }, - expectErr: `invalid element at index 0 of list "upstreams": invalid "destination_ref" field: invalid "tenancy" field: missing required field`, + expectErr: `invalid element at index 0 of list "destinations": invalid "destination_ref" field: invalid "tenancy" field: missing required field`, }, "dest/bad dest tenancy/partition": { skipMutate: true, @@ -187,7 +187,7 @@ func TestValidateUpstreams(t *testing.T) { {DestinationRef: newRefWithTenancy(pbcatalog.ServiceType, ".bar", "api")}, }, }, - expectErr: `invalid element at index 0 of list "upstreams": invalid "destination_ref" field: invalid "tenancy" field: invalid "partition" field: cannot be empty`, + expectErr: `invalid element at index 0 of list "destinations": invalid "destination_ref" field: invalid "tenancy" field: invalid "partition" field: cannot be empty`, }, "dest/bad dest tenancy/namespace": { skipMutate: true, @@ -199,7 +199,7 @@ func TestValidateUpstreams(t *testing.T) { {DestinationRef: newRefWithTenancy(pbcatalog.ServiceType, "foo", "api")}, }, }, - expectErr: `invalid element at index 0 of list "upstreams": invalid "destination_ref" field: invalid "tenancy" field: invalid "namespace" field: cannot be empty`, + expectErr: `invalid element at index 0 of list "destinations": invalid "destination_ref" field: invalid "tenancy" field: invalid "namespace" field: cannot be empty`, }, "dest/bad dest tenancy/peer_name": { skipMutate: true, @@ -213,17 +213,158 @@ func TestValidateUpstreams(t *testing.T) { Reference("")}, }, }, - expectErr: `invalid element at index 0 of list "upstreams": invalid "destination_ref" field: invalid "tenancy" field: invalid "peer_name" field: must be set to "local"`, + expectErr: `invalid element at index 0 of list "destinations": invalid "destination_ref" field: invalid "tenancy" field: invalid "peer_name" field: must be set to "local"`, }, - "normal": { + "unsupported pq_destinations": { + 
skipMutate: true, data: &pbmesh.Destinations{ - Workloads: &pbcatalog.WorkloadSelector{ - Names: []string{"blah"}, + Workloads: &pbcatalog.WorkloadSelector{Names: []string{"foo"}}, + PqDestinations: []*pbmesh.PreparedQueryDestination{ + {Name: "foo-query"}, }, + }, + expectErr: `invalid "pq_destinations" field: field is currently not supported`, + }, + "missing destination port": { + skipMutate: true, + data: &pbmesh.Destinations{ + Workloads: &pbcatalog.WorkloadSelector{Names: []string{"foo"}}, Destinations: []*pbmesh.Destination{ - {DestinationRef: newRefWithTenancy(pbcatalog.ServiceType, "foo.bar", "api")}, - {DestinationRef: newRefWithTenancy(pbcatalog.ServiceType, "foo.zim", "api")}, - {DestinationRef: newRefWithTenancy(pbcatalog.ServiceType, "gir.zim", "api")}, + { + DestinationRef: newRefWithTenancy(pbcatalog.ServiceType, "foo.bar", "api"), + ListenAddr: &pbmesh.Destination_IpPort{ + IpPort: &pbmesh.IPPortAddress{ + Ip: "127.0.0.1", + Port: 1234, + }, + }, + }, + }, + }, + expectErr: `invalid element at index 0 of list "destinations": invalid "destination_port" field: cannot be empty`, + }, + "unsupported datacenter": { + skipMutate: true, + data: &pbmesh.Destinations{ + Workloads: &pbcatalog.WorkloadSelector{Names: []string{"foo"}}, + Destinations: []*pbmesh.Destination{ + { + DestinationRef: newRefWithTenancy(pbcatalog.ServiceType, "foo.bar", "api"), + DestinationPort: "p1", + Datacenter: "dc2", + ListenAddr: &pbmesh.Destination_IpPort{ + IpPort: &pbmesh.IPPortAddress{ + Ip: "127.0.0.1", + Port: 1234, + }, + }, + }, + }, + }, + expectErr: `invalid element at index 0 of list "destinations": invalid "datacenter" field: field is currently not supported`, + }, + "missing listen addr": { + skipMutate: true, + data: &pbmesh.Destinations{ + Workloads: &pbcatalog.WorkloadSelector{Names: []string{"foo"}}, + Destinations: []*pbmesh.Destination{ + { + DestinationRef: newRefWithTenancy(pbcatalog.ServiceType, "foo.bar", "api"), + DestinationPort: "p1", + }, + }, + }, + expectErr: `invalid "ip_port,unix" fields: missing one of the required fields`, + }, + "invalid ip for listen addr": { + skipMutate: true, + data: &pbmesh.Destinations{ + Workloads: &pbcatalog.WorkloadSelector{Names: []string{"foo"}}, + Destinations: []*pbmesh.Destination{ + { + DestinationRef: newRefWithTenancy(pbcatalog.ServiceType, "foo.bar", "api"), + DestinationPort: "p1", + ListenAddr: &pbmesh.Destination_IpPort{ + IpPort: &pbmesh.IPPortAddress{ + Ip: "invalid", + Port: 1234, + }, + }, + }, + }, + }, + expectErr: `invalid "ip" field: IP address is not valid`, + }, + "invalid port for listen addr": { + skipMutate: true, + data: &pbmesh.Destinations{ + Workloads: &pbcatalog.WorkloadSelector{Names: []string{"foo"}}, + Destinations: []*pbmesh.Destination{ + { + DestinationRef: newRefWithTenancy(pbcatalog.ServiceType, "foo.bar", "api"), + DestinationPort: "p1", + ListenAddr: &pbmesh.Destination_IpPort{ + IpPort: &pbmesh.IPPortAddress{ + Ip: "127.0.0.1", + Port: 0, + }, + }, + }, + }, + }, + expectErr: `invalid "port" field: port number is outside the range 1 to 65535`, + }, + "invalid unix path for listen addr": { + skipMutate: true, + data: &pbmesh.Destinations{ + Workloads: &pbcatalog.WorkloadSelector{Names: []string{"foo"}}, + Destinations: []*pbmesh.Destination{ + { + DestinationRef: newRefWithTenancy(pbcatalog.ServiceType, "foo.bar", "api"), + DestinationPort: "p1", + ListenAddr: &pbmesh.Destination_Unix{ + Unix: &pbmesh.UnixSocketAddress{ + Path: "foo", + }, + }, + }, + }, + }, + expectErr: `invalid "unix" field: invalid 
"path" field: unix socket path is not valid`, + }, + "normal": { + data: &pbmesh.Destinations{ + Workloads: &pbcatalog.WorkloadSelector{Names: []string{"foo"}}, + Destinations: []*pbmesh.Destination{ + { + DestinationRef: newRefWithTenancy(pbcatalog.ServiceType, "foo.bar", "api"), + DestinationPort: "p1", + ListenAddr: &pbmesh.Destination_IpPort{ + IpPort: &pbmesh.IPPortAddress{ + Ip: "127.0.0.1", + Port: 1234, + }, + }, + }, + { + DestinationRef: newRefWithTenancy(pbcatalog.ServiceType, "foo.zim", "api"), + DestinationPort: "p2", + ListenAddr: &pbmesh.Destination_IpPort{ + IpPort: &pbmesh.IPPortAddress{ + Ip: "127.0.0.1", + Port: 1235, + }, + }, + }, + { + DestinationRef: newRefWithTenancy(pbcatalog.ServiceType, "gir.zim", "api"), + DestinationPort: "p3", + ListenAddr: &pbmesh.Destination_Unix{ + Unix: &pbmesh.UnixSocketAddress{ + Path: "unix://foo/bar", + }, + }, + }, }, }, }, @@ -234,9 +375,35 @@ func TestValidateUpstreams(t *testing.T) { Filter: "metadata.foo == bar", }, Destinations: []*pbmesh.Destination{ - {DestinationRef: newRefWithTenancy(pbcatalog.ServiceType, "foo.bar", "api")}, - {DestinationRef: newRefWithTenancy(pbcatalog.ServiceType, "foo.zim", "api")}, - {DestinationRef: newRefWithTenancy(pbcatalog.ServiceType, "gir.zim", "api")}, + { + DestinationRef: newRefWithTenancy(pbcatalog.ServiceType, "foo.bar", "api"), + DestinationPort: "p1", + ListenAddr: &pbmesh.Destination_IpPort{ + IpPort: &pbmesh.IPPortAddress{ + Ip: "127.0.0.1", + Port: 1234, + }, + }, + }, + { + DestinationRef: newRefWithTenancy(pbcatalog.ServiceType, "foo.zim", "api"), + DestinationPort: "p2", + ListenAddr: &pbmesh.Destination_IpPort{ + IpPort: &pbmesh.IPPortAddress{ + Ip: "127.0.0.1", + Port: 1235, + }, + }, + }, + { + DestinationRef: newRefWithTenancy(pbcatalog.ServiceType, "gir.zim", "api"), + DestinationPort: "p3", + ListenAddr: &pbmesh.Destination_Unix{ + Unix: &pbmesh.UnixSocketAddress{ + Path: "unix://foo/bar", + }, + }, + }, }, }, }, diff --git a/internal/mesh/internal/types/errors.go b/internal/mesh/internal/types/errors.go index e38085b49fa2..bc9dacbbf0c4 100644 --- a/internal/mesh/internal/types/errors.go +++ b/internal/mesh/internal/types/errors.go @@ -9,6 +9,8 @@ import ( var ( errInvalidPort = errors.New("port number is outside the range 1 to 65535") + errInvalidIP = errors.New("IP address is not valid") + errInvalidUnixSocketPath = errors.New("unix socket path is not valid") errInvalidExposePathProtocol = errors.New("invalid protocol: only HTTP and HTTP2 protocols are allowed") errMissingProxyConfigData = errors.New("at least one of \"bootstrap_config\" or \"dynamic_config\" fields must be set") ) diff --git a/internal/resource/errors.go b/internal/resource/errors.go index 2003d86cbf71..2602e8c5b6ca 100644 --- a/internal/resource/errors.go +++ b/internal/resource/errors.go @@ -14,6 +14,7 @@ import ( var ( ErrMissing = NewConstError("missing required field") + ErrMissingOneOf = NewConstError("missing one of the required fields") ErrEmpty = NewConstError("cannot be empty") ErrReferenceTenancyNotEqual = NewConstError("resource tenancy and reference tenancy differ") ErrUnsupported = NewConstError("field is currently not supported") From 105ebfdd005a9ae2e58afd2572899662aaad9676 Mon Sep 17 00:00:00 2001 From: Iryna Shustava Date: Fri, 13 Oct 2023 17:16:26 -0600 Subject: [PATCH 006/130] catalog, mesh: implement missing ACL hooks (#19143) This change adds ACL hooks to the remaining catalog and mesh resources, excluding any computed ones. 
Those will for now continue using the default operator:x permissions. It refactors a lot of the common testing functions so that they can be re-used between resources. There are also some types that we don't yet support (e.g. virtual IPs) that this change adds ACL hooks to for future-proofing. --- agent/grpc-external/services/resource/read.go | 2 +- .../types/computed_traffic_permissions.go | 8 +- .../internal/types/traffic_permissions.go | 10 +- .../auth/internal/types/workload_identity.go | 12 +- .../internal/types/workload_identity_test.go | 4 +- .../helpers/acl_hooks_test_helpers.go | 21 +++ internal/catalog/exports.go | 6 + .../controllers/nodehealth/controller_test.go | 2 +- .../testhelpers/acl_hooks_test_helpers.go | 113 +++++++++++++ internal/catalog/internal/types/acl_hooks.go | 56 +++++++ internal/catalog/internal/types/dns_policy.go | 1 + .../catalog/internal/types/dns_policy_test.go | 17 ++ .../catalog/internal/types/failover_policy.go | 8 +- .../internal/types/failover_policy_test.go | 129 ++++---------- .../catalog/internal/types/health_checks.go | 1 + .../internal/types/health_checks_test.go | 10 ++ .../catalog/internal/types/health_status.go | 35 ++++ .../internal/types/health_status_test.go | 104 ++++++++++++ internal/catalog/internal/types/node.go | 14 ++ internal/catalog/internal/types/node_test.go | 46 +++++ internal/catalog/internal/types/service.go | 1 + .../internal/types/service_endpoints.go | 10 ++ .../internal/types/service_endpoints_test.go | 44 +++++ .../catalog/internal/types/service_test.go | 10 ++ internal/catalog/internal/types/types.go | 8 +- internal/catalog/internal/types/types_test.go | 4 +- .../catalog/internal/types/virtual_ips.go | 23 ++- .../internal/types/virtual_ips_test.go | 45 +++++ internal/catalog/internal/types/workload.go | 35 ++++ .../internal/types/workload_selecting.go | 16 ++ .../catalog/internal/types/workload_test.go | 158 ++++++++++++++++++ .../workload_selection_mapper.go | 14 +- .../mesh/internal/types/destination_policy.go | 8 +- internal/mesh/internal/types/destinations.go | 1 + .../types/destinations_configuration.go | 4 +- .../types/destinations_configuration_test.go | 10 ++ .../mesh/internal/types/destinations_test.go | 10 ++ .../internal/types/proxy_configuration.go | 5 +- .../types/proxy_configuration_test.go | 13 ++ .../internal/types/proxy_state_template.go | 6 +- internal/mesh/internal/types/types.go | 3 +- internal/mesh/internal/types/types_test.go | 3 +- internal/mesh/internal/types/xroute.go | 10 +- internal/mesh/internal/types/xroute_test.go | 2 +- internal/resource/acls.go | 13 ++ internal/resource/demo/demo.go | 2 +- internal/resource/errors.go | 14 ++ internal/resource/registry.go | 4 +- internal/resource/resourcetest/acls.go | 85 ++++++++++ internal/resource/resourcetest/tenancy.go | 15 ++ 50 files changed, 996 insertions(+), 179 deletions(-) create mode 100644 internal/catalog/catalogtest/helpers/acl_hooks_test_helpers.go create mode 100644 internal/catalog/internal/testhelpers/acl_hooks_test_helpers.go create mode 100644 internal/catalog/internal/types/acl_hooks.go create mode 100644 internal/catalog/internal/types/workload_selecting.go create mode 100644 internal/resource/acls.go create mode 100644 internal/resource/resourcetest/acls.go diff --git a/agent/grpc-external/services/resource/read.go b/agent/grpc-external/services/resource/read.go index 351a50385655..b6cec3725456 100644 --- a/agent/grpc-external/services/resource/read.go +++ b/agent/grpc-external/services/resource/read.go @@ -50,7 +50,7 @@ func (s 
*Server) Read(ctx context.Context, req *pbresource.ReadRequest) (*pbreso authzNeedsData := false err = reg.ACLs.Read(authz, authzContext, req.Id, nil) switch { - case errors.Is(err, resource.ErrNeedData): + case errors.Is(err, resource.ErrNeedResource): authzNeedsData = true err = nil case acl.IsErrPermissionDenied(err): diff --git a/internal/auth/internal/types/computed_traffic_permissions.go b/internal/auth/internal/types/computed_traffic_permissions.go index 0ba88427233b..0a32e13d2926 100644 --- a/internal/auth/internal/types/computed_traffic_permissions.go +++ b/internal/auth/internal/types/computed_traffic_permissions.go @@ -19,7 +19,7 @@ func RegisterComputedTrafficPermission(r resource.Registry) { ACLs: &resource.ACLHooks{ Read: aclReadHookComputedTrafficPermissions, Write: aclWriteHookComputedTrafficPermissions, - List: aclListHookComputedTrafficPermissions, + List: resource.NoOpACLListHook, }, Validate: ValidateComputedTrafficPermissions, Scope: resource.ScopeNamespace, @@ -71,9 +71,3 @@ func aclReadHookComputedTrafficPermissions(authorizer acl.Authorizer, authzConte func aclWriteHookComputedTrafficPermissions(authorizer acl.Authorizer, authzContext *acl.AuthorizerContext, res *pbresource.Resource) error { return authorizer.ToAllowAuthorizer().TrafficPermissionsWriteAllowed(res.Id.Name, authzContext) } - -func aclListHookComputedTrafficPermissions(_ acl.Authorizer, _ *acl.AuthorizerContext) error { - // No-op List permission as we want to default to filtering resources - // from the list using the Read enforcement - return nil -} diff --git a/internal/auth/internal/types/traffic_permissions.go b/internal/auth/internal/types/traffic_permissions.go index fb1de7acff90..78d53c70c628 100644 --- a/internal/auth/internal/types/traffic_permissions.go +++ b/internal/auth/internal/types/traffic_permissions.go @@ -19,7 +19,7 @@ func RegisterTrafficPermissions(r resource.Registry) { ACLs: &resource.ACLHooks{ Read: aclReadHookTrafficPermissions, Write: aclWriteHookTrafficPermissions, - List: aclListHookTrafficPermissions, + List: resource.NoOpACLListHook, }, Validate: ValidateTrafficPermissions, Mutate: MutateTrafficPermissions, @@ -273,7 +273,7 @@ func isLocalPeer(p string) bool { func aclReadHookTrafficPermissions(authorizer acl.Authorizer, authzContext *acl.AuthorizerContext, _ *pbresource.ID, res *pbresource.Resource) error { if res == nil { - return resource.ErrNeedData + return resource.ErrNeedResource } return authorizeDestination(res, func(dest string) error { return authorizer.ToAllowAuthorizer().TrafficPermissionsReadAllowed(dest, authzContext) @@ -286,12 +286,6 @@ func aclWriteHookTrafficPermissions(authorizer acl.Authorizer, authzContext *acl }) } -func aclListHookTrafficPermissions(_ acl.Authorizer, _ *acl.AuthorizerContext) error { - // No-op List permission as we want to default to filtering resources - // from the list using the Read enforcement - return nil -} - func authorizeDestination(res *pbresource.Resource, intentionAllowed func(string) error) error { tp, err := resource.Decode[*pbauth.TrafficPermissions](res) if err != nil { diff --git a/internal/auth/internal/types/workload_identity.go b/internal/auth/internal/types/workload_identity.go index 5379d256ba4f..17334e66099e 100644 --- a/internal/auth/internal/types/workload_identity.go +++ b/internal/auth/internal/types/workload_identity.go @@ -18,7 +18,7 @@ func RegisterWorkloadIdentity(r resource.Registry) { ACLs: &resource.ACLHooks{ Read: aclReadHookWorkloadIdentity, Write: aclWriteHookWorkloadIdentity, - List: 
aclListHookWorkloadIdentity, + List: resource.NoOpACLListHook, }, Validate: nil, }) @@ -36,7 +36,7 @@ func aclReadHookWorkloadIdentity( if res != nil { return authorizer.ToAllowAuthorizer().IdentityReadAllowed(res.Id.Name, authzCtx) } - return resource.ErrNeedData + return resource.ErrNeedResource } func aclWriteHookWorkloadIdentity( @@ -44,13 +44,7 @@ func aclWriteHookWorkloadIdentity( authzCtx *acl.AuthorizerContext, res *pbresource.Resource) error { if res == nil { - return resource.ErrNeedData + return resource.ErrNeedResource } return authorizer.ToAllowAuthorizer().IdentityWriteAllowed(res.Id.Name, authzCtx) } - -func aclListHookWorkloadIdentity(authorizer acl.Authorizer, context *acl.AuthorizerContext) error { - // No-op List permission as we want to default to filtering resources - // from the list using the Read enforcement - return nil -} diff --git a/internal/auth/internal/types/workload_identity_test.go b/internal/auth/internal/types/workload_identity_test.go index 1ca59952ecee..8dfb22bc74a2 100644 --- a/internal/auth/internal/types/workload_identity_test.go +++ b/internal/auth/internal/types/workload_identity_test.go @@ -82,8 +82,8 @@ func TestWorkloadIdentityACLs(t *testing.T) { checkF(t, tc.listOK, err) }) t.Run("errors", func(t *testing.T) { - require.ErrorIs(t, reg.ACLs.Read(authz, &acl.AuthorizerContext{}, nil, nil), resource.ErrNeedData) - require.ErrorIs(t, reg.ACLs.Write(authz, &acl.AuthorizerContext{}, nil), resource.ErrNeedData) + require.ErrorIs(t, reg.ACLs.Read(authz, &acl.AuthorizerContext{}, nil, nil), resource.ErrNeedResource) + require.ErrorIs(t, reg.ACLs.Write(authz, &acl.AuthorizerContext{}, nil), resource.ErrNeedResource) }) } diff --git a/internal/catalog/catalogtest/helpers/acl_hooks_test_helpers.go b/internal/catalog/catalogtest/helpers/acl_hooks_test_helpers.go new file mode 100644 index 000000000000..097647ed08d1 --- /dev/null +++ b/internal/catalog/catalogtest/helpers/acl_hooks_test_helpers.go @@ -0,0 +1,21 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + +package helpers + +import ( + "testing" + + "github.com/hashicorp/consul/internal/catalog" + "github.com/hashicorp/consul/internal/catalog/internal/testhelpers" + "github.com/hashicorp/consul/internal/resource" + pbcatalog "github.com/hashicorp/consul/proto-public/pbcatalog/v2beta1" + "github.com/hashicorp/consul/proto-public/pbresource" +) + +func RunWorkloadSelectingTypeACLsTests[T catalog.WorkloadSelecting](t *testing.T, typ *pbresource.Type, + getData func(selector *pbcatalog.WorkloadSelector) T, + registerFunc func(registry resource.Registry), +) { + testhelpers.RunWorkloadSelectingTypeACLsTests[T](t, typ, getData, registerFunc) +} diff --git a/internal/catalog/exports.go b/internal/catalog/exports.go index b74149d2aec4..0391bea78442 100644 --- a/internal/catalog/exports.go +++ b/internal/catalog/exports.go @@ -48,6 +48,12 @@ var ( FailoverStatusConditionAcceptedUsingMeshDestinationPortReason = failover.UsingMeshDestinationPortReason ) +type WorkloadSelecting = types.WorkloadSelecting + +func ACLHooksForWorkloadSelectingType[T WorkloadSelecting]() *resource.ACLHooks { + return types.ACLHooksForWorkloadSelectingType[T]() +} + // RegisterTypes adds all resource types within the "catalog" API group // to the given type registry func RegisterTypes(r resource.Registry) { diff --git a/internal/catalog/internal/controllers/nodehealth/controller_test.go b/internal/catalog/internal/controllers/nodehealth/controller_test.go index 30989b479b0f..b21c52e521f8 100644 --- a/internal/catalog/internal/controllers/nodehealth/controller_test.go +++ b/internal/catalog/internal/controllers/nodehealth/controller_test.go @@ -73,7 +73,7 @@ type nodeHealthControllerTestSuite struct { } func (suite *nodeHealthControllerTestSuite) SetupTest() { - suite.resourceClient = svctest.RunResourceService(suite.T(), types.Register) + suite.resourceClient = svctest.RunResourceService(suite.T(), types.Register, types.RegisterDNSPolicy) suite.runtime = controller.Runtime{Client: suite.resourceClient, Logger: testutil.Logger(suite.T())} // The rest of the setup will be to prime the resource service with some data diff --git a/internal/catalog/internal/testhelpers/acl_hooks_test_helpers.go b/internal/catalog/internal/testhelpers/acl_hooks_test_helpers.go new file mode 100644 index 000000000000..c2e8947ac545 --- /dev/null +++ b/internal/catalog/internal/testhelpers/acl_hooks_test_helpers.go @@ -0,0 +1,113 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package testhelpers + +import ( + "testing" + + "google.golang.org/protobuf/proto" + + "github.com/hashicorp/consul/internal/resource" + "github.com/hashicorp/consul/internal/resource/resourcetest" + pbcatalog "github.com/hashicorp/consul/proto-public/pbcatalog/v2beta1" + "github.com/hashicorp/consul/proto-public/pbresource" +) + +// WorkloadSelecting denotes a resource type that uses workload selectors. 
+type WorkloadSelecting interface { + proto.Message + GetWorkloads() *pbcatalog.WorkloadSelector +} + +func RunWorkloadSelectingTypeACLsTests[T WorkloadSelecting](t *testing.T, typ *pbresource.Type, + getData func(selector *pbcatalog.WorkloadSelector) T, + registerFunc func(registry resource.Registry), +) { + // Wire up a registry to generically invoke hooks + registry := resource.NewRegistry() + registerFunc(registry) + + cases := map[string]resourcetest.ACLTestCase{ + "no rules": { + Rules: ``, + Data: getData(&pbcatalog.WorkloadSelector{Names: []string{"workload"}}), + Typ: typ, + ReadOK: resourcetest.DENY, + WriteOK: resourcetest.DENY, + ListOK: resourcetest.DEFAULT, + }, + "service test read": { + Rules: `service "test" { policy = "read" }`, + Data: getData(&pbcatalog.WorkloadSelector{Names: []string{"workload"}}), + Typ: typ, + ReadOK: resourcetest.ALLOW, + WriteOK: resourcetest.DENY, + ListOK: resourcetest.DEFAULT, + }, + "service test write with named selectors and insufficient policy": { + Rules: `service "test" { policy = "write" }`, + Data: getData(&pbcatalog.WorkloadSelector{Names: []string{"workload"}}), + Typ: typ, + ReadOK: resourcetest.ALLOW, + WriteOK: resourcetest.DENY, + ListOK: resourcetest.DEFAULT, + }, + "service test write with prefixed selectors and insufficient policy": { + Rules: `service "test" { policy = "write" }`, + Data: getData(&pbcatalog.WorkloadSelector{Prefixes: []string{"workload"}}), + Typ: typ, + ReadOK: resourcetest.ALLOW, + WriteOK: resourcetest.DENY, + ListOK: resourcetest.DEFAULT, + }, + "service test write with named selectors": { + Rules: `service "test" { policy = "write" } service "workload" { policy = "read" }`, + Data: getData(&pbcatalog.WorkloadSelector{Names: []string{"workload"}}), + Typ: typ, + ReadOK: resourcetest.ALLOW, + WriteOK: resourcetest.ALLOW, + ListOK: resourcetest.DEFAULT, + }, + "service test write with prefixed selectors": { + Rules: `service "test" { policy = "write" } service_prefix "workload-" { policy = "read" }`, + Data: getData(&pbcatalog.WorkloadSelector{Prefixes: []string{"workload-"}}), + Typ: typ, + ReadOK: resourcetest.ALLOW, + WriteOK: resourcetest.ALLOW, + ListOK: resourcetest.DEFAULT, + }, + "service test write with prefixed selectors and a policy with more specific than the selector": { + Rules: `service "test" { policy = "write" } service_prefix "workload-" { policy = "read" }`, + Data: getData(&pbcatalog.WorkloadSelector{Prefixes: []string{"wor"}}), + Typ: typ, + ReadOK: resourcetest.ALLOW, + WriteOK: resourcetest.DENY, + ListOK: resourcetest.DEFAULT, + }, + "service test write with prefixed selectors and a policy with less specific than the selector": { + Rules: `service "test" { policy = "write" } service_prefix "wor" { policy = "read" }`, + Data: getData(&pbcatalog.WorkloadSelector{Prefixes: []string{"workload-"}}), + Typ: typ, + ReadOK: resourcetest.ALLOW, + WriteOK: resourcetest.ALLOW, + ListOK: resourcetest.DEFAULT, + }, + "service test write with prefixed selectors and a policy with a specific service": { + Rules: `service "test" { policy = "write" } service "workload" { policy = "read" }`, + Data: getData(&pbcatalog.WorkloadSelector{Prefixes: []string{"workload"}}), + Typ: typ, + ReadOK: resourcetest.ALLOW, + // TODO (ishustava): this is wrong and should be fixed in a follow up PR. We should not allow + // a policy for a specific service when only prefixes are specified in the selector. 
+ WriteOK: resourcetest.ALLOW, + ListOK: resourcetest.DEFAULT, + }, + } + + for name, tc := range cases { + t.Run(name, func(t *testing.T) { + resourcetest.RunACLTestCase(t, tc, registry) + }) + } +} diff --git a/internal/catalog/internal/types/acl_hooks.go b/internal/catalog/internal/types/acl_hooks.go new file mode 100644 index 000000000000..3752e2b8de35 --- /dev/null +++ b/internal/catalog/internal/types/acl_hooks.go @@ -0,0 +1,56 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package types + +import ( + "github.com/hashicorp/consul/acl" + "github.com/hashicorp/consul/internal/resource" + "github.com/hashicorp/consul/proto-public/pbresource" +) + +func aclReadHookResourceWithWorkloadSelector(authorizer acl.Authorizer, authzContext *acl.AuthorizerContext, id *pbresource.ID, _ *pbresource.Resource) error { + return authorizer.ToAllowAuthorizer().ServiceReadAllowed(id.GetName(), authzContext) +} + +func aclWriteHookResourceWithWorkloadSelector[T WorkloadSelecting](authorizer acl.Authorizer, authzContext *acl.AuthorizerContext, res *pbresource.Resource) error { + if res == nil { + return resource.ErrNeedResource + } + + decodedService, err := resource.Decode[T](res) + if err != nil { + return resource.ErrNeedResource + } + + // First check service:write on the name. + err = authorizer.ToAllowAuthorizer().ServiceWriteAllowed(res.GetId().GetName(), authzContext) + if err != nil { + return err + } + + // Then also check whether we're allowed to select a service. + for _, name := range decodedService.GetData().GetWorkloads().GetNames() { + err = authorizer.ToAllowAuthorizer().ServiceReadAllowed(name, authzContext) + if err != nil { + return err + } + } + + for _, prefix := range decodedService.GetData().GetWorkloads().GetPrefixes() { + err = authorizer.ToAllowAuthorizer().ServiceReadAllowed(prefix, authzContext) + if err != nil { + return err + } + } + + return nil +} + +func ACLHooksForWorkloadSelectingType[T WorkloadSelecting]() *resource.ACLHooks { + return &resource.ACLHooks{ + Read: aclReadHookResourceWithWorkloadSelector, + Write: aclWriteHookResourceWithWorkloadSelector[T], + List: resource.NoOpACLListHook, + } +} diff --git a/internal/catalog/internal/types/dns_policy.go b/internal/catalog/internal/types/dns_policy.go index 809dd6f880c2..8e9dd864a957 100644 --- a/internal/catalog/internal/types/dns_policy.go +++ b/internal/catalog/internal/types/dns_policy.go @@ -19,6 +19,7 @@ func RegisterDNSPolicy(r resource.Registry) { Proto: &pbcatalog.DNSPolicy{}, Scope: resource.ScopeNamespace, Validate: ValidateDNSPolicy, + ACLs: ACLHooksForWorkloadSelectingType[*pbcatalog.DNSPolicy](), }) } diff --git a/internal/catalog/internal/types/dns_policy_test.go b/internal/catalog/internal/types/dns_policy_test.go index 3a611171070a..1303d2878cf7 100644 --- a/internal/catalog/internal/types/dns_policy_test.go +++ b/internal/catalog/internal/types/dns_policy_test.go @@ -11,6 +11,7 @@ import ( "google.golang.org/protobuf/reflect/protoreflect" "google.golang.org/protobuf/types/known/anypb" + "github.com/hashicorp/consul/internal/catalog/internal/testhelpers" "github.com/hashicorp/consul/internal/resource" pbcatalog "github.com/hashicorp/consul/proto-public/pbcatalog/v2beta1" "github.com/hashicorp/consul/proto-public/pbresource" @@ -161,3 +162,19 @@ func TestValidateDNSPolicy_EmptySelector(t *testing.T) { require.ErrorAs(t, err, &actual) require.Equal(t, expected, actual) } + +func TestDNSPolicyACLs(t *testing.T) { + // Wire up a registry to generically invoke hooks + registry 
:= resource.NewRegistry() + RegisterDNSPolicy(registry) + + testhelpers.RunWorkloadSelectingTypeACLsTests[*pbcatalog.DNSPolicy](t, pbcatalog.DNSPolicyType, + func(selector *pbcatalog.WorkloadSelector) *pbcatalog.DNSPolicy { + return &pbcatalog.DNSPolicy{ + Workloads: selector, + Weights: &pbcatalog.Weights{Passing: 1, Warning: 0}, + } + }, + RegisterDNSPolicy, + ) +} diff --git a/internal/catalog/internal/types/failover_policy.go b/internal/catalog/internal/types/failover_policy.go index 620c3f590e97..047bb9a95b05 100644 --- a/internal/catalog/internal/types/failover_policy.go +++ b/internal/catalog/internal/types/failover_policy.go @@ -25,7 +25,7 @@ func RegisterFailoverPolicy(r resource.Registry) { ACLs: &resource.ACLHooks{ Read: aclReadHookFailoverPolicy, Write: aclWriteHookFailoverPolicy, - List: aclListHookFailoverPolicy, + List: resource.NoOpACLListHook, }, }) } @@ -371,9 +371,3 @@ func aclWriteHookFailoverPolicy(authorizer acl.Authorizer, authzContext *acl.Aut return nil } - -func aclListHookFailoverPolicy(authorizer acl.Authorizer, authzContext *acl.AuthorizerContext) error { - // No-op List permission as we want to default to filtering resources - // from the list using the Read enforcement. - return nil -} diff --git a/internal/catalog/internal/types/failover_policy_test.go b/internal/catalog/internal/types/failover_policy_test.go index 8abe5d5cb74d..923d260580e0 100644 --- a/internal/catalog/internal/types/failover_policy_test.go +++ b/internal/catalog/internal/types/failover_policy_test.go @@ -10,8 +10,6 @@ import ( "github.com/stretchr/testify/require" "google.golang.org/protobuf/proto" - "github.com/hashicorp/consul/acl" - "github.com/hashicorp/consul/agent/structs" "github.com/hashicorp/consul/internal/resource" "github.com/hashicorp/consul/internal/resource/resourcetest" pbcatalog "github.com/hashicorp/consul/proto-public/pbcatalog/v2beta1" @@ -685,105 +683,52 @@ func TestFailoverPolicyACLs(t *testing.T) { registry := resource.NewRegistry() Register(registry) - type testcase struct { - rules string - check func(t *testing.T, authz acl.Authorizer, res *pbresource.Resource) - readOK string - writeOK string - listOK string - } - - const ( - DENY = "deny" - ALLOW = "allow" - DEFAULT = "default" - ) - - checkF := func(t *testing.T, expect string, got error) { - switch expect { - case ALLOW: - if acl.IsErrPermissionDenied(got) { - t.Fatal("should be allowed") - } - case DENY: - if !acl.IsErrPermissionDenied(got) { - t.Fatal("should be denied") - } - case DEFAULT: - require.Nil(t, got, "expected fallthrough decision") - default: - t.Fatalf("unexpected expectation: %q", expect) - } - } - - reg, ok := registry.Resolve(pbcatalog.FailoverPolicyType) - require.True(t, ok) - - run := func(t *testing.T, tc testcase) { - failoverData := &pbcatalog.FailoverPolicy{ - Config: &pbcatalog.FailoverConfig{ - Destinations: []*pbcatalog.FailoverDestination{ - {Ref: newRef(pbcatalog.ServiceType, "api-backup")}, - }, + failoverData := &pbcatalog.FailoverPolicy{ + Config: &pbcatalog.FailoverConfig{ + Destinations: []*pbcatalog.FailoverDestination{ + {Ref: newRef(pbcatalog.ServiceType, "api-backup")}, }, - } - res := resourcetest.Resource(pbcatalog.FailoverPolicyType, "api"). - WithTenancy(resource.DefaultNamespacedTenancy()). - WithData(t, failoverData). 
- Build() - resourcetest.ValidateAndNormalize(t, registry, res) - - config := acl.Config{ - WildcardName: structs.WildcardSpecifier, - } - authz, err := acl.NewAuthorizerFromRules(tc.rules, &config, nil) - require.NoError(t, err) - authz = acl.NewChainedAuthorizer([]acl.Authorizer{authz, acl.DenyAll()}) - - t.Run("read", func(t *testing.T) { - err := reg.ACLs.Read(authz, &acl.AuthorizerContext{}, res.Id, nil) - checkF(t, tc.readOK, err) - }) - t.Run("write", func(t *testing.T) { - err := reg.ACLs.Write(authz, &acl.AuthorizerContext{}, res) - checkF(t, tc.writeOK, err) - }) - t.Run("list", func(t *testing.T) { - err := reg.ACLs.List(authz, &acl.AuthorizerContext{}) - checkF(t, tc.listOK, err) - }) + }, } - cases := map[string]testcase{ + cases := map[string]resourcetest.ACLTestCase{ "no rules": { - rules: ``, - readOK: DENY, - writeOK: DENY, - listOK: DEFAULT, - }, - "service api read": { - rules: `service "api" { policy = "read" }`, - readOK: ALLOW, - writeOK: DENY, - listOK: DEFAULT, - }, - "service api write": { - rules: `service "api" { policy = "write" }`, - readOK: ALLOW, - writeOK: DENY, - listOK: DEFAULT, - }, - "service api write and api-backup read": { - rules: `service "api" { policy = "write" } service "api-backup" { policy = "read" }`, - readOK: ALLOW, - writeOK: ALLOW, - listOK: DEFAULT, + Rules: ``, + Data: failoverData, + Typ: pbcatalog.FailoverPolicyType, + ReadOK: resourcetest.DENY, + WriteOK: resourcetest.DENY, + ListOK: resourcetest.DEFAULT, + }, + "service test read": { + Rules: `service "test" { policy = "read" }`, + Data: failoverData, + Typ: pbcatalog.FailoverPolicyType, + ReadOK: resourcetest.ALLOW, + WriteOK: resourcetest.DENY, + ListOK: resourcetest.DEFAULT, + }, + "service test write": { + Rules: `service "test" { policy = "write" }`, + Data: failoverData, + Typ: pbcatalog.FailoverPolicyType, + ReadOK: resourcetest.ALLOW, + WriteOK: resourcetest.DENY, + ListOK: resourcetest.DEFAULT, + }, + "service test write and api-backup read": { + Rules: `service "test" { policy = "write" } service "api-backup" { policy = "read" }`, + Data: failoverData, + Typ: pbcatalog.FailoverPolicyType, + ReadOK: resourcetest.ALLOW, + WriteOK: resourcetest.ALLOW, + ListOK: resourcetest.DEFAULT, }, } for name, tc := range cases { t.Run(name, func(t *testing.T) { - run(t, tc) + resourcetest.RunACLTestCase(t, tc, registry) }) } } diff --git a/internal/catalog/internal/types/health_checks.go b/internal/catalog/internal/types/health_checks.go index b470be331feb..1333e2368d88 100644 --- a/internal/catalog/internal/types/health_checks.go +++ b/internal/catalog/internal/types/health_checks.go @@ -17,6 +17,7 @@ func RegisterHealthChecks(r resource.Registry) { Proto: &pbcatalog.HealthChecks{}, Scope: resource.ScopeNamespace, Validate: ValidateHealthChecks, + ACLs: ACLHooksForWorkloadSelectingType[*pbcatalog.HealthChecks](), }) } diff --git a/internal/catalog/internal/types/health_checks_test.go b/internal/catalog/internal/types/health_checks_test.go index 8af0ffde9fac..c9cdf01ae84c 100644 --- a/internal/catalog/internal/types/health_checks_test.go +++ b/internal/catalog/internal/types/health_checks_test.go @@ -12,6 +12,7 @@ import ( "google.golang.org/protobuf/types/known/anypb" "google.golang.org/protobuf/types/known/durationpb" + "github.com/hashicorp/consul/internal/catalog/internal/testhelpers" "github.com/hashicorp/consul/internal/resource" pbcatalog "github.com/hashicorp/consul/proto-public/pbcatalog/v2beta1" "github.com/hashicorp/consul/proto-public/pbresource" @@ -196,3 +197,12 @@ func 
TestValidateHealthChecks_EmptySelector(t *testing.T) { require.ErrorAs(t, err, &actual) require.Equal(t, expected, actual) } + +func TestHealthChecksACLs(t *testing.T) { + testhelpers.RunWorkloadSelectingTypeACLsTests[*pbcatalog.HealthChecks](t, pbcatalog.HealthChecksType, + func(selector *pbcatalog.WorkloadSelector) *pbcatalog.HealthChecks { + return &pbcatalog.HealthChecks{Workloads: selector} + }, + RegisterHealthChecks, + ) +} diff --git a/internal/catalog/internal/types/health_status.go b/internal/catalog/internal/types/health_status.go index 99b153895c1f..fe92e858b025 100644 --- a/internal/catalog/internal/types/health_status.go +++ b/internal/catalog/internal/types/health_status.go @@ -6,6 +6,7 @@ package types import ( "github.com/hashicorp/go-multierror" + "github.com/hashicorp/consul/acl" "github.com/hashicorp/consul/internal/resource" pbcatalog "github.com/hashicorp/consul/proto-public/pbcatalog/v2beta1" "github.com/hashicorp/consul/proto-public/pbresource" @@ -17,6 +18,11 @@ func RegisterHealthStatus(r resource.Registry) { Proto: &pbcatalog.HealthStatus{}, Scope: resource.ScopeNamespace, Validate: ValidateHealthStatus, + ACLs: &resource.ACLHooks{ + Read: aclReadHookHealthStatus, + Write: aclWriteHookHealthStatus, + List: resource.NoOpACLListHook, + }, }) } @@ -66,3 +72,32 @@ func ValidateHealthStatus(res *pbresource.Resource) error { return err } + +func aclReadHookHealthStatus(authorizer acl.Authorizer, authzContext *acl.AuthorizerContext, _ *pbresource.ID, res *pbresource.Resource) error { + if res == nil { + return resource.ErrNeedResource + } + // For a health status of a workload we need to check service:read perms. + if res.GetOwner() != nil && resource.EqualType(res.GetOwner().GetType(), pbcatalog.WorkloadType) { + return authorizer.ToAllowAuthorizer().ServiceReadAllowed(res.GetOwner().GetName(), authzContext) + } + + if res.GetOwner() != nil && resource.EqualType(res.GetOwner().GetType(), pbcatalog.NodeType) { + return authorizer.ToAllowAuthorizer().NodeReadAllowed(res.GetOwner().GetName(), authzContext) + } + + return acl.PermissionDenied("cannot read catalog.HealthStatus because there is no owner") +} + +func aclWriteHookHealthStatus(authorizer acl.Authorizer, authzContext *acl.AuthorizerContext, res *pbresource.Resource) error { + // For a health status of a workload we need to check service:write perms. 
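+ // Unlike the read hook above, the write hook always receives the full resource, so there is no ErrNeedResource fallback here; node-owned statuses fall through to the node:write check below, and statuses without an owner are denied.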
+ if res.GetOwner() != nil && resource.EqualType(res.GetOwner().GetType(), pbcatalog.WorkloadType) { + return authorizer.ToAllowAuthorizer().ServiceWriteAllowed(res.GetOwner().GetName(), authzContext) + } + + if res.GetOwner() != nil && resource.EqualType(res.GetOwner().GetType(), pbcatalog.NodeType) { + return authorizer.ToAllowAuthorizer().NodeWriteAllowed(res.GetOwner().GetName(), authzContext) + } + + return acl.PermissionDenied("cannot write catalog.HealthStatus because there is no owner") +} diff --git a/internal/catalog/internal/types/health_status_test.go b/internal/catalog/internal/types/health_status_test.go index 654573d24a53..9482e4770e40 100644 --- a/internal/catalog/internal/types/health_status_test.go +++ b/internal/catalog/internal/types/health_status_test.go @@ -11,6 +11,7 @@ import ( "google.golang.org/protobuf/types/known/anypb" "github.com/hashicorp/consul/internal/resource" + "github.com/hashicorp/consul/internal/resource/resourcetest" pbcatalog "github.com/hashicorp/consul/proto-public/pbcatalog/v2beta1" "github.com/hashicorp/consul/proto-public/pbresource" ) @@ -214,3 +215,106 @@ func TestValidateHealthStatus_InvalidOwner(t *testing.T) { }) } } + +func TestHealthStatusACLs(t *testing.T) { + registry := resource.NewRegistry() + Register(registry) + + workload := resourcetest.Resource(pbcatalog.WorkloadType, "test").ID() + node := resourcetest.Resource(pbcatalog.NodeType, "test").ID() + + healthStatusData := &pbcatalog.HealthStatus{ + Type: "tcp", + Status: pbcatalog.Health_HEALTH_PASSING, + } + + cases := map[string]resourcetest.ACLTestCase{ + "no rules": { + Rules: ``, + Data: healthStatusData, + Owner: workload, + Typ: pbcatalog.HealthStatusType, + ReadOK: resourcetest.DENY, + WriteOK: resourcetest.DENY, + ListOK: resourcetest.DEFAULT, + }, + "service test read": { + Rules: `service "test" { policy = "read" }`, + Data: healthStatusData, + Owner: workload, + Typ: pbcatalog.HealthStatusType, + ReadOK: resourcetest.ALLOW, + WriteOK: resourcetest.DENY, + ListOK: resourcetest.DEFAULT, + }, + "service test write": { + Rules: `service "test" { policy = "write" }`, + Data: healthStatusData, + Owner: workload, + Typ: pbcatalog.HealthStatusType, + ReadOK: resourcetest.ALLOW, + WriteOK: resourcetest.ALLOW, + ListOK: resourcetest.DEFAULT, + }, + "service test read with node owner": { + Rules: `service "test" { policy = "read" }`, + Data: healthStatusData, + Owner: node, + Typ: pbcatalog.HealthStatusType, + ReadOK: resourcetest.DENY, + WriteOK: resourcetest.DENY, + ListOK: resourcetest.DEFAULT, + }, + "service test write with node owner": { + Rules: `service "test" { policy = "write" }`, + Data: healthStatusData, + Owner: node, + Typ: pbcatalog.HealthStatusType, + ReadOK: resourcetest.DENY, + WriteOK: resourcetest.DENY, + ListOK: resourcetest.DEFAULT, + }, + "node test read with node owner": { + Rules: `node "test" { policy = "read" }`, + Data: healthStatusData, + Owner: node, + Typ: pbcatalog.HealthStatusType, + ReadOK: resourcetest.ALLOW, + WriteOK: resourcetest.DENY, + ListOK: resourcetest.DEFAULT, + }, + "node test write with node owner": { + Rules: `node "test" { policy = "write" }`, + Data: healthStatusData, + Owner: node, + Typ: pbcatalog.HealthStatusType, + ReadOK: resourcetest.ALLOW, + WriteOK: resourcetest.ALLOW, + ListOK: resourcetest.DEFAULT, + }, + "node test read with workload owner": { + Rules: `node "test" { policy = "read" }`, + Data: healthStatusData, + Owner: workload, + Typ: pbcatalog.HealthStatusType, + ReadOK: resourcetest.DENY, + WriteOK: 
resourcetest.DENY, + ListOK: resourcetest.DEFAULT, + }, + "node test write with workload owner": { + Rules: `node "test" { policy = "write" }`, + Data: healthStatusData, + Owner: workload, + Typ: pbcatalog.HealthStatusType, + ReadOK: resourcetest.DENY, + WriteOK: resourcetest.DENY, + ListOK: resourcetest.DEFAULT, + }, + } + + for name, tc := range cases { + t.Run(name, func(t *testing.T) { + resourcetest.RunACLTestCase(t, tc, registry) + }) + } +} diff --git a/internal/catalog/internal/types/node.go b/internal/catalog/internal/types/node.go index 9c59228a49b6..42ac833c6e7d 100644 --- a/internal/catalog/internal/types/node.go +++ b/internal/catalog/internal/types/node.go @@ -6,6 +6,7 @@ package types import ( "github.com/hashicorp/go-multierror" + "github.com/hashicorp/consul/acl" "github.com/hashicorp/consul/internal/resource" pbcatalog "github.com/hashicorp/consul/proto-public/pbcatalog/v2beta1" "github.com/hashicorp/consul/proto-public/pbresource" @@ -22,6 +23,11 @@ func RegisterNode(r resource.Registry) { // Until that time, Node will remain namespace scoped. Scope: resource.ScopeNamespace, Validate: ValidateNode, + ACLs: &resource.ACLHooks{ + Read: aclReadHookNode, + Write: aclWriteHookNode, + List: resource.NoOpACLListHook, + }, }) } @@ -80,3 +86,11 @@ func validateNodeAddress(addr *pbcatalog.NodeAddress) error { return nil } + +func aclReadHookNode(authorizer acl.Authorizer, authzContext *acl.AuthorizerContext, id *pbresource.ID, _ *pbresource.Resource) error { + return authorizer.ToAllowAuthorizer().NodeReadAllowed(id.GetName(), authzContext) +} + +func aclWriteHookNode(authorizer acl.Authorizer, authzContext *acl.AuthorizerContext, res *pbresource.Resource) error { + return authorizer.ToAllowAuthorizer().NodeWriteAllowed(res.GetId().GetName(), authzContext) +} diff --git a/internal/catalog/internal/types/node_test.go b/internal/catalog/internal/types/node_test.go index 130551fad2b6..5a678745e3a3 100644 --- a/internal/catalog/internal/types/node_test.go +++ b/internal/catalog/internal/types/node_test.go @@ -11,6 +11,7 @@ import ( "google.golang.org/protobuf/types/known/anypb" "github.com/hashicorp/consul/internal/resource" + "github.com/hashicorp/consul/internal/resource/resourcetest" pbcatalog "github.com/hashicorp/consul/proto-public/pbcatalog/v2beta1" "github.com/hashicorp/consul/proto-public/pbresource" ) @@ -127,3 +128,48 @@ func TestValidateNode_AddressMissingHost(t *testing.T) { require.ErrorAs(t, err, &actual) require.Equal(t, expected, actual) } + +func TestNodeACLs(t *testing.T) { + registry := resource.NewRegistry() + Register(registry) + + nodeData := &pbcatalog.Node{ + Addresses: []*pbcatalog.NodeAddress{ + { + Host: "1.1.1.1", + }, + }, + } + cases := map[string]resourcetest.ACLTestCase{ + "no rules": { + Rules: ``, + Data: nodeData, + Typ: pbcatalog.NodeType, + ReadOK: resourcetest.DENY, + WriteOK: resourcetest.DENY, + ListOK: resourcetest.DEFAULT, + }, + "node test read": { + Rules: `node "test" { policy = "read" }`, + Data: nodeData, + Typ: pbcatalog.NodeType, + ReadOK: resourcetest.ALLOW, + WriteOK: resourcetest.DENY, + ListOK: resourcetest.DEFAULT, + }, + "node test write": { + Rules: `node "test" { policy = "write" }`, + Data: nodeData, + Typ: pbcatalog.NodeType, + ReadOK: resourcetest.ALLOW, + WriteOK: resourcetest.ALLOW, + ListOK: resourcetest.DEFAULT, + }, + } + + for name, tc := range cases { + t.Run(name, func(t *testing.T) { + resourcetest.RunACLTestCase(t, tc, registry) + }) + } +} diff --git a/internal/catalog/internal/types/service.go 
b/internal/catalog/internal/types/service.go index 9fa703641c7f..ad9351f0d54f 100644 --- a/internal/catalog/internal/types/service.go +++ b/internal/catalog/internal/types/service.go @@ -20,6 +20,7 @@ func RegisterService(r resource.Registry) { Scope: resource.ScopeNamespace, Validate: ValidateService, Mutate: MutateService, + ACLs: ACLHooksForWorkloadSelectingType[*pbcatalog.Service](), }) } diff --git a/internal/catalog/internal/types/service_endpoints.go b/internal/catalog/internal/types/service_endpoints.go index 938d92b79239..14f055fcba77 100644 --- a/internal/catalog/internal/types/service_endpoints.go +++ b/internal/catalog/internal/types/service_endpoints.go @@ -8,6 +8,7 @@ import ( "github.com/hashicorp/go-multierror" + "github.com/hashicorp/consul/acl" "github.com/hashicorp/consul/internal/resource" pbcatalog "github.com/hashicorp/consul/proto-public/pbcatalog/v2beta1" "github.com/hashicorp/consul/proto-public/pbresource" @@ -20,6 +21,15 @@ func RegisterServiceEndpoints(r resource.Registry) { Scope: resource.ScopeNamespace, Validate: ValidateServiceEndpoints, Mutate: MutateServiceEndpoints, + ACLs: &resource.ACLHooks{ + Read: func(authorizer acl.Authorizer, context *acl.AuthorizerContext, id *pbresource.ID, _ *pbresource.Resource) error { + return authorizer.ToAllowAuthorizer().ServiceReadAllowed(id.GetName(), context) + }, + Write: func(authorizer acl.Authorizer, context *acl.AuthorizerContext, p *pbresource.Resource) error { + return authorizer.ToAllowAuthorizer().ServiceWriteAllowed(p.GetId().GetName(), context) + }, + List: resource.NoOpACLListHook, + }, }) } diff --git a/internal/catalog/internal/types/service_endpoints_test.go b/internal/catalog/internal/types/service_endpoints_test.go index d210ba1aaafd..7a298e397949 100644 --- a/internal/catalog/internal/types/service_endpoints_test.go +++ b/internal/catalog/internal/types/service_endpoints_test.go @@ -258,3 +258,47 @@ func TestMutateServiceEndpoints_PopulateOwner(t *testing.T) { require.True(t, resource.EqualTenancy(res.Owner.Tenancy, defaultEndpointTenancy)) require.Equal(t, res.Owner.Name, res.Id.Name) } + +func TestServiceEndpointsACLs(t *testing.T) { + registry := resource.NewRegistry() + Register(registry) + + service := rtest.Resource(pbcatalog.ServiceType, "test"). 
+ WithTenancy(resource.DefaultNamespacedTenancy()).ID() + serviceEndpointsData := &pbcatalog.ServiceEndpoints{} + cases := map[string]rtest.ACLTestCase{ + "no rules": { + Rules: ``, + Data: serviceEndpointsData, + Owner: service, + Typ: pbcatalog.ServiceEndpointsType, + ReadOK: rtest.DENY, + WriteOK: rtest.DENY, + ListOK: rtest.DEFAULT, + }, + "service test read": { + Rules: `service "test" { policy = "read" }`, + Data: serviceEndpointsData, + Owner: service, + Typ: pbcatalog.ServiceEndpointsType, + ReadOK: rtest.ALLOW, + WriteOK: rtest.DENY, + ListOK: rtest.DEFAULT, + }, + "service test write": { + Rules: `service "test" { policy = "write" }`, + Data: serviceEndpointsData, + Owner: service, + Typ: pbcatalog.ServiceEndpointsType, + ReadOK: rtest.ALLOW, + WriteOK: rtest.ALLOW, + ListOK: rtest.DEFAULT, + }, + } + + for name, tc := range cases { + t.Run(name, func(t *testing.T) { + rtest.RunACLTestCase(t, tc, registry) + }) + } +} diff --git a/internal/catalog/internal/types/service_test.go b/internal/catalog/internal/types/service_test.go index b47c02218405..18649dda9a0a 100644 --- a/internal/catalog/internal/types/service_test.go +++ b/internal/catalog/internal/types/service_test.go @@ -10,6 +10,7 @@ import ( "google.golang.org/protobuf/reflect/protoreflect" "google.golang.org/protobuf/types/known/anypb" + "github.com/hashicorp/consul/internal/catalog/internal/testhelpers" "github.com/hashicorp/consul/internal/resource" "github.com/hashicorp/consul/internal/resource/resourcetest" pbcatalog "github.com/hashicorp/consul/proto-public/pbcatalog/v2beta1" @@ -275,3 +276,12 @@ func TestValidateService_InvalidVIP(t *testing.T) { require.Error(t, err) require.ErrorIs(t, err, errNotIPAddress) } + +func TestServiceACLs(t *testing.T) { + testhelpers.RunWorkloadSelectingTypeACLsTests[*pbcatalog.Service](t, pbcatalog.ServiceType, + func(selector *pbcatalog.WorkloadSelector) *pbcatalog.Service { + return &pbcatalog.Service{Workloads: selector} + }, + RegisterService, + ) +} diff --git a/internal/catalog/internal/types/types.go b/internal/catalog/internal/types/types.go index 6ce29a265963..15ed6b148de7 100644 --- a/internal/catalog/internal/types/types.go +++ b/internal/catalog/internal/types/types.go @@ -13,8 +13,10 @@ func Register(r resource.Registry) { RegisterServiceEndpoints(r) RegisterNode(r) RegisterHealthStatus(r) - RegisterHealthChecks(r) - RegisterDNSPolicy(r) - RegisterVirtualIPs(r) RegisterFailoverPolicy(r) + + // todo (v2): re-register once these resources are implemented. 
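+ // The Register* helpers for these types are still defined (and exercised by their ACL tests); only the registration calls below are disabled.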
+ //RegisterHealthChecks(r) + //RegisterDNSPolicy(r) + //RegisterVirtualIPs(r) } diff --git a/internal/catalog/internal/types/types_test.go b/internal/catalog/internal/types/types_test.go index ba4243b62805..4facd921c368 100644 --- a/internal/catalog/internal/types/types_test.go +++ b/internal/catalog/internal/types/types_test.go @@ -24,9 +24,9 @@ func TestTypeRegistration(t *testing.T) { pbcatalog.ServiceEndpointsKind, pbcatalog.NodeKind, pbcatalog.HealthStatusKind, - pbcatalog.HealthChecksKind, - pbcatalog.DNSPolicyKind, // todo (ishustava): uncomment once we implement these + //pbcatalog.HealthChecksKind, + //pbcatalog.DNSPolicyKind, //pbcatalog.VirtualIPsKind, } diff --git a/internal/catalog/internal/types/virtual_ips.go b/internal/catalog/internal/types/virtual_ips.go index 7a4cee276ae6..9c7a06547405 100644 --- a/internal/catalog/internal/types/virtual_ips.go +++ b/internal/catalog/internal/types/virtual_ips.go @@ -6,19 +6,28 @@ package types import ( "github.com/hashicorp/go-multierror" + "github.com/hashicorp/consul/acl" "github.com/hashicorp/consul/internal/resource" pbcatalog "github.com/hashicorp/consul/proto-public/pbcatalog/v2beta1" "github.com/hashicorp/consul/proto-public/pbresource" ) func RegisterVirtualIPs(r resource.Registry) { - // todo (ishustava): uncomment when we implement it - //r.Register(resource.Registration{ - // Type: pbcatalog.VirtualIPsV2Beta1Type, - // Proto: &pbcatalog.VirtualIPs{}, - // Scope: resource.ScopeNamespace, - // Validate: ValidateVirtualIPs, - //}) + r.Register(resource.Registration{ + Type: pbcatalog.VirtualIPsType, + Proto: &pbcatalog.VirtualIPs{}, + Scope: resource.ScopeNamespace, + Validate: ValidateVirtualIPs, + ACLs: &resource.ACLHooks{ + Read: func(authorizer acl.Authorizer, context *acl.AuthorizerContext, id *pbresource.ID, p *pbresource.Resource) error { + return authorizer.ToAllowAuthorizer().ServiceReadAllowed(id.GetName(), context) + }, + Write: func(authorizer acl.Authorizer, context *acl.AuthorizerContext, p *pbresource.Resource) error { + return authorizer.ToAllowAuthorizer().ServiceWriteAllowed(p.GetId().GetName(), context) + }, + List: resource.NoOpACLListHook, + }, + }) } func ValidateVirtualIPs(res *pbresource.Resource) error { diff --git a/internal/catalog/internal/types/virtual_ips_test.go b/internal/catalog/internal/types/virtual_ips_test.go index c7ed70972530..0107e1cfd94f 100644 --- a/internal/catalog/internal/types/virtual_ips_test.go +++ b/internal/catalog/internal/types/virtual_ips_test.go @@ -11,6 +11,7 @@ import ( "google.golang.org/protobuf/types/known/anypb" "github.com/hashicorp/consul/internal/resource" + rtest "github.com/hashicorp/consul/internal/resource/resourcetest" pbcatalog "github.com/hashicorp/consul/proto-public/pbcatalog/v2beta1" "github.com/hashicorp/consul/proto-public/pbresource" ) @@ -81,3 +82,47 @@ func TestValidateVirtualIPs_InvalidIP(t *testing.T) { require.Error(t, err) require.ErrorIs(t, err, errNotIPAddress) } + +func TestVirtualIPsACLs(t *testing.T) { + registry := resource.NewRegistry() + RegisterVirtualIPs(registry) + + service := rtest.Resource(pbcatalog.ServiceType, "test"). 
+ WithTenancy(resource.DefaultNamespacedTenancy()).ID() + virtualIPsData := &pbcatalog.VirtualIPs{} + cases := map[string]rtest.ACLTestCase{ + "no rules": { + Rules: ``, + Data: virtualIPsData, + Owner: service, + Typ: pbcatalog.VirtualIPsType, + ReadOK: rtest.DENY, + WriteOK: rtest.DENY, + ListOK: rtest.DEFAULT, + }, + "service test read": { + Rules: `service "test" { policy = "read" }`, + Data: virtualIPsData, + Owner: service, + Typ: pbcatalog.VirtualIPsType, + ReadOK: rtest.ALLOW, + WriteOK: rtest.DENY, + ListOK: rtest.DEFAULT, + }, + "service test write": { + Rules: `service "test" { policy = "write" }`, + Data: virtualIPsData, + Owner: service, + Typ: pbcatalog.VirtualIPsType, + ReadOK: rtest.ALLOW, + WriteOK: rtest.ALLOW, + ListOK: rtest.DEFAULT, + }, + } + + for name, tc := range cases { + t.Run(name, func(t *testing.T) { + rtest.RunACLTestCase(t, tc, registry) + }) + } +} diff --git a/internal/catalog/internal/types/workload.go b/internal/catalog/internal/types/workload.go index 89308667d2c1..db0175d46d1a 100644 --- a/internal/catalog/internal/types/workload.go +++ b/internal/catalog/internal/types/workload.go @@ -9,6 +9,7 @@ import ( "github.com/hashicorp/go-multierror" + "github.com/hashicorp/consul/acl" "github.com/hashicorp/consul/internal/resource" pbcatalog "github.com/hashicorp/consul/proto-public/pbcatalog/v2beta1" "github.com/hashicorp/consul/proto-public/pbresource" @@ -20,6 +21,11 @@ func RegisterWorkload(r resource.Registry) { Proto: &pbcatalog.Workload{}, Scope: resource.ScopeNamespace, Validate: ValidateWorkload, + ACLs: &resource.ACLHooks{ + Read: aclReadHookWorkload, + Write: aclWriteHookWorkload, + List: resource.NoOpACLListHook, + }, }) } @@ -145,3 +151,32 @@ func ValidateWorkload(res *pbresource.Resource) error { return err } + +func aclReadHookWorkload(authorizer acl.Authorizer, authzContext *acl.AuthorizerContext, id *pbresource.ID, _ *pbresource.Resource) error { + return authorizer.ToAllowAuthorizer().ServiceReadAllowed(id.GetName(), authzContext) +} + +func aclWriteHookWorkload(authorizer acl.Authorizer, authzContext *acl.AuthorizerContext, res *pbresource.Resource) error { + decodedWorkload, err := resource.Decode[*pbcatalog.Workload](res) + if err != nil { + return resource.ErrNeedResource + } + + // First check service:write on the workload name. + err = authorizer.ToAllowAuthorizer().ServiceWriteAllowed(res.GetId().GetName(), authzContext) + if err != nil { + return err + } + + // Check node:read permissions if node is specified. + if decodedWorkload.GetData().GetNodeName() != "" { + return authorizer.ToAllowAuthorizer().NodeReadAllowed(decodedWorkload.GetData().GetNodeName(), authzContext) + } + + // Check identity:read permissions if identity is specified. + if decodedWorkload.GetData().GetIdentity() != "" { + return authorizer.ToAllowAuthorizer().IdentityReadAllowed(decodedWorkload.GetData().GetIdentity(), authzContext) + } + + return nil +} diff --git a/internal/catalog/internal/types/workload_selecting.go b/internal/catalog/internal/types/workload_selecting.go new file mode 100644 index 000000000000..6d129bfaa693 --- /dev/null +++ b/internal/catalog/internal/types/workload_selecting.go @@ -0,0 +1,16 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package types + +import ( + "google.golang.org/protobuf/proto" + + pbcatalog "github.com/hashicorp/consul/proto-public/pbcatalog/v2beta1" +) + +// WorkloadSelecting denotes a resource type that uses workload selectors. 
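+// Implementations include catalog types such as Service and HealthChecks as well as mesh configuration types like Destinations and ProxyConfiguration; they share the generic ACLHooksForWorkloadSelectingType hooks and the workload selection mapper.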
+type WorkloadSelecting interface { + proto.Message + GetWorkloads() *pbcatalog.WorkloadSelector +} diff --git a/internal/catalog/internal/types/workload_test.go b/internal/catalog/internal/types/workload_test.go index e55d9a44fd2b..1c7f7b825594 100644 --- a/internal/catalog/internal/types/workload_test.go +++ b/internal/catalog/internal/types/workload_test.go @@ -11,6 +11,7 @@ import ( "google.golang.org/protobuf/types/known/anypb" "github.com/hashicorp/consul/internal/resource" + rtest "github.com/hashicorp/consul/internal/resource/resourcetest" pbcatalog "github.com/hashicorp/consul/proto-public/pbcatalog/v2beta1" "github.com/hashicorp/consul/proto-public/pbresource" ) @@ -304,3 +305,160 @@ func TestValidateWorkload_Locality(t *testing.T) { require.ErrorAs(t, err, &actual) require.Equal(t, expected, actual) } + +func TestWorkloadACLs(t *testing.T) { + registry := resource.NewRegistry() + Register(registry) + + cases := map[string]rtest.ACLTestCase{ + "no rules": { + Rules: ``, + Data: &pbcatalog.Workload{ + Addresses: []*pbcatalog.WorkloadAddress{ + {Host: "1.1.1.1"}, + }, + Ports: map[string]*pbcatalog.WorkloadPort{ + "tcp": {Port: 8080}, + }, + }, + Typ: pbcatalog.WorkloadType, + ReadOK: rtest.DENY, + WriteOK: rtest.DENY, + ListOK: rtest.DEFAULT, + }, + "service test read": { + Rules: `service "test" { policy = "read" }`, + Data: &pbcatalog.Workload{ + Addresses: []*pbcatalog.WorkloadAddress{ + {Host: "1.1.1.1"}, + }, + Ports: map[string]*pbcatalog.WorkloadPort{ + "tcp": {Port: 8080}, + }, + }, + Typ: pbcatalog.WorkloadType, + ReadOK: rtest.ALLOW, + WriteOK: rtest.DENY, + ListOK: rtest.DEFAULT, + }, + "service test write": { + Rules: `service "test" { policy = "write" }`, + Data: &pbcatalog.Workload{ + Addresses: []*pbcatalog.WorkloadAddress{ + {Host: "1.1.1.1"}, + }, + Ports: map[string]*pbcatalog.WorkloadPort{ + "tcp": {Port: 8080}, + }, + }, + Typ: pbcatalog.WorkloadType, + ReadOK: rtest.ALLOW, + WriteOK: rtest.ALLOW, + ListOK: rtest.DEFAULT, + }, + "service test write with node": { + Rules: `service "test" { policy = "write" }`, + Data: &pbcatalog.Workload{ + Addresses: []*pbcatalog.WorkloadAddress{ + {Host: "1.1.1.1"}, + }, + Ports: map[string]*pbcatalog.WorkloadPort{ + "tcp": {Port: 8080}, + }, + NodeName: "test-node", + }, + Typ: pbcatalog.WorkloadType, + ReadOK: rtest.ALLOW, + WriteOK: rtest.DENY, + ListOK: rtest.DEFAULT, + }, + "service test write with workload identity": { + Rules: `service "test" { policy = "write" }`, + Data: &pbcatalog.Workload{ + Addresses: []*pbcatalog.WorkloadAddress{ + {Host: "1.1.1.1"}, + }, + Ports: map[string]*pbcatalog.WorkloadPort{ + "tcp": {Port: 8080}, + }, + Identity: "test-identity", + }, + Typ: pbcatalog.WorkloadType, + ReadOK: rtest.ALLOW, + WriteOK: rtest.DENY, + ListOK: rtest.DEFAULT, + }, + "service test write with workload identity and node": { + Rules: `service "test" { policy = "write" }`, + Data: &pbcatalog.Workload{ + Addresses: []*pbcatalog.WorkloadAddress{ + {Host: "1.1.1.1"}, + }, + Ports: map[string]*pbcatalog.WorkloadPort{ + "tcp": {Port: 8080}, + }, + NodeName: "test-node", + Identity: "test-identity", + }, + Typ: pbcatalog.WorkloadType, + ReadOK: rtest.ALLOW, + WriteOK: rtest.DENY, + ListOK: rtest.DEFAULT, + }, + "service test write with node and node policy": { + Rules: `service "test" { policy = "write" } node "test-node" { policy = "read" }`, + Data: &pbcatalog.Workload{ + Addresses: []*pbcatalog.WorkloadAddress{ + {Host: "1.1.1.1"}, + }, + Ports: map[string]*pbcatalog.WorkloadPort{ + "tcp": {Port: 8080}, + }, + NodeName: 
"test-node", + }, + Typ: pbcatalog.WorkloadType, + ReadOK: rtest.ALLOW, + WriteOK: rtest.ALLOW, + ListOK: rtest.DEFAULT, + }, + "service test write with workload identity and identity policy ": { + Rules: `service "test" { policy = "write" } identity "test-identity" { policy = "read" }`, + Data: &pbcatalog.Workload{ + Addresses: []*pbcatalog.WorkloadAddress{ + {Host: "1.1.1.1"}, + }, + Ports: map[string]*pbcatalog.WorkloadPort{ + "tcp": {Port: 8080}, + }, + Identity: "test-identity", + }, + Typ: pbcatalog.WorkloadType, + ReadOK: rtest.ALLOW, + WriteOK: rtest.ALLOW, + ListOK: rtest.DEFAULT, + }, + "service test write with workload identity and node with both node and identity policy": { + Rules: `service "test" { policy = "write" } identity "test-identity" { policy = "read" } node "test-node" { policy = "read" }`, + Data: &pbcatalog.Workload{ + Addresses: []*pbcatalog.WorkloadAddress{ + {Host: "1.1.1.1"}, + }, + Ports: map[string]*pbcatalog.WorkloadPort{ + "tcp": {Port: 8080}, + }, + NodeName: "test-node", + Identity: "test-identity", + }, + Typ: pbcatalog.WorkloadType, + ReadOK: rtest.ALLOW, + WriteOK: rtest.ALLOW, + ListOK: rtest.DEFAULT, + }, + } + + for name, tc := range cases { + t.Run(name, func(t *testing.T) { + rtest.RunACLTestCase(t, tc, registry) + }) + } +} diff --git a/internal/mesh/internal/mappers/workloadselectionmapper/workload_selection_mapper.go b/internal/mesh/internal/mappers/workloadselectionmapper/workload_selection_mapper.go index 533474e6522a..7b064248414b 100644 --- a/internal/mesh/internal/mappers/workloadselectionmapper/workload_selection_mapper.go +++ b/internal/mesh/internal/mappers/workloadselectionmapper/workload_selection_mapper.go @@ -6,29 +6,21 @@ package workloadselectionmapper import ( "context" - "google.golang.org/protobuf/proto" - + "github.com/hashicorp/consul/internal/catalog" "github.com/hashicorp/consul/internal/controller" "github.com/hashicorp/consul/internal/mesh/internal/mappers/common" "github.com/hashicorp/consul/internal/resource" "github.com/hashicorp/consul/internal/resource/mappers/selectiontracker" "github.com/hashicorp/consul/lib/stringslice" - pbcatalog "github.com/hashicorp/consul/proto-public/pbcatalog/v2beta1" "github.com/hashicorp/consul/proto-public/pbresource" ) -// WorkloadSelecting denotes a resource type that uses workload selectors. -type WorkloadSelecting interface { - proto.Message - GetWorkloads() *pbcatalog.WorkloadSelector -} - -type Mapper[T WorkloadSelecting] struct { +type Mapper[T catalog.WorkloadSelecting] struct { workloadSelectionTracker *selectiontracker.WorkloadSelectionTracker computedType *pbresource.Type } -func New[T WorkloadSelecting](computedType *pbresource.Type) *Mapper[T] { +func New[T catalog.WorkloadSelecting](computedType *pbresource.Type) *Mapper[T] { if computedType == nil { panic("computed type is required") } diff --git a/internal/mesh/internal/types/destination_policy.go b/internal/mesh/internal/types/destination_policy.go index 75a6b9f18fb0..68b37345baf3 100644 --- a/internal/mesh/internal/types/destination_policy.go +++ b/internal/mesh/internal/types/destination_policy.go @@ -24,7 +24,7 @@ func RegisterDestinationPolicy(r resource.Registry) { ACLs: &resource.ACLHooks{ Read: aclReadHookDestinationPolicy, Write: aclWriteHookDestinationPolicy, - List: aclListHookDestinationPolicy, + List: resource.NoOpACLListHook, }, }) } @@ -233,9 +233,3 @@ func aclWriteHookDestinationPolicy(authorizer acl.Authorizer, authzContext *acl. // Check service:write permissions on the service this is controlling. 
return authorizer.ToAllowAuthorizer().ServiceWriteAllowed(serviceName, authzContext) } - -func aclListHookDestinationPolicy(authorizer acl.Authorizer, authzContext *acl.AuthorizerContext) error { - // No-op List permission as we want to default to filtering resources - // from the list using the Read enforcement. - return nil -} diff --git a/internal/mesh/internal/types/destinations.go b/internal/mesh/internal/types/destinations.go index e4da2997e1da..34287e627ab7 100644 --- a/internal/mesh/internal/types/destinations.go +++ b/internal/mesh/internal/types/destinations.go @@ -22,6 +22,7 @@ func RegisterDestinations(r resource.Registry) { Scope: resource.ScopeNamespace, Mutate: MutateDestinations, Validate: ValidateDestinations, + ACLs: catalog.ACLHooksForWorkloadSelectingType[*pbmesh.Destinations](), }) } diff --git a/internal/mesh/internal/types/destinations_configuration.go b/internal/mesh/internal/types/destinations_configuration.go index b5de19d029b5..fedbe40df48c 100644 --- a/internal/mesh/internal/types/destinations_configuration.go +++ b/internal/mesh/internal/types/destinations_configuration.go @@ -7,17 +7,19 @@ import ( "github.com/hashicorp/go-multierror" "github.com/hashicorp/consul/internal/catalog" + "github.com/hashicorp/consul/internal/resource" pbmesh "github.com/hashicorp/consul/proto-public/pbmesh/v2beta1" "github.com/hashicorp/consul/proto-public/pbresource" ) -func RegisterUpstreamsConfiguration(r resource.Registry) { +func RegisterDestinationsConfiguration(r resource.Registry) { r.Register(resource.Registration{ Type: pbmesh.DestinationsConfigurationType, Proto: &pbmesh.DestinationsConfiguration{}, Scope: resource.ScopeNamespace, Validate: ValidateDestinationsConfiguration, + ACLs: catalog.ACLHooksForWorkloadSelectingType[*pbmesh.DestinationsConfiguration](), }) } diff --git a/internal/mesh/internal/types/destinations_configuration_test.go b/internal/mesh/internal/types/destinations_configuration_test.go index 29c7c7cae0f1..11af0732d5d9 100644 --- a/internal/mesh/internal/types/destinations_configuration_test.go +++ b/internal/mesh/internal/types/destinations_configuration_test.go @@ -8,6 +8,7 @@ import ( "github.com/stretchr/testify/require" + catalogtesthelpers "github.com/hashicorp/consul/internal/catalog/catalogtest/helpers" "github.com/hashicorp/consul/internal/resource" "github.com/hashicorp/consul/internal/resource/resourcetest" pbcatalog "github.com/hashicorp/consul/proto-public/pbcatalog/v2beta1" @@ -16,6 +17,15 @@ import ( "github.com/hashicorp/consul/sdk/testutil" ) +func TestDestinationsConfigurationACLs(t *testing.T) { + catalogtesthelpers.RunWorkloadSelectingTypeACLsTests[*pbmesh.DestinationsConfiguration](t, pbmesh.DestinationsConfigurationType, + func(selector *pbcatalog.WorkloadSelector) *pbmesh.DestinationsConfiguration { + return &pbmesh.DestinationsConfiguration{Workloads: selector} + }, + RegisterDestinationsConfiguration, + ) +} + func TestValidateDestinationsConfiguration(t *testing.T) { type testcase struct { data *pbmesh.DestinationsConfiguration diff --git a/internal/mesh/internal/types/destinations_test.go b/internal/mesh/internal/types/destinations_test.go index ca0150a60c3d..55624a6316f4 100644 --- a/internal/mesh/internal/types/destinations_test.go +++ b/internal/mesh/internal/types/destinations_test.go @@ -8,6 +8,7 @@ import ( "github.com/stretchr/testify/require" + catalogtesthelpers "github.com/hashicorp/consul/internal/catalog/catalogtest/helpers" "github.com/hashicorp/consul/internal/resource" 
"github.com/hashicorp/consul/internal/resource/resourcetest" pbcatalog "github.com/hashicorp/consul/proto-public/pbcatalog/v2beta1" @@ -415,3 +416,12 @@ func TestValidateDestinations(t *testing.T) { }) } } + +func TestDestinationsACLs(t *testing.T) { + catalogtesthelpers.RunWorkloadSelectingTypeACLsTests[*pbmesh.Destinations](t, pbmesh.DestinationsType, + func(selector *pbcatalog.WorkloadSelector) *pbmesh.Destinations { + return &pbmesh.Destinations{Workloads: selector} + }, + RegisterDestinations, + ) +} diff --git a/internal/mesh/internal/types/proxy_configuration.go b/internal/mesh/internal/types/proxy_configuration.go index 0c9ac05147e1..081324d72167 100644 --- a/internal/mesh/internal/types/proxy_configuration.go +++ b/internal/mesh/internal/types/proxy_configuration.go @@ -6,10 +6,10 @@ package types import ( "math" - "github.com/hashicorp/go-multierror" - "github.com/hashicorp/consul/internal/catalog" + "github.com/hashicorp/go-multierror" + "github.com/hashicorp/consul/internal/resource" pbmesh "github.com/hashicorp/consul/proto-public/pbmesh/v2beta1" "github.com/hashicorp/consul/proto-public/pbresource" @@ -23,6 +23,7 @@ func RegisterProxyConfiguration(r resource.Registry) { Scope: resource.ScopeNamespace, Mutate: MutateProxyConfiguration, Validate: ValidateProxyConfiguration, + ACLs: catalog.ACLHooksForWorkloadSelectingType[*pbmesh.ProxyConfiguration](), }) } diff --git a/internal/mesh/internal/types/proxy_configuration_test.go b/internal/mesh/internal/types/proxy_configuration_test.go index c504ff2bd03d..f5c52d474c35 100644 --- a/internal/mesh/internal/types/proxy_configuration_test.go +++ b/internal/mesh/internal/types/proxy_configuration_test.go @@ -11,6 +11,7 @@ import ( "github.com/stretchr/testify/require" "google.golang.org/protobuf/types/known/structpb" + catalogtesthelpers "github.com/hashicorp/consul/internal/catalog/catalogtest/helpers" "github.com/hashicorp/consul/internal/resource" "github.com/hashicorp/consul/internal/resource/resourcetest" pbcatalog "github.com/hashicorp/consul/proto-public/pbcatalog/v2beta1" @@ -20,6 +21,18 @@ import ( "github.com/hashicorp/consul/sdk/testutil" ) +func TestProxyConfigurationACLs(t *testing.T) { + catalogtesthelpers.RunWorkloadSelectingTypeACLsTests[*pbmesh.ProxyConfiguration](t, pbmesh.ProxyConfigurationType, + func(selector *pbcatalog.WorkloadSelector) *pbmesh.ProxyConfiguration { + return &pbmesh.ProxyConfiguration{ + Workloads: selector, + DynamicConfig: &pbmesh.DynamicConfig{}, + } + }, + RegisterProxyConfiguration, + ) +} + func TestMutateProxyConfiguration(t *testing.T) { cases := map[string]struct { data *pbmesh.ProxyConfiguration diff --git a/internal/mesh/internal/types/proxy_state_template.go b/internal/mesh/internal/types/proxy_state_template.go index 010d0f9591b3..b84d0e9b45cb 100644 --- a/internal/mesh/internal/types/proxy_state_template.go +++ b/internal/mesh/internal/types/proxy_state_template.go @@ -44,11 +44,7 @@ func RegisterProxyStateTemplate(r resource.Registry) { // managed by a controller. return authorizer.ToAllowAuthorizer().OperatorWriteAllowed(authzContext) }, - List: func(authorizer acl.Authorizer, authzContext *acl.AuthorizerContext) error { - // No-op List permission as we want to default to filtering resources - // from the list using the Read enforcement. 
- return nil - }, + List: resource.NoOpACLListHook, }, }) } diff --git a/internal/mesh/internal/types/types.go b/internal/mesh/internal/types/types.go index 20b23604fc6e..cf1443aabf18 100644 --- a/internal/mesh/internal/types/types.go +++ b/internal/mesh/internal/types/types.go @@ -12,11 +12,12 @@ func Register(r resource.Registry) { RegisterComputedProxyConfiguration(r) RegisterDestinations(r) RegisterComputedExplicitDestinations(r) - RegisterUpstreamsConfiguration(r) RegisterProxyStateTemplate(r) RegisterHTTPRoute(r) RegisterTCPRoute(r) RegisterGRPCRoute(r) RegisterDestinationPolicy(r) RegisterComputedRoutes(r) + // todo (v2): uncomment once we implement it. + //RegisterDestinationsConfiguration(r) } diff --git a/internal/mesh/internal/types/types_test.go b/internal/mesh/internal/types/types_test.go index 631e1b4be8d0..801d3de01846 100644 --- a/internal/mesh/internal/types/types_test.go +++ b/internal/mesh/internal/types/types_test.go @@ -21,13 +21,14 @@ func TestTypeRegistration(t *testing.T) { requiredKinds := []string{ pbmesh.ProxyConfigurationKind, pbmesh.DestinationsKind, - pbmesh.DestinationsConfigurationKind, pbmesh.ProxyStateTemplateKind, pbmesh.HTTPRouteKind, pbmesh.TCPRouteKind, pbmesh.GRPCRouteKind, pbmesh.DestinationPolicyKind, pbmesh.ComputedRoutesKind, + // todo (v2): re-enable once we implement it. + //pbmesh.DestinationsConfigurationKind, } r := resource.NewRegistry() diff --git a/internal/mesh/internal/types/xroute.go b/internal/mesh/internal/types/xroute.go index 1c60bdcb1c54..619c9cb68243 100644 --- a/internal/mesh/internal/types/xroute.go +++ b/internal/mesh/internal/types/xroute.go @@ -290,7 +290,7 @@ func xRouteACLHooks[R XRouteData]() *resource.ACLHooks { hooks := &resource.ACLHooks{ Read: aclReadHookXRoute[R], Write: aclWriteHookXRoute[R], - List: aclListHookXRoute[R], + List: resource.NoOpACLListHook, } return hooks @@ -298,7 +298,7 @@ func xRouteACLHooks[R XRouteData]() *resource.ACLHooks { func aclReadHookXRoute[R XRouteData](authorizer acl.Authorizer, _ *acl.AuthorizerContext, _ *pbresource.ID, res *pbresource.Resource) error { if res == nil { - return resource.ErrNeedData + return resource.ErrNeedResource } dec, err := resource.Decode[R](res) @@ -351,9 +351,3 @@ func aclWriteHookXRoute[R XRouteData](authorizer acl.Authorizer, _ *acl.Authoriz return nil } - -func aclListHookXRoute[R XRouteData](authorizer acl.Authorizer, authzContext *acl.AuthorizerContext) error { - // No-op List permission as we want to default to filtering resources - // from the list using the Read enforcement. 
- return nil -} diff --git a/internal/mesh/internal/types/xroute_test.go b/internal/mesh/internal/types/xroute_test.go index 09806bea8c7e..cd6d6d766327 100644 --- a/internal/mesh/internal/types/xroute_test.go +++ b/internal/mesh/internal/types/xroute_test.go @@ -458,7 +458,7 @@ func testXRouteACLs[R XRouteData](t *testing.T, newRoute func(t *testing.T, pare require.True(t, ok) err = reg.ACLs.Read(authz, &acl.AuthorizerContext{}, tc.res.Id, nil) - require.ErrorIs(t, err, resource.ErrNeedData, "read hook should require the data payload") + require.ErrorIs(t, err, resource.ErrNeedResource, "read hook should require the data payload") checkF(t, "read", tc.readOK, reg.ACLs.Read(authz, &acl.AuthorizerContext{}, tc.res.Id, tc.res)) checkF(t, "write", tc.writeOK, reg.ACLs.Write(authz, &acl.AuthorizerContext{}, tc.res)) diff --git a/internal/resource/acls.go b/internal/resource/acls.go new file mode 100644 index 000000000000..55a5872fc0de --- /dev/null +++ b/internal/resource/acls.go @@ -0,0 +1,13 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package resource + +import "github.com/hashicorp/consul/acl" + +// NoOpACLListHook is a common function that can be used if no special list permission is required for a resource. +func NoOpACLListHook(_ acl.Authorizer, _ *acl.AuthorizerContext) error { + // No-op List permission as we want to default to filtering resources + // from the list using the Read enforcement. + return nil +} diff --git a/internal/resource/demo/demo.go b/internal/resource/demo/demo.go index b6a9263842d3..8e978c9fb49a 100644 --- a/internal/resource/demo/demo.go +++ b/internal/resource/demo/demo.go @@ -97,7 +97,7 @@ func RegisterTypes(r resource.Registry) { readACL := func(authz acl.Authorizer, authzContext *acl.AuthorizerContext, id *pbresource.ID, res *pbresource.Resource) error { if resource.EqualType(TypeV1RecordLabel, id.Type) { if res == nil { - return resource.ErrNeedData + return resource.ErrNeedResource } } key := fmt.Sprintf("resource/%s/%s", resource.ToGVK(id.Type), id.Name) diff --git a/internal/resource/errors.go b/internal/resource/errors.go index 2602e8c5b6ca..24dd96e90ec4 100644 --- a/internal/resource/errors.go +++ b/internal/resource/errors.go @@ -137,6 +137,20 @@ type ErrOwnerTenantInvalid struct { } func (err ErrOwnerTenantInvalid) Error() string { + if err.ResourceTenancy == nil && err.OwnerTenancy != nil { + return fmt.Sprintf( + "empty resource tenancy cannot be owned by a resource in partition %s, namespace %s and peer %s", + err.OwnerTenancy.Partition, err.OwnerTenancy.Namespace, err.OwnerTenancy.PeerName, + ) + } + + if err.ResourceTenancy != nil && err.OwnerTenancy == nil { + return fmt.Sprintf( + "resource in partition %s, namespace %s and peer %s cannot be owned by a resource with empty tenancy", + err.ResourceTenancy.Partition, err.ResourceTenancy.Namespace, err.ResourceTenancy.PeerName, + ) + } + return fmt.Sprintf( "resource in partition %s, namespace %s and peer %s cannot be owned by a resource in partition %s, namespace %s and peer %s", err.ResourceTenancy.Partition, err.ResourceTenancy.Namespace, err.ResourceTenancy.PeerName, diff --git a/internal/resource/registry.go b/internal/resource/registry.go index 2b004b6b4c0c..20c1f4dc41a8 100644 --- a/internal/resource/registry.go +++ b/internal/resource/registry.go @@ -68,14 +68,14 @@ type Registration struct { Scope Scope } -var ErrNeedData = errors.New("authorization check requires resource data") +var ErrNeedResource = errors.New("authorization check requires the entire 
resource") type ACLHooks struct { // Read is used to authorize Read RPCs and to filter results in List // RPCs. // // It can be called an ID and possibly a Resource. The check will first - // attempt to use the ID and if the hook returns ErrNeedData, then the + // attempt to use the ID and if the hook returns ErrNeedResource, then the // check will be deferred until the data is fetched from the storage layer. // // If it is omitted, `operator:read` permission is assumed. diff --git a/internal/resource/resourcetest/acls.go b/internal/resource/resourcetest/acls.go new file mode 100644 index 000000000000..4aff9e30327b --- /dev/null +++ b/internal/resource/resourcetest/acls.go @@ -0,0 +1,85 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package resourcetest + +import ( + "testing" + + "github.com/stretchr/testify/require" + "google.golang.org/protobuf/reflect/protoreflect" + + "github.com/hashicorp/consul/acl" + "github.com/hashicorp/consul/agent/structs" + "github.com/hashicorp/consul/internal/resource" + "github.com/hashicorp/consul/proto-public/pbresource" +) + +const ( + DENY = "deny" + ALLOW = "allow" + DEFAULT = "default" +) + +var checkF = func(t *testing.T, expect string, got error) { + switch expect { + case ALLOW: + if acl.IsErrPermissionDenied(got) { + t.Fatal("should be allowed") + } + case DENY: + if !acl.IsErrPermissionDenied(got) { + t.Fatal("should be denied") + } + case DEFAULT: + require.Nil(t, got, "expected fallthrough decision") + default: + t.Fatalf("unexpected expectation: %q", expect) + } +} + +type ACLTestCase struct { + Rules string + Data protoreflect.ProtoMessage + Owner *pbresource.ID + Typ *pbresource.Type + ReadOK string + WriteOK string + ListOK string +} + +func RunACLTestCase(t *testing.T, tc ACLTestCase, registry resource.Registry) { + reg, ok := registry.Resolve(tc.Typ) + require.True(t, ok) + + resolvedType, ok := registry.Resolve(tc.Typ) + require.True(t, ok) + + res := Resource(tc.Typ, "test"). + WithTenancy(DefaultTenancyForType(t, resolvedType)). + WithOwner(tc.Owner). + WithData(t, tc.Data). 
+ Build() + + ValidateAndNormalize(t, registry, res) + + config := acl.Config{ + WildcardName: structs.WildcardSpecifier, + } + authz, err := acl.NewAuthorizerFromRules(tc.Rules, &config, nil) + require.NoError(t, err) + authz = acl.NewChainedAuthorizer([]acl.Authorizer{authz, acl.DenyAll()}) + + t.Run("read", func(t *testing.T) { + err := reg.ACLs.Read(authz, &acl.AuthorizerContext{}, res.Id, res) + checkF(t, tc.ReadOK, err) + }) + t.Run("write", func(t *testing.T) { + err := reg.ACLs.Write(authz, &acl.AuthorizerContext{}, res) + checkF(t, tc.WriteOK, err) + }) + t.Run("list", func(t *testing.T) { + err := reg.ACLs.List(authz, &acl.AuthorizerContext{}) + checkF(t, tc.ListOK, err) + }) +} diff --git a/internal/resource/resourcetest/tenancy.go b/internal/resource/resourcetest/tenancy.go index 838379cebbed..5f5c0525b6f4 100644 --- a/internal/resource/resourcetest/tenancy.go +++ b/internal/resource/resourcetest/tenancy.go @@ -5,6 +5,7 @@ package resourcetest import ( "strings" + "testing" "github.com/hashicorp/consul/internal/resource" "github.com/hashicorp/consul/proto-public/pbresource" @@ -35,3 +36,17 @@ func Tenancy(s string) *pbresource.Tenancy { return &pbresource.Tenancy{Partition: "BAD", Namespace: "BAD", PeerName: "BAD"} } } + +func DefaultTenancyForType(t *testing.T, reg resource.Registration) *pbresource.Tenancy { + switch reg.Scope { + case resource.ScopeNamespace: + return resource.DefaultNamespacedTenancy() + case resource.ScopePartition: + return resource.DefaultPartitionedTenancy() + case resource.ScopeCluster: + return resource.DefaultClusteredTenancy() + default: + t.Fatalf("unsupported resource scope: %v", reg.Scope) + return nil + } +} From a7fbd008652dd50beee51ad3fa5f6e617816baf9 Mon Sep 17 00:00:00 2001 From: John Murret Date: Sat, 14 Oct 2023 07:54:08 -0600 Subject: [PATCH 007/130] NET-5073 - ProxyConfiguration: implement various connection options (#19187) * NET-5073 - ProxyConfiguration: implement various connection options * PR feedback - LocalConnection and InboundConnection do not affect exposed routes. configure L7 route destinations. fix connection proto sequence numbers. * add timeout to L7 Route Destinations --- .../sidecarproxy/builder/expose_paths.go | 2 +- .../sidecarproxy/builder/local_app.go | 85 ++++- .../sidecarproxy/builder/local_app_test.go | 70 +++- .../local-and-inbound-connections.golden | 300 ++++++++++++++++++ proto-public/pbmesh/v2beta1/connection.pb.go | 10 +- proto-public/pbmesh/v2beta1/connection.proto | 4 +- 6 files changed, 444 insertions(+), 27 deletions(-) create mode 100644 internal/mesh/internal/controllers/sidecarproxy/builder/testdata/source/local-and-inbound-connections.golden diff --git a/internal/mesh/internal/controllers/sidecarproxy/builder/expose_paths.go b/internal/mesh/internal/controllers/sidecarproxy/builder/expose_paths.go index e5baaa2cba59..6215ab540ca5 100644 --- a/internal/mesh/internal/controllers/sidecarproxy/builder/expose_paths.go +++ b/internal/mesh/internal/controllers/sidecarproxy/builder/expose_paths.go @@ -22,7 +22,7 @@ func (b *Builder) buildExposePaths(workload *pbcatalog.Workload) { buildListener() b.addExposePathsRoute(exposePath, clusterName). - addLocalAppCluster(clusterName). + addLocalAppCluster(clusterName, nil). 
addLocalAppStaticEndpoints(clusterName, exposePath.LocalPathPort) } } diff --git a/internal/mesh/internal/controllers/sidecarproxy/builder/local_app.go b/internal/mesh/internal/controllers/sidecarproxy/builder/local_app.go index e6ba896b68e0..54ea6db57816 100644 --- a/internal/mesh/internal/controllers/sidecarproxy/builder/local_app.go +++ b/internal/mesh/internal/controllers/sidecarproxy/builder/local_app.go @@ -6,6 +6,9 @@ package builder import ( "fmt" + pbmesh "github.com/hashicorp/consul/proto-public/pbmesh/v2beta1" + "google.golang.org/protobuf/types/known/wrapperspb" + "github.com/hashicorp/consul/agent/connect" "github.com/hashicorp/consul/envoyextensions/xdscommon" pbauth "github.com/hashicorp/consul/proto-public/pbauth/v2beta1" @@ -31,13 +34,13 @@ func (b *Builder) BuildLocalApp(workload *pbcatalog.Workload, ctp *pbauth.Comput if port.Protocol != pbcatalog.Protocol_PROTOCOL_MESH { foundInboundNonMeshPorts = true - lb.addInboundRouter(clusterName, routeName, port, portName, trafficPermissions[portName]). + lb.addInboundRouter(clusterName, routeName, port, portName, trafficPermissions[portName], b.proxyCfg.GetDynamicConfig().GetInboundConnections()). addInboundTLS() if isL7(port.Protocol) { - b.addLocalAppRoute(routeName, clusterName) + b.addLocalAppRoute(routeName, clusterName, portName) } - b.addLocalAppCluster(clusterName). + b.addLocalAppCluster(clusterName, &portName). addLocalAppStaticEndpoints(clusterName, port.GetPort()) } } @@ -264,10 +267,16 @@ func (b *Builder) addInboundListener(name string, workload *pbcatalog.Workload) // Add TLS inspection capability to be able to parse ALPN and/or SNI information from inbound connections. listener.Capabilities = append(listener.Capabilities, pbproxystate.Capability_CAPABILITY_L4_TLS_INSPECTION) + if b.proxyCfg.GetDynamicConfig() != nil && b.proxyCfg.GetDynamicConfig().InboundConnections != nil { + listener.BalanceConnections = pbproxystate.BalanceConnections(b.proxyCfg.DynamicConfig.InboundConnections.BalanceInboundConnections) + } return b.NewListenerBuilder(listener) } -func (l *ListenerBuilder) addInboundRouter(clusterName string, routeName string, port *pbcatalog.WorkloadPort, portName string, tp *pbproxystate.TrafficPermissions) *ListenerBuilder { +func (l *ListenerBuilder) addInboundRouter(clusterName string, routeName string, + port *pbcatalog.WorkloadPort, portName string, tp *pbproxystate.TrafficPermissions, + ic *pbmesh.InboundConnectionsConfig) *ListenerBuilder { + if l.listener == nil { return l } @@ -289,6 +298,15 @@ func (l *ListenerBuilder) addInboundRouter(clusterName string, routeName string, AlpnProtocols: []string{getAlpnProtocolFromPortName(portName)}, }, } + + if ic != nil { + // MaxInboundConnections is uint32 that is used on: + // - router destinations MaxInboundConnection (uint64). + // - cluster circuit breakers UpstreamLimits.MaxConnections (uint32). + // It is cast to a uint64 here similarly as it is to the proxystateconverter code. + r.GetL4().MaxInboundConnections = uint64(ic.MaxInboundConnections) + } + l.listener.Routers = append(l.listener.Routers, r) } else if isL7(port.Protocol) { r := &pbproxystate.Router{ @@ -308,6 +326,13 @@ func (l *ListenerBuilder) addInboundRouter(clusterName string, routeName string, AlpnProtocols: []string{getAlpnProtocolFromPortName(portName)}, }, } + + if ic != nil { + // MaxInboundConnections is cast to a uint64 here similarly as it is to the + // as the L4 case statement above and in proxystateconverter code. 
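+ // The configured value is a uint32 on the wire, while the router's field is a uint64, so the conversion widens the value and cannot truncate it.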
+ r.GetL7().MaxInboundConnections = uint64(ic.MaxInboundConnections) + } + l.listener.Routers = append(l.listener.Routers, r) } return l @@ -339,7 +364,7 @@ func getAlpnProtocolFromPortName(portName string) string { return fmt.Sprintf("consul~%s", portName) } -func (b *Builder) addLocalAppRoute(routeName string, clusterName string) { +func (b *Builder) addLocalAppRoute(routeName, clusterName, portName string) { proxyRouteRule := &pbproxystate.RouteRule{ Match: &pbproxystate.RouteMatch{ PathMatch: &pbproxystate.PathMatch{ @@ -356,6 +381,18 @@ func (b *Builder) addLocalAppRoute(routeName string, clusterName string) { }, }, } + if b.proxyCfg.GetDynamicConfig() != nil && b.proxyCfg.GetDynamicConfig().LocalConnection != nil { + lc, lcOK := b.proxyCfg.GetDynamicConfig().LocalConnection[portName] + if lcOK { + proxyRouteRule.Destination.DestinationConfiguration = + &pbproxystate.DestinationConfiguration{ + TimeoutConfig: &pbproxystate.TimeoutConfig{ + Timeout: lc.RequestTimeout, + }, + } + } + } + // Each route name for the local app is listenerName:port since there is a route per port on the local app listener. b.addRoute(routeName, &pbproxystate.Route{ VirtualHosts: []*pbproxystate.VirtualHost{{ @@ -373,9 +410,9 @@ func isL7(protocol pbcatalog.Protocol) bool { return false } -func (b *Builder) addLocalAppCluster(clusterName string) *Builder { +func (b *Builder) addLocalAppCluster(clusterName string, portName *string) *Builder { // Make cluster for this router destination. - b.proxyStateTemplate.ProxyState.Clusters[clusterName] = &pbproxystate.Cluster{ + cluster := &pbproxystate.Cluster{ Group: &pbproxystate.Cluster_EndpointGroup{ EndpointGroup: &pbproxystate.EndpointGroup{ Group: &pbproxystate.EndpointGroup_Static{ @@ -384,20 +421,34 @@ func (b *Builder) addLocalAppCluster(clusterName string) *Builder { }, }, } + + // configure inbound connections or connection timeout if either is defined + if b.proxyCfg.GetDynamicConfig() != nil && portName != nil { + lc, lcOK := b.proxyCfg.DynamicConfig.LocalConnection[*portName] + + if lcOK || b.proxyCfg.DynamicConfig.InboundConnections != nil { + cluster.GetEndpointGroup().GetStatic().Config = &pbproxystate.StaticEndpointGroupConfig{} + + if lcOK { + cluster.GetEndpointGroup().GetStatic().GetConfig().ConnectTimeout = lc.ConnectTimeout + } + + if b.proxyCfg.DynamicConfig.InboundConnections != nil { + cluster.GetEndpointGroup().GetStatic().GetConfig().CircuitBreakers = &pbproxystate.CircuitBreakers{ + UpstreamLimits: &pbproxystate.UpstreamLimits{ + MaxConnections: &wrapperspb.UInt32Value{Value: b.proxyCfg.DynamicConfig.InboundConnections.MaxInboundConnections}, + }, + } + } + } + } + + b.proxyStateTemplate.ProxyState.Clusters[clusterName] = cluster return b } func (b *Builder) addBlackHoleCluster() *Builder { - b.proxyStateTemplate.ProxyState.Clusters[xdscommon.BlackHoleClusterName] = &pbproxystate.Cluster{ - Group: &pbproxystate.Cluster_EndpointGroup{ - EndpointGroup: &pbproxystate.EndpointGroup{ - Group: &pbproxystate.EndpointGroup_Static{ - Static: &pbproxystate.StaticEndpointGroup{}, - }, - }, - }, - } - return b + return b.addLocalAppCluster(xdscommon.BlackHoleClusterName, nil) } func (b *Builder) addLocalAppStaticEndpoints(clusterName string, port uint32) { diff --git a/internal/mesh/internal/controllers/sidecarproxy/builder/local_app_test.go b/internal/mesh/internal/controllers/sidecarproxy/builder/local_app_test.go index 785e0aab2e2e..33dcab715522 100644 --- a/internal/mesh/internal/controllers/sidecarproxy/builder/local_app_test.go +++ 
b/internal/mesh/internal/controllers/sidecarproxy/builder/local_app_test.go @@ -4,7 +4,10 @@ package builder import ( + "google.golang.org/protobuf/types/known/durationpb" + "sort" "testing" + "time" "github.com/stretchr/testify/require" @@ -139,6 +142,55 @@ func TestBuildLocalApp_WithProxyConfiguration(t *testing.T) { }, }, }, + // source/local-and-inbound-connections shows that configuring LocalCOnnection + // and InboundConnections in DynamicConfig will set fields on standard clusters and routes, + // but will not set fields on exposed path clusters and routes. + "source/local-and-inbound-connections": { + workload: &pbcatalog.Workload{ + Addresses: []*pbcatalog.WorkloadAddress{ + { + Host: "10.0.0.1", + }, + }, + Ports: map[string]*pbcatalog.WorkloadPort{ + "port1": {Port: 8080, Protocol: pbcatalog.Protocol_PROTOCOL_TCP}, + "port2": {Port: 20000, Protocol: pbcatalog.Protocol_PROTOCOL_MESH}, + "port3": {Port: 8081, Protocol: pbcatalog.Protocol_PROTOCOL_HTTP}, + }, + }, + proxyCfg: &pbmesh.ComputedProxyConfiguration{ + DynamicConfig: &pbmesh.DynamicConfig{ + LocalConnection: map[string]*pbmesh.ConnectionConfig{ + "port1": { + ConnectTimeout: durationpb.New(6 * time.Second), + RequestTimeout: durationpb.New(7 * time.Second)}, + "port3": { + ConnectTimeout: durationpb.New(8 * time.Second), + RequestTimeout: durationpb.New(9 * time.Second)}, + }, + InboundConnections: &pbmesh.InboundConnectionsConfig{ + MaxInboundConnections: 123, + BalanceInboundConnections: pbmesh.BalanceConnections(pbproxystate.BalanceConnections_BALANCE_CONNECTIONS_EXACT), + }, + ExposeConfig: &pbmesh.ExposeConfig{ + ExposePaths: []*pbmesh.ExposePath{ + { + ListenerPort: 1234, + Path: "/health", + LocalPathPort: 9090, + Protocol: pbmesh.ExposePathProtocol_EXPOSE_PATH_PROTOCOL_HTTP, + }, + { + ListenerPort: 1235, + Path: "GetHealth", + LocalPathPort: 9091, + Protocol: pbmesh.ExposePathProtocol_EXPOSE_PATH_PROTOCOL_HTTP2, + }, + }, + }, + }, + }, + }, } for name, c := range cases { @@ -146,10 +198,24 @@ func TestBuildLocalApp_WithProxyConfiguration(t *testing.T) { proxyTmpl := New(testProxyStateTemplateID(), testIdentityRef(), "foo.consul", "dc1", true, c.proxyCfg). BuildLocalApp(c.workload, nil). Build() + + // sort routers because of test flakes where order was flip flopping. + actualRouters := proxyTmpl.ProxyState.Listeners[0].Routers + sort.Slice(actualRouters, func(i, j int) bool { + return actualRouters[i].String() < actualRouters[j].String() + }) + actual := protoToJSON(t, proxyTmpl) - expected := golden.Get(t, actual, name+".golden") + expected := JSONToProxyTemplate(t, golden.GetBytes(t, actual, name+".golden")) - require.JSONEq(t, expected, actual) + // sort routers on listener from golden file + expectedRouters := expected.ProxyState.Listeners[0].Routers + sort.Slice(expectedRouters, func(i, j int) bool { + return expectedRouters[i].String() < expectedRouters[j].String() + }) + + // convert back to json after sorting so that test output does not contain extraneous fields. 
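+ // Comparing the JSON renderings rather than the proto structs keeps failure output readable and avoids noise from internal protobuf bookkeeping fields.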
+ require.Equal(t, protoToJSON(t, expected), protoToJSON(t, proxyTmpl)) }) } } diff --git a/internal/mesh/internal/controllers/sidecarproxy/builder/testdata/source/local-and-inbound-connections.golden b/internal/mesh/internal/controllers/sidecarproxy/builder/testdata/source/local-and-inbound-connections.golden new file mode 100644 index 000000000000..169a4969aefd --- /dev/null +++ b/internal/mesh/internal/controllers/sidecarproxy/builder/testdata/source/local-and-inbound-connections.golden @@ -0,0 +1,300 @@ +{ + "proxyState": { + "clusters": { + "exposed_cluster_9090": { + "endpointGroup": { + "static": {} + }, + "name": "exposed_cluster_9090" + }, + "exposed_cluster_9091": { + "endpointGroup": { + "static": {} + }, + "name": "exposed_cluster_9091" + }, + "local_app:port1": { + "endpointGroup": { + "static": { + "config": { + "connectTimeout": "6s", + "circuitBreakers": { + "upstreamLimits": { + "maxConnections": 123 + } + } + } + } + }, + "name": "local_app:port1" + }, + "local_app:port3": { + "endpointGroup": { + "static": { + "config": { + "connectTimeout": "8s", + "circuitBreakers": { + "upstreamLimits": { + "maxConnections": 123 + } + } + } + } + }, + "name": "local_app:port3" + } + }, + "endpoints": { + "exposed_cluster_9090": { + "endpoints": [ + { + "hostPort": { + "host": "127.0.0.1", + "port": 9090 + } + } + ] + }, + "exposed_cluster_9091": { + "endpoints": [ + { + "hostPort": { + "host": "127.0.0.1", + "port": 9091 + } + } + ] + }, + "local_app:port1": { + "endpoints": [ + { + "hostPort": { + "host": "127.0.0.1", + "port": 8080 + } + } + ] + }, + "local_app:port3": { + "endpoints": [ + { + "hostPort": { + "host": "127.0.0.1", + "port": 8081 + } + } + ] + } + }, + "identity": { + "name": "test-identity", + "tenancy": { + "namespace": "default", + "partition": "default", + "peerName": "local" + }, + "type": { + "group": "auth", + "groupVersion": "v2beta1", + "kind": "WorkloadIdentity" + } + }, + "listeners": [ + { + "capabilities": [ + "CAPABILITY_L4_TLS_INSPECTION" + ], + "direction": "DIRECTION_INBOUND", + "hostPort": { + "host": "10.0.0.1", + "port": 20000 + }, + "name": "public_listener", + "balanceConnections": "BALANCE_CONNECTIONS_EXACT", + "routers": [ + { + "inboundTls": { + "inboundMesh": { + "identityKey": "test-identity", + "validationContext": { + "trustBundlePeerNameKeys": [ + "local" + ] + } + } + }, + "l4": { + "cluster": { + "name": "local_app:port1" + }, + "maxInboundConnections": 123, + "statPrefix": "public_listener", + "trafficPermissions": {} + }, + "match": { + "alpnProtocols": [ + "consul~port1" + ] + } + }, + { + "inboundTls": { + "inboundMesh": { + "identityKey": "test-identity", + "validationContext": { + "trustBundlePeerNameKeys": [ + "local" + ] + } + } + }, + "l7": { + "route": { + "name": "public_listener:port3" + }, + "maxInboundConnections": 123, + "statPrefix": "public_listener", + "staticRoute": true, + "trafficPermissions": {} + }, + "match": { + "alpnProtocols": [ + "consul~port3" + ] + } + } + ] + }, + { + "direction": "DIRECTION_INBOUND", + "hostPort": { + "host": "10.0.0.1", + "port": 9090 + }, + "name": "exposed_path_health", + "routers": [ + { + "l7": { + "route": { + "name": "exposed_path_filter_health_1234" + }, + "statPrefix": "exposed_path_filter_health_1234", + "staticRoute": true + } + } + ] + }, + { + "direction": "DIRECTION_INBOUND", + "hostPort": { + "host": "10.0.0.1", + "port": 9091 + }, + "name": "exposed_path_GetHealth", + "routers": [ + { + "l7": { + "protocol": "L7_PROTOCOL_HTTP2", + "route": { + "name": 
"exposed_path_filter_GetHealth_1235" + }, + "statPrefix": "exposed_path_filter_GetHealth_1235", + "staticRoute": true + } + } + ] + } + ], + "routes": { + "exposed_path_filter_GetHealth_1235": { + "virtualHosts": [ + { + "domains": [ + "*" + ], + "name": "exposed_path_filter_GetHealth_1235", + "routeRules": [ + { + "destination": { + "cluster": { + "name": "exposed_cluster_9091" + } + }, + "match": { + "pathMatch": { + "exact": "GetHealth" + } + } + } + ] + } + ] + }, + "exposed_path_filter_health_1234": { + "virtualHosts": [ + { + "domains": [ + "*" + ], + "name": "exposed_path_filter_health_1234", + "routeRules": [ + { + "destination": { + "cluster": { + "name": "exposed_cluster_9090" + } + }, + "match": { + "pathMatch": { + "exact": "/health" + } + } + } + ] + } + ] + }, + "public_listener:port3": { + "virtualHosts": [ + { + "domains": [ + "*" + ], + "name": "public_listener:port3", + "routeRules": [ + { + "destination": { + "cluster": { + "name": "local_app:port3" + }, + "destinationConfiguration": { + "timeoutConfig": { + "timeout": "9s" + } + } + }, + "match": { + "pathMatch": { + "prefix": "/" + } + } + } + ] + } + ] + } + } + }, + "requiredLeafCertificates": { + "test-identity": { + "name": "test-identity", + "namespace": "default", + "partition": "default" + } + }, + "requiredTrustBundles": { + "local": { + "peer": "local" + } + } +} \ No newline at end of file diff --git a/proto-public/pbmesh/v2beta1/connection.pb.go b/proto-public/pbmesh/v2beta1/connection.pb.go index d8ccaba7d74e..591df222fc93 100644 --- a/proto-public/pbmesh/v2beta1/connection.pb.go +++ b/proto-public/pbmesh/v2beta1/connection.pb.go @@ -137,8 +137,8 @@ type InboundConnectionsConfig struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - MaxInboundConnections uint64 `protobuf:"varint,12,opt,name=max_inbound_connections,json=maxInboundConnections,proto3" json:"max_inbound_connections,omitempty"` - BalanceInboundConnections BalanceConnections `protobuf:"varint,13,opt,name=balance_inbound_connections,json=balanceInboundConnections,proto3,enum=hashicorp.consul.mesh.v2beta1.BalanceConnections" json:"balance_inbound_connections,omitempty"` + MaxInboundConnections uint32 `protobuf:"varint,1,opt,name=max_inbound_connections,json=maxInboundConnections,proto3" json:"max_inbound_connections,omitempty"` + BalanceInboundConnections BalanceConnections `protobuf:"varint,2,opt,name=balance_inbound_connections,json=balanceInboundConnections,proto3,enum=hashicorp.consul.mesh.v2beta1.BalanceConnections" json:"balance_inbound_connections,omitempty"` } func (x *InboundConnectionsConfig) Reset() { @@ -173,7 +173,7 @@ func (*InboundConnectionsConfig) Descriptor() ([]byte, []int) { return file_pbmesh_v2beta1_connection_proto_rawDescGZIP(), []int{1} } -func (x *InboundConnectionsConfig) GetMaxInboundConnections() uint64 { +func (x *InboundConnectionsConfig) GetMaxInboundConnections() uint32 { if x != nil { return x.MaxInboundConnections } @@ -209,11 +209,11 @@ var file_pbmesh_v2beta1_connection_proto_rawDesc = []byte{ 0x0a, 0x18, 0x49, 0x6e, 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x36, 0x0a, 0x17, 0x6d, 0x61, 0x78, 0x5f, 0x69, 0x6e, 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x04, 0x52, 0x15, 0x6d, 0x61, 0x78, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x15, 0x6d, 0x61, 0x78, 0x49, 0x6e, 0x62, 
0x6f, 0x75, 0x6e, 0x64, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x71, 0x0a, 0x1b, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x5f, 0x69, 0x6e, 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x31, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, + 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x31, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x6d, 0x65, 0x73, 0x68, 0x2e, 0x76, 0x32, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x19, 0x62, 0x61, 0x6c, 0x61, diff --git a/proto-public/pbmesh/v2beta1/connection.proto b/proto-public/pbmesh/v2beta1/connection.proto index fdb7217a123b..65cb21e586dd 100644 --- a/proto-public/pbmesh/v2beta1/connection.proto +++ b/proto-public/pbmesh/v2beta1/connection.proto @@ -17,8 +17,8 @@ message ConnectionConfig { // Referenced by ProxyConfiguration message InboundConnectionsConfig { - uint64 max_inbound_connections = 12; - BalanceConnections balance_inbound_connections = 13; + uint32 max_inbound_connections = 1; + BalanceConnections balance_inbound_connections = 2; } // +kubebuilder:validation:Enum=BALANCE_CONNECTIONS_DEFAULT;BALANCE_CONNECTIONS_EXACT From 3716b69792c527c84e212eff9503622961941fac Mon Sep 17 00:00:00 2001 From: modrake <12264057+modrake@users.noreply.github.com> Date: Mon, 16 Oct 2023 08:53:31 -0700 Subject: [PATCH 008/130] Relplat 897 copywrite bot workarounds (#19200) Co-authored-by: Ronald Ekambi --- Makefile | 3 +++ api/.copywrite.hcl | 8 ++++++++ api/config_entry_routes_test.go | 3 +++ api/config_entry_status_test.go | 3 +++ api/internal.go | 3 +++ api/internal_test.go | 3 +++ api/operator_audit.go | 2 +- build-support/scripts/copywrite-exceptions.sh | 14 ++++++++++++++ sdk/.copywrite.hcl | 8 ++++++++ sdk/testutil/retry/counter.go | 3 +++ sdk/testutil/retry/timer.go | 3 +++ 11 files changed, 52 insertions(+), 1 deletion(-) create mode 100644 api/.copywrite.hcl create mode 100755 build-support/scripts/copywrite-exceptions.sh create mode 100644 sdk/.copywrite.hcl diff --git a/Makefile b/Makefile index 4ee07faef500..4d804062377e 100644 --- a/Makefile +++ b/Makefile @@ -438,6 +438,9 @@ codegen: codegen-tools ## Deep copy @$(SHELL) $(CURDIR)/agent/consul/state/deep-copy.sh @$(SHELL) $(CURDIR)/agent/config/deep-copy.sh copywrite headers + # Special case for MPL headers in /api and /sdk + cd api && $(CURDIR)/build-support/scripts/copywrite-exceptions.sh + cd sdk && $(CURDIR)/build-support/scripts/copywrite-exceptions.sh print-% : ; @echo $($*) ## utility to echo a makefile variable (i.e. 'make print-GOPATH') diff --git a/api/.copywrite.hcl b/api/.copywrite.hcl new file mode 100644 index 000000000000..34d99ba25e12 --- /dev/null +++ b/api/.copywrite.hcl @@ -0,0 +1,8 @@ +schema_version = 1 + +project { + license = "MPL-2.0" + copyright_year = 2023 + + header_ignore = [] +} diff --git a/api/config_entry_routes_test.go b/api/config_entry_routes_test.go index b878612e907e..0a4f8e38b1b8 100644 --- a/api/config_entry_routes_test.go +++ b/api/config_entry_routes_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + package api import ( diff --git a/api/config_entry_status_test.go b/api/config_entry_status_test.go index 9c6eaf034c32..ec64c8716407 100644 --- a/api/config_entry_status_test.go +++ b/api/config_entry_status_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package api import "testing" diff --git a/api/internal.go b/api/internal.go index dee161a65eb2..b5f400f4b19b 100644 --- a/api/internal.go +++ b/api/internal.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package api import "context" diff --git a/api/internal_test.go b/api/internal_test.go index ce088f1787d1..ce773d7360f9 100644 --- a/api/internal_test.go +++ b/api/internal_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package api import ( diff --git a/api/operator_audit.go b/api/operator_audit.go index b255d67f4e31..5240d38a70d7 100644 --- a/api/operator_audit.go +++ b/api/operator_audit.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 // The /v1/operator/audit-hash endpoint is available only in Consul Enterprise and // interact with its audit logging subsystem. diff --git a/build-support/scripts/copywrite-exceptions.sh b/build-support/scripts/copywrite-exceptions.sh new file mode 100755 index 000000000000..f6ca45626cf2 --- /dev/null +++ b/build-support/scripts/copywrite-exceptions.sh @@ -0,0 +1,14 @@ +#!/bin/sh + +# Used as a stopgap for copywrite bot in MPL-licensed subdirs, detects BUSL licensed +# headers and deletes them, then runs the copywrite bot to utilize local subdir config +# to inject correct headers. + +find . -type f -name '*.go' | while read line; do + if grep "SPDX-License-Identifier: BUSL-1.1" $line; then + sed -i '/SPDX-License-Identifier: BUSL-1.1/d' $line + sed -i '/Copyright (c) HashiCorp, Inc./d' $line + fi +done + +copywrite headers diff --git a/sdk/.copywrite.hcl b/sdk/.copywrite.hcl new file mode 100644 index 000000000000..34d99ba25e12 --- /dev/null +++ b/sdk/.copywrite.hcl @@ -0,0 +1,8 @@ +schema_version = 1 + +project { + license = "MPL-2.0" + copyright_year = 2023 + + header_ignore = [] +} diff --git a/sdk/testutil/retry/counter.go b/sdk/testutil/retry/counter.go index 96a37ab9d2fc..ffd509f1a414 100644 --- a/sdk/testutil/retry/counter.go +++ b/sdk/testutil/retry/counter.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package retry import "time" diff --git a/sdk/testutil/retry/timer.go b/sdk/testutil/retry/timer.go index 16433e9ec7b0..be4f5e92f407 100644 --- a/sdk/testutil/retry/timer.go +++ b/sdk/testutil/retry/timer.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package retry import "time" From 6c7d0759e4d32c96a1c26d7eb3c2ab6a842bf97c Mon Sep 17 00:00:00 2001 From: "R.B. Boyer" <4903+rboyer@users.noreply.github.com> Date: Mon, 16 Oct 2023 12:18:56 -0500 Subject: [PATCH 009/130] mesh: add xRoute ACL hook tenancy tests (#19177) Enhance the xRoute ACL hook tests to cover tenanted situations. These tests will only execute in enterprise. 
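For reference, a minimal sketch of the rule shapes the new serviceRead/serviceWrite helpers emit (see the diff below): in enterprise the service policy is wrapped in partition/namespace blocks, while in CE only the bare service rule is produced. The standalone program, its explicit enterprise flag, and the tenancy names used here are illustrative assumptions of this sketch, not part of the patch:

    package main

    import "fmt"

    // serviceRead mirrors the test helper: wrap the service rule in
    // partition/namespace blocks for enterprise, emit the bare rule otherwise.
    // The enterprise parameter stands in for the isEnterprise check used in
    // the real test and is an assumption of this sketch.
    func serviceRead(enterprise bool, partition, namespace, name string) string {
        if enterprise {
            return fmt.Sprintf(`partition %q { namespace %q { service %q { policy = "read" } } }`, partition, namespace, name)
        }
        return fmt.Sprintf(`service %q { policy = "read" }`, name)
    }

    func main() {
        // Tenanted (enterprise) form exercised by the new test matrix.
        fmt.Println(serviceRead(true, "alpha", "foo", "api1"))
        // CE form, unchanged by this patch.
        fmt.Println(serviceRead(false, "", "", "api1"))
    }

In the patch itself the enterprise/CE split is decided once via isEnterprise and the same helpers are then reused across the route/backend/ACL tenancy matrix.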
--- internal/mesh/internal/types/xroute_test.go | 245 +++++++++++--------- internal/resource/resourcetest/acls.go | 53 +++-- 2 files changed, 170 insertions(+), 128 deletions(-) diff --git a/internal/mesh/internal/types/xroute_test.go b/internal/mesh/internal/types/xroute_test.go index cd6d6d766327..5ba39e24cc07 100644 --- a/internal/mesh/internal/types/xroute_test.go +++ b/internal/mesh/internal/types/xroute_test.go @@ -12,7 +12,6 @@ import ( "google.golang.org/protobuf/types/known/durationpb" "google.golang.org/protobuf/types/known/wrapperspb" - "github.com/hashicorp/consul/acl" "github.com/hashicorp/consul/agent/structs" "github.com/hashicorp/consul/internal/resource" "github.com/hashicorp/consul/internal/resource/resourcetest" @@ -114,6 +113,7 @@ func getXRouteParentRefTestCases() map[string]xRouteParentRefTestcase { Port: port, } } + return map[string]xRouteParentRefTestcase{ "no parent refs": { routeTenancy: resource.DefaultNamespacedTenancy(), @@ -372,145 +372,160 @@ func testXRouteACLs[R XRouteData](t *testing.T, newRoute func(t *testing.T, pare userNewRoute := newRoute newRoute = func(t *testing.T, parentRefs, backendRefs []*pbresource.Reference) *pbresource.Resource { + require.NotEmpty(t, parentRefs) + require.NotEmpty(t, backendRefs) res := userNewRoute(t, parentRefs, backendRefs) + res.Id.Tenancy = parentRefs[0].Tenancy resourcetest.ValidateAndNormalize(t, registry, res) return res } - type testcase struct { - res *pbresource.Resource - rules string - check func(t *testing.T, authz acl.Authorizer, res *pbresource.Resource) - readOK string - writeOK string - } - const ( - DENY = "deny" - ALLOW = "allow" - DEFAULT = "default" + DENY = resourcetest.DENY + ALLOW = resourcetest.ALLOW + DEFAULT = resourcetest.DEFAULT ) - checkF := func(t *testing.T, name string, expect string, got error) { - switch expect { - case ALLOW: - if acl.IsErrPermissionDenied(got) { - t.Fatal(name + " should be allowed") - } - case DENY: - if !acl.IsErrPermissionDenied(got) { - t.Fatal(name + " should be denied") - } - case DEFAULT: - require.Nil(t, got, name+" expected fallthrough decision") - default: - t.Fatalf(name+" unexpected expectation: %q", expect) - } + serviceRef := func(tenancy, name string) *pbresource.Reference { + return newRefWithTenancy(pbcatalog.ServiceType, tenancy, name) } - resOneParentOneBackend := newRoute(t, - []*pbresource.Reference{ - newRef(pbcatalog.ServiceType, "api1"), - }, - []*pbresource.Reference{ - newRef(pbcatalog.ServiceType, "backend1"), - }, - ) - resTwoParentsOneBackend := newRoute(t, - []*pbresource.Reference{ - newRef(pbcatalog.ServiceType, "api1"), - newRef(pbcatalog.ServiceType, "api2"), - }, - []*pbresource.Reference{ - newRef(pbcatalog.ServiceType, "backend1"), - }, - ) - resOneParentTwoBackends := newRoute(t, - []*pbresource.Reference{ - newRef(pbcatalog.ServiceType, "api1"), - }, - []*pbresource.Reference{ - newRef(pbcatalog.ServiceType, "backend1"), - newRef(pbcatalog.ServiceType, "backend2"), - }, - ) - resTwoParentsTwoBackends := newRoute(t, - []*pbresource.Reference{ - newRef(pbcatalog.ServiceType, "api1"), - newRef(pbcatalog.ServiceType, "api2"), - }, - []*pbresource.Reference{ - newRef(pbcatalog.ServiceType, "backend1"), - newRef(pbcatalog.ServiceType, "backend2"), - }, - ) + resOneParentOneBackend := func(parentTenancy, backendTenancy string) *pbresource.Resource { + return newRoute(t, + []*pbresource.Reference{ + serviceRef(parentTenancy, "api1"), + }, + []*pbresource.Reference{ + serviceRef(backendTenancy, "backend1"), + }, + ) + } + 
resTwoParentsOneBackend := func(parentTenancy, backendTenancy string) *pbresource.Resource { + return newRoute(t, + []*pbresource.Reference{ + serviceRef(parentTenancy, "api1"), + serviceRef(parentTenancy, "api2"), + }, + []*pbresource.Reference{ + serviceRef(backendTenancy, "backend1"), + }, + ) + } + resOneParentTwoBackends := func(parentTenancy, backendTenancy string) *pbresource.Resource { + return newRoute(t, + []*pbresource.Reference{ + serviceRef(parentTenancy, "api1"), + }, + []*pbresource.Reference{ + serviceRef(backendTenancy, "backend1"), + serviceRef(backendTenancy, "backend2"), + }, + ) + } + resTwoParentsTwoBackends := func(parentTenancy, backendTenancy string) *pbresource.Resource { + return newRoute(t, + []*pbresource.Reference{ + serviceRef(parentTenancy, "api1"), + serviceRef(parentTenancy, "api2"), + }, + []*pbresource.Reference{ + serviceRef(backendTenancy, "backend1"), + serviceRef(backendTenancy, "backend2"), + }, + ) + } - run := func(t *testing.T, name string, tc testcase) { + run := func(t *testing.T, name string, tc resourcetest.ACLTestCase) { t.Run(name, func(t *testing.T) { - config := acl.Config{ - WildcardName: structs.WildcardSpecifier, - } - authz, err := acl.NewAuthorizerFromRules(tc.rules, &config, nil) - require.NoError(t, err) - authz = acl.NewChainedAuthorizer([]acl.Authorizer{authz, acl.DenyAll()}) - - reg, ok := registry.Resolve(tc.res.Id.GetType()) - require.True(t, ok) - - err = reg.ACLs.Read(authz, &acl.AuthorizerContext{}, tc.res.Id, nil) - require.ErrorIs(t, err, resource.ErrNeedResource, "read hook should require the data payload") - - checkF(t, "read", tc.readOK, reg.ACLs.Read(authz, &acl.AuthorizerContext{}, tc.res.Id, tc.res)) - checkF(t, "write", tc.writeOK, reg.ACLs.Write(authz, &acl.AuthorizerContext{}, tc.res)) - checkF(t, "list", DEFAULT, reg.ACLs.List(authz, &acl.AuthorizerContext{})) + resourcetest.RunACLTestCase(t, tc, registry) }) } - serviceRead := func(name string) string { + isEnterprise := (structs.NodeEnterpriseMetaInDefaultPartition().PartitionOrEmpty() == "default") + + serviceRead := func(partition, namespace, name string) string { + if isEnterprise { + return fmt.Sprintf(` partition %q { namespace %q { service %q { policy = "read" } } }`, partition, namespace, name) + } return fmt.Sprintf(` service %q { policy = "read" } `, name) } - serviceWrite := func(name string) string { + serviceWrite := func(partition, namespace, name string) string { + if isEnterprise { + return fmt.Sprintf(` partition %q { namespace %q { service %q { policy = "write" } } }`, partition, namespace, name) + } return fmt.Sprintf(` service %q { policy = "write" } `, name) } assert := func(t *testing.T, name string, rules string, res *pbresource.Resource, readOK, writeOK string) { - tc := testcase{ - res: res, - rules: rules, - readOK: readOK, - writeOK: writeOK, + tc := resourcetest.ACLTestCase{ + Rules: rules, + Res: res, + ReadOK: readOK, + WriteOK: writeOK, + ListOK: DEFAULT, + ReadHookRequiresResource: true, } run(t, name, tc) } - t.Run("no rules", func(t *testing.T) { - rules := `` - assert(t, "1parent 1backend", rules, resOneParentOneBackend, DENY, DENY) - assert(t, "1parent 2backends", rules, resOneParentTwoBackends, DENY, DENY) - assert(t, "2parents 1backend", rules, resTwoParentsOneBackend, DENY, DENY) - assert(t, "2parents 2backends", rules, resTwoParentsTwoBackends, DENY, DENY) - }) - t.Run("api1:read", func(t *testing.T) { - rules := serviceRead("api1") - assert(t, "1parent 1backend", rules, resOneParentOneBackend, ALLOW, DENY) - assert(t, 
"1parent 2backends", rules, resOneParentTwoBackends, ALLOW, DENY) - assert(t, "2parents 1backend", rules, resTwoParentsOneBackend, DENY, DENY) - assert(t, "2parents 2backends", rules, resTwoParentsTwoBackends, DENY, DENY) - }) - t.Run("api1:write", func(t *testing.T) { - rules := serviceWrite("api1") - assert(t, "1parent 1backend", rules, resOneParentOneBackend, ALLOW, DENY) - assert(t, "1parent 2backends", rules, resOneParentTwoBackends, ALLOW, DENY) - assert(t, "2parents 1backend", rules, resTwoParentsOneBackend, DENY, DENY) - assert(t, "2parents 2backends", rules, resTwoParentsTwoBackends, DENY, DENY) - }) - t.Run("api1:write backend1:read", func(t *testing.T) { - rules := serviceWrite("api1") + serviceRead("backend1") - assert(t, "1parent 1backend", rules, resOneParentOneBackend, ALLOW, ALLOW) - assert(t, "1parent 2backends", rules, resOneParentTwoBackends, ALLOW, DENY) - assert(t, "2parents 1backend", rules, resTwoParentsOneBackend, DENY, DENY) - assert(t, "2parents 2backends", rules, resTwoParentsTwoBackends, DENY, DENY) - }) + tenancies := []string{"default.default"} + if isEnterprise { + tenancies = append(tenancies, "default.foo", "alpha.default", "alpha.foo") + } + + for _, parentTenancyStr := range tenancies { + t.Run("route tenancy: "+parentTenancyStr, func(t *testing.T) { + for _, backendTenancyStr := range tenancies { + t.Run("backend tenancy: "+backendTenancyStr, func(t *testing.T) { + for _, aclTenancyStr := range tenancies { + t.Run("acl tenancy: "+aclTenancyStr, func(t *testing.T) { + aclTenancy := resourcetest.Tenancy(aclTenancyStr) + + maybe := func(match string, parentOnly bool) string { + if parentTenancyStr != aclTenancyStr { + return DENY + } + if !parentOnly && backendTenancyStr != aclTenancyStr { + return DENY + } + return match + } + + t.Run("no rules", func(t *testing.T) { + rules := `` + assert(t, "1parent 1backend", rules, resOneParentOneBackend(parentTenancyStr, backendTenancyStr), DENY, DENY) + assert(t, "1parent 2backends", rules, resOneParentTwoBackends(parentTenancyStr, backendTenancyStr), DENY, DENY) + assert(t, "2parents 1backend", rules, resTwoParentsOneBackend(parentTenancyStr, backendTenancyStr), DENY, DENY) + assert(t, "2parents 2backends", rules, resTwoParentsTwoBackends(parentTenancyStr, backendTenancyStr), DENY, DENY) + }) + t.Run("api1:read", func(t *testing.T) { + rules := serviceRead(aclTenancy.Partition, aclTenancy.Namespace, "api1") + assert(t, "1parent 1backend", rules, resOneParentOneBackend(parentTenancyStr, backendTenancyStr), maybe(ALLOW, true), DENY) + assert(t, "1parent 2backends", rules, resOneParentTwoBackends(parentTenancyStr, backendTenancyStr), maybe(ALLOW, true), DENY) + assert(t, "2parents 1backend", rules, resTwoParentsOneBackend(parentTenancyStr, backendTenancyStr), DENY, DENY) + assert(t, "2parents 2backends", rules, resTwoParentsTwoBackends(parentTenancyStr, backendTenancyStr), DENY, DENY) + }) + t.Run("api1:write", func(t *testing.T) { + rules := serviceWrite(aclTenancy.Partition, aclTenancy.Namespace, "api1") + assert(t, "1parent 1backend", rules, resOneParentOneBackend(parentTenancyStr, backendTenancyStr), maybe(ALLOW, true), DENY) + assert(t, "1parent 2backends", rules, resOneParentTwoBackends(parentTenancyStr, backendTenancyStr), maybe(ALLOW, true), DENY) + assert(t, "2parents 1backend", rules, resTwoParentsOneBackend(parentTenancyStr, backendTenancyStr), DENY, DENY) + assert(t, "2parents 2backends", rules, resTwoParentsTwoBackends(parentTenancyStr, backendTenancyStr), DENY, DENY) + }) + t.Run("api1:write 
backend1:read", func(t *testing.T) { + rules := serviceWrite(aclTenancy.Partition, aclTenancy.Namespace, "api1") + + serviceRead(aclTenancy.Partition, aclTenancy.Namespace, "backend1") + assert(t, "1parent 1backend", rules, resOneParentOneBackend(parentTenancyStr, backendTenancyStr), maybe(ALLOW, true), maybe(ALLOW, false)) + assert(t, "1parent 2backends", rules, resOneParentTwoBackends(parentTenancyStr, backendTenancyStr), maybe(ALLOW, true), DENY) + assert(t, "2parents 1backend", rules, resTwoParentsOneBackend(parentTenancyStr, backendTenancyStr), DENY, DENY) + assert(t, "2parents 2backends", rules, resTwoParentsTwoBackends(parentTenancyStr, backendTenancyStr), DENY, DENY) + }) + }) + } + }) + } + }) + } } func newRef(typ *pbresource.Type, name string) *pbresource.Reference { diff --git a/internal/resource/resourcetest/acls.go b/internal/resource/resourcetest/acls.go index 4aff9e30327b..4ce8cc9d7ae0 100644 --- a/internal/resource/resourcetest/acls.go +++ b/internal/resource/resourcetest/acls.go @@ -39,27 +39,49 @@ var checkF = func(t *testing.T, expect string, got error) { } type ACLTestCase struct { - Rules string - Data protoreflect.ProtoMessage - Owner *pbresource.ID - Typ *pbresource.Type + Rules string + + // One of either Res or Data/Owner/Typ should be set. + Res *pbresource.Resource + Data protoreflect.ProtoMessage + Owner *pbresource.ID + Typ *pbresource.Type + ReadOK string WriteOK string ListOK string + + ReadHookRequiresResource bool } func RunACLTestCase(t *testing.T, tc ACLTestCase, registry resource.Registry) { - reg, ok := registry.Resolve(tc.Typ) - require.True(t, ok) + var ( + typ *pbresource.Type + res *pbresource.Resource + ) + if tc.Res != nil { + require.Nil(t, tc.Data) + require.Nil(t, tc.Owner) + require.Nil(t, tc.Typ) + typ = tc.Res.Id.GetType() + res = tc.Res + } else { + require.NotNil(t, tc.Data) + require.NotNil(t, tc.Typ) + typ = tc.Typ - resolvedType, ok := registry.Resolve(tc.Typ) - require.True(t, ok) + resolvedType, ok := registry.Resolve(typ) + require.True(t, ok) - res := Resource(tc.Typ, "test"). - WithTenancy(DefaultTenancyForType(t, resolvedType)). - WithOwner(tc.Owner). - WithData(t, tc.Data). - Build() + res = Resource(tc.Typ, "test"). + WithTenancy(DefaultTenancyForType(t, resolvedType)). + WithOwner(tc.Owner). + WithData(t, tc.Data). 
+ Build() + } + + reg, ok := registry.Resolve(typ) + require.True(t, ok) ValidateAndNormalize(t, registry, res) @@ -70,6 +92,11 @@ func RunACLTestCase(t *testing.T, tc ACLTestCase, registry resource.Registry) { require.NoError(t, err) authz = acl.NewChainedAuthorizer([]acl.Authorizer{authz, acl.DenyAll()}) + if tc.ReadHookRequiresResource { + err = reg.ACLs.Read(authz, &acl.AuthorizerContext{}, res.Id, nil) + require.ErrorIs(t, err, resource.ErrNeedResource, "read hook should require the data payload") + } + t.Run("read", func(t *testing.T) { err := reg.ACLs.Read(authz, &acl.AuthorizerContext{}, res.Id, res) checkF(t, tc.ReadOK, err) From ad177698f7b0da083f67d6e695c420c29b544cfd Mon Sep 17 00:00:00 2001 From: Semir Patel Date: Mon, 16 Oct 2023 12:55:30 -0500 Subject: [PATCH 010/130] resource: enforce lowercase v2 resource names (#19218) --- .changelog/19218.txt | 3 + .../grpc-external/services/resource/delete.go | 3 +- .../services/resource/delete_test.go | 94 ++++++-- agent/grpc-external/services/resource/list.go | 5 +- .../services/resource/list_by_owner.go | 3 - .../services/resource/list_by_owner_test.go | 158 +++++++++---- .../services/resource/list_test.go | 60 ++++- .../services/resource/read_test.go | 169 ++++++++++---- .../grpc-external/services/resource/server.go | 73 +++++- .../services/resource/server_test.go | 28 --- .../grpc-external/services/resource/watch.go | 5 +- .../services/resource/watch_test.go | 59 ++++- .../services/resource/write_status.go | 13 +- .../services/resource/write_status_test.go | 173 ++++++++++---- .../services/resource/write_test.go | 213 ++++++++++++------ .../controllers/xds/controller_test.go | 5 +- internal/resource/demo/controller.go | 2 +- internal/resource/demo/demo.go | 2 +- internal/resource/http/http_test.go | 12 +- internal/resource/resource.go | 22 ++ internal/resource/tenancy.go | 17 -- .../tenancy/internal/types/namespace_test.go | 16 +- 22 files changed, 816 insertions(+), 319 deletions(-) create mode 100644 .changelog/19218.txt create mode 100644 internal/resource/resource.go diff --git a/.changelog/19218.txt b/.changelog/19218.txt new file mode 100644 index 000000000000..a3dde32317b4 --- /dev/null +++ b/.changelog/19218.txt @@ -0,0 +1,3 @@ +```release-note:improvement +resource: lowercase names enforced for v2 resources only. +``` \ No newline at end of file diff --git a/agent/grpc-external/services/resource/delete.go b/agent/grpc-external/services/resource/delete.go index 2f30e27f983f..a2d3bec995d4 100644 --- a/agent/grpc-external/services/resource/delete.go +++ b/agent/grpc-external/services/resource/delete.go @@ -7,6 +7,7 @@ import ( "context" "errors" "fmt" + "strings" "time" "github.com/oklog/ulid/v2" @@ -175,5 +176,5 @@ func (s *Server) validateDeleteRequest(req *pbresource.DeleteRequest) (*resource // name by embedding the resources's Uid in the name. 
func tombstoneName(deleteId *pbresource.ID) string { // deleteId.Name is just included for easier identification - return fmt.Sprintf("tombstone-%v-%v", deleteId.Name, deleteId.Uid) + return fmt.Sprintf("tombstone-%v-%v", deleteId.Name, strings.ToLower(deleteId.Uid)) } diff --git a/agent/grpc-external/services/resource/delete_test.go b/agent/grpc-external/services/resource/delete_test.go index 5f5d7d7e2192..3bdbb0581d10 100644 --- a/agent/grpc-external/services/resource/delete_test.go +++ b/agent/grpc-external/services/resource/delete_test.go @@ -5,6 +5,7 @@ package resource import ( "context" + "strings" "testing" "github.com/stretchr/testify/mock" @@ -22,39 +23,98 @@ import ( func TestDelete_InputValidation(t *testing.T) { server := testServer(t) client := testClient(t, server) - demo.RegisterTypes(server.Registry) - testCases := map[string]func(artistId, recordLabelId *pbresource.ID) *pbresource.ID{ - "no id": func(artistId, recordLabelId *pbresource.ID) *pbresource.ID { - return nil + type testCase struct { + modFn func(artistId, recordLabelId *pbresource.ID) *pbresource.ID + errContains string + } + + testCases := map[string]testCase{ + "no id": { + modFn: func(_, _ *pbresource.ID) *pbresource.ID { + return nil + }, + errContains: "id is required", + }, + "no type": { + modFn: func(artistId, _ *pbresource.ID) *pbresource.ID { + artistId.Type = nil + return artistId + }, + errContains: "id.type is required", + }, + "no name": { + modFn: func(artistId, _ *pbresource.ID) *pbresource.ID { + artistId.Name = "" + return artistId + }, + errContains: "id.name invalid", + }, + "mixed case name": { + modFn: func(artistId, _ *pbresource.ID) *pbresource.ID { + artistId.Name = "DepecheMode" + return artistId + }, + errContains: "id.name invalid", + }, + "name too long": { + modFn: func(artistId, _ *pbresource.ID) *pbresource.ID { + artistId.Name = strings.Repeat("n", resource.MaxNameLength+1) + return artistId + }, + errContains: "id.name invalid", + }, + "partition mixed case": { + modFn: func(artistId, _ *pbresource.ID) *pbresource.ID { + artistId.Tenancy.Partition = "Default" + return artistId + }, + errContains: "id.tenancy.partition invalid", }, - "no type": func(artistId, _ *pbresource.ID) *pbresource.ID { - artistId.Type = nil - return artistId + "partition name too long": { + modFn: func(artistId, _ *pbresource.ID) *pbresource.ID { + artistId.Tenancy.Partition = strings.Repeat("p", resource.MaxNameLength+1) + return artistId + }, + errContains: "id.tenancy.partition invalid", }, - "no name": func(artistId, _ *pbresource.ID) *pbresource.ID { - artistId.Name = "" - return artistId + "namespace mixed case": { + modFn: func(artistId, _ *pbresource.ID) *pbresource.ID { + artistId.Tenancy.Namespace = "Default" + return artistId + }, + errContains: "id.tenancy.namespace invalid", }, - "partition scoped resource with namespace": func(_, recordLabelId *pbresource.ID) *pbresource.ID { - recordLabelId.Tenancy.Namespace = "ishouldnothaveanamespace" - return recordLabelId + "namespace name too long": { + modFn: func(artistId, _ *pbresource.ID) *pbresource.ID { + artistId.Tenancy.Namespace = strings.Repeat("n", resource.MaxNameLength+1) + return artistId + }, + errContains: "id.tenancy.namespace invalid", + }, + "partition scoped resource with namespace": { + modFn: func(_, recordLabelId *pbresource.ID) *pbresource.ID { + recordLabelId.Tenancy.Namespace = "ishouldnothaveanamespace" + return recordLabelId + }, + errContains: "cannot have a namespace", }, } - for desc, modFn := range testCases { + for 
desc, tc := range testCases { t.Run(desc, func(t *testing.T) { - recordLabel, err := demo.GenerateV1RecordLabel("LoonyTunes") + recordLabel, err := demo.GenerateV1RecordLabel("looney-tunes") require.NoError(t, err) artist, err := demo.GenerateV2Artist() require.NoError(t, err) - req := &pbresource.DeleteRequest{Id: modFn(artist.Id, recordLabel.Id), Version: ""} + req := &pbresource.DeleteRequest{Id: tc.modFn(artist.Id, recordLabel.Id), Version: ""} _, err = client.Delete(testContext(t), req) require.Error(t, err) require.Equal(t, codes.InvalidArgument.String(), status.Code(err).String()) + require.ErrorContains(t, err, tc.errContains) }) } } @@ -129,7 +189,7 @@ func TestDelete_Success(t *testing.T) { server, client, ctx := testDeps(t) demo.RegisterTypes(server.Registry) - recordLabel, err := demo.GenerateV1RecordLabel("LoonyTunes") + recordLabel, err := demo.GenerateV1RecordLabel("looney-tunes") require.NoError(t, err) writeRsp, err := client.Write(ctx, &pbresource.WriteRequest{Resource: recordLabel}) require.NoError(t, err) diff --git a/agent/grpc-external/services/resource/list.go b/agent/grpc-external/services/resource/list.go index c1ecb253448c..befb619eec53 100644 --- a/agent/grpc-external/services/resource/list.go +++ b/agent/grpc-external/services/resource/list.go @@ -100,8 +100,9 @@ func (s *Server) validateListRequest(req *pbresource.ListRequest) (*resource.Reg return nil, err } - // Lowercase - resource.Normalize(req.Tenancy) + if err := validateWildcardTenancy(req.Tenancy, req.NamePrefix); err != nil { + return nil, err + } // Error when partition scoped and namespace not empty. if reg.Scope == resource.ScopePartition && req.Tenancy.Namespace != "" { diff --git a/agent/grpc-external/services/resource/list_by_owner.go b/agent/grpc-external/services/resource/list_by_owner.go index 2310a5b50eda..a9b1754498fe 100644 --- a/agent/grpc-external/services/resource/list_by_owner.go +++ b/agent/grpc-external/services/resource/list_by_owner.go @@ -105,9 +105,6 @@ func (s *Server) validateListByOwnerRequest(req *pbresource.ListByOwnerRequest) return nil, err } - // Lowercase - resource.Normalize(req.Owner.Tenancy) - // Error when partition scoped and namespace not empty. 
if reg.Scope == resource.ScopePartition && req.Owner.Tenancy.Namespace != "" { return nil, status.Errorf( diff --git a/agent/grpc-external/services/resource/list_by_owner_test.go b/agent/grpc-external/services/resource/list_by_owner_test.go index 11c6027c0b64..78024e68d0fb 100644 --- a/agent/grpc-external/services/resource/list_by_owner_test.go +++ b/agent/grpc-external/services/resource/list_by_owner_test.go @@ -6,6 +6,7 @@ package resource import ( "context" "fmt" + "strings" "testing" "github.com/hashicorp/consul/acl" @@ -13,6 +14,7 @@ import ( "github.com/hashicorp/consul/internal/resource/demo" "github.com/hashicorp/consul/proto-public/pbresource" "github.com/hashicorp/consul/proto/private/prototest" + "github.com/oklog/ulid/v2" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" @@ -26,41 +28,104 @@ func TestListByOwner_InputValidation(t *testing.T) { client := testClient(t, server) demo.RegisterTypes(server.Registry) - testCases := map[string]func(artistId, recordlabelId *pbresource.ID) *pbresource.ID{ - "no owner": func(artistId, recordLabelId *pbresource.ID) *pbresource.ID { - return nil + type testCase struct { + modFn func(artistId, recordlabelId *pbresource.ID) *pbresource.ID + errContains string + } + testCases := map[string]testCase{ + "no owner": { + modFn: func(artistId, recordLabelId *pbresource.ID) *pbresource.ID { + return nil + }, + errContains: "owner is required", + }, + "no type": { + modFn: func(artistId, _ *pbresource.ID) *pbresource.ID { + artistId.Type = nil + return artistId + }, + errContains: "owner.type is required", + }, + "no name": { + modFn: func(artistId, _ *pbresource.ID) *pbresource.ID { + artistId.Name = "" + return artistId + }, + errContains: "owner.name invalid", + }, + "name mixed case": { + modFn: func(artistId, _ *pbresource.ID) *pbresource.ID { + artistId.Name = "U2" + return artistId + }, + errContains: "owner.name invalid", }, - "no type": func(artistId, _ *pbresource.ID) *pbresource.ID { - artistId.Type = nil - return artistId + "name too long": { + modFn: func(artistId, _ *pbresource.ID) *pbresource.ID { + artistId.Name = strings.Repeat("n", resource.MaxNameLength+1) + return artistId + }, + errContains: "owner.name invalid", }, - "no name": func(artistId, _ *pbresource.ID) *pbresource.ID { - artistId.Name = "" - return artistId + "partition mixed case": { + modFn: func(artistId, _ *pbresource.ID) *pbresource.ID { + artistId.Tenancy.Partition = "Default" + return artistId + }, + errContains: "owner.tenancy.partition invalid", }, - "no uid": func(artistId, _ *pbresource.ID) *pbresource.ID { - artistId.Uid = "" - return artistId + "partition too long": { + modFn: func(artistId, _ *pbresource.ID) *pbresource.ID { + artistId.Tenancy.Partition = strings.Repeat("p", resource.MaxNameLength+1) + return artistId + }, + errContains: "owner.tenancy.partition invalid", }, - "partition scope with non-empty namespace": func(_, recordLabelId *pbresource.ID) *pbresource.ID { - recordLabelId.Tenancy.Namespace = "ishouldnothaveanamespace" - return recordLabelId + "namespace mixed case": { + modFn: func(artistId, _ *pbresource.ID) *pbresource.ID { + artistId.Tenancy.Namespace = "Default" + return artistId + }, + errContains: "owner.tenancy.namespace invalid", + }, + "namespace too long": { + modFn: func(artistId, _ *pbresource.ID) *pbresource.ID { + artistId.Tenancy.Namespace = strings.Repeat("n", resource.MaxNameLength+1) + return artistId + }, + errContains: "owner.tenancy.namespace invalid", + }, + "no uid": { + modFn: func(artistId, _ 
*pbresource.ID) *pbresource.ID { + artistId.Uid = "" + return artistId + }, + errContains: "owner uid is required", + }, + "partition scope with non-empty namespace": { + modFn: func(_, recordLabelId *pbresource.ID) *pbresource.ID { + recordLabelId.Uid = ulid.Make().String() + recordLabelId.Tenancy.Namespace = "ishouldnothaveanamespace" + return recordLabelId + }, + errContains: "cannot have a namespace", }, } - for desc, modFn := range testCases { + for desc, tc := range testCases { t.Run(desc, func(t *testing.T) { artist, err := demo.GenerateV2Artist() require.NoError(t, err) - recordLabel, err := demo.GenerateV1RecordLabel("LoonyTunes") + recordLabel, err := demo.GenerateV1RecordLabel("looney-tunes") require.NoError(t, err) // Each test case picks which resource to use based on the resource type's scope. - req := &pbresource.ListByOwnerRequest{Owner: modFn(artist.Id, recordLabel.Id)} + req := &pbresource.ListByOwnerRequest{Owner: tc.modFn(artist.Id, recordLabel.Id)} _, err = client.ListByOwner(testContext(t), req) require.Error(t, err) require.Equal(t, codes.InvalidArgument.String(), status.Code(err).String()) + require.ErrorContains(t, err, tc.errContains) }) } } @@ -131,33 +196,46 @@ func TestListByOwner_Many(t *testing.T) { } func TestListByOwner_OwnerTenancyDoesNotExist(t *testing.T) { - tenancyCases := map[string]func(artistId, recordlabelId *pbresource.ID) *pbresource.ID{ - "partition not found when namespace scoped": func(artistId, _ *pbresource.ID) *pbresource.ID { - id := clone(artistId) - id.Uid = "doesnotmatter" - id.Tenancy.Partition = "boguspartition" - return id + type testCase struct { + modFn func(artistId, recordlabelId *pbresource.ID) *pbresource.ID + errContains string + } + tenancyCases := map[string]testCase{ + "partition not found when namespace scoped": { + modFn: func(artistId, _ *pbresource.ID) *pbresource.ID { + id := clone(artistId) + id.Uid = "doesnotmatter" + id.Tenancy.Partition = "boguspartition" + return id + }, + errContains: "partition not found", }, - "namespace not found when namespace scoped": func(artistId, _ *pbresource.ID) *pbresource.ID { - id := clone(artistId) - id.Uid = "doesnotmatter" - id.Tenancy.Namespace = "bogusnamespace" - return id + "namespace not found when namespace scoped": { + modFn: func(artistId, _ *pbresource.ID) *pbresource.ID { + id := clone(artistId) + id.Uid = "doesnotmatter" + id.Tenancy.Namespace = "bogusnamespace" + return id + }, + errContains: "namespace not found", }, - "partition not found when partition scoped": func(_, recordLabelId *pbresource.ID) *pbresource.ID { - id := clone(recordLabelId) - id.Uid = "doesnotmatter" - id.Tenancy.Partition = "boguspartition" - return id + "partition not found when partition scoped": { + modFn: func(_, recordLabelId *pbresource.ID) *pbresource.ID { + id := clone(recordLabelId) + id.Uid = "doesnotmatter" + id.Tenancy.Partition = "boguspartition" + return id + }, + errContains: "partition not found", }, } - for desc, modFn := range tenancyCases { + for desc, tc := range tenancyCases { t.Run(desc, func(t *testing.T) { server := testServer(t) demo.RegisterTypes(server.Registry) client := testClient(t, server) - recordLabel, err := demo.GenerateV1RecordLabel("LoonyTunes") + recordLabel, err := demo.GenerateV1RecordLabel("looney-tunes") require.NoError(t, err) recordLabel, err = server.Backend.WriteCAS(testContext(t), recordLabel) require.NoError(t, err) @@ -167,11 +245,11 @@ func TestListByOwner_OwnerTenancyDoesNotExist(t *testing.T) { artist, err = 
server.Backend.WriteCAS(testContext(t), artist) require.NoError(t, err) - // Verify non-existant tenancy units in owner err with not found. - _, err = client.ListByOwner(testContext(t), &pbresource.ListByOwnerRequest{Owner: modFn(artist.Id, recordLabel.Id)}) + // Verify non-existant tenancy units in owner err with invalid arg. + _, err = client.ListByOwner(testContext(t), &pbresource.ListByOwnerRequest{Owner: tc.modFn(artist.Id, recordLabel.Id)}) require.Error(t, err) require.Equal(t, codes.InvalidArgument.String(), status.Code(err).String()) - require.Contains(t, err.Error(), "resource not found") + require.ErrorContains(t, err, tc.errContains) }) } } @@ -184,7 +262,7 @@ func TestListByOwner_Tenancy_Defaults_And_Normalization(t *testing.T) { client := testClient(t, server) // Create partition scoped recordLabel. - recordLabel, err := demo.GenerateV1RecordLabel("LoonyTunes") + recordLabel, err := demo.GenerateV1RecordLabel("looney-tunes") require.NoError(t, err) rsp1, err := client.Write(testContext(t), &pbresource.WriteRequest{Resource: recordLabel}) require.NoError(t, err) diff --git a/agent/grpc-external/services/resource/list_test.go b/agent/grpc-external/services/resource/list_test.go index 64026b7d34e5..a80685b0834e 100644 --- a/agent/grpc-external/services/resource/list_test.go +++ b/agent/grpc-external/services/resource/list_test.go @@ -6,6 +6,7 @@ package resource import ( "context" "fmt" + "strings" "testing" "github.com/hashicorp/consul/acl" @@ -26,28 +27,66 @@ import ( func TestList_InputValidation(t *testing.T) { server := testServer(t) client := testClient(t, server) - demo.RegisterTypes(server.Registry) - testCases := map[string]func(*pbresource.ListRequest){ - "no type": func(req *pbresource.ListRequest) { req.Type = nil }, - "no tenancy": func(req *pbresource.ListRequest) { req.Tenancy = nil }, - "partitioned resource provides non-empty namespace": func(req *pbresource.ListRequest) { - req.Type = demo.TypeV1RecordLabel - req.Tenancy.Namespace = "bad" + type testCase struct { + modReqFn func(req *pbresource.ListRequest) + errContains string + } + + testCases := map[string]testCase{ + "no type": { + modReqFn: func(req *pbresource.ListRequest) { req.Type = nil }, + errContains: "type is required", + }, + "no tenancy": { + modReqFn: func(req *pbresource.ListRequest) { req.Tenancy = nil }, + errContains: "tenancy is required", + }, + "partition mixed case": { + modReqFn: func(req *pbresource.ListRequest) { req.Tenancy.Partition = "Default" }, + errContains: "tenancy.partition invalid", + }, + "partition too long": { + modReqFn: func(req *pbresource.ListRequest) { + req.Tenancy.Partition = strings.Repeat("p", resource.MaxNameLength+1) + }, + errContains: "tenancy.partition invalid", + }, + "namespace mixed case": { + modReqFn: func(req *pbresource.ListRequest) { req.Tenancy.Namespace = "Default" }, + errContains: "tenancy.namespace invalid", + }, + "namespace too long": { + modReqFn: func(req *pbresource.ListRequest) { + req.Tenancy.Namespace = strings.Repeat("n", resource.MaxNameLength+1) + }, + errContains: "tenancy.namespace invalid", + }, + "name_prefix mixed case": { + modReqFn: func(req *pbresource.ListRequest) { req.NamePrefix = "Violator" }, + errContains: "name_prefix invalid", + }, + "partitioned resource provides non-empty namespace": { + modReqFn: func(req *pbresource.ListRequest) { + req.Type = demo.TypeV1RecordLabel + req.Tenancy.Namespace = "bad" + }, + errContains: "cannot have a namespace", }, } - for desc, modFn := range testCases { + for desc, tc := range 
testCases { t.Run(desc, func(t *testing.T) { req := &pbresource.ListRequest{ Type: demo.TypeV2Album, Tenancy: resource.DefaultNamespacedTenancy(), } - modFn(req) + tc.modReqFn(req) _, err := client.List(testContext(t), req) require.Error(t, err) require.Equal(t, codes.InvalidArgument.String(), status.Code(err).String()) + require.ErrorContains(t, err, tc.errContains) }) } } @@ -126,7 +165,7 @@ func TestList_Tenancy_Defaults_And_Normalization(t *testing.T) { client := testClient(t, server) // Write partition scoped record label - recordLabel, err := demo.GenerateV1RecordLabel("LooneyTunes") + recordLabel, err := demo.GenerateV1RecordLabel("looney-tunes") require.NoError(t, err) recordLabelRsp, err := client.Write(ctx, &pbresource.WriteRequest{Resource: recordLabel}) require.NoError(t, err) @@ -150,7 +189,6 @@ func TestList_Tenancy_Defaults_And_Normalization(t *testing.T) { prototest.AssertDeepEqual(t, artistRsp.Resource, listRsp.Resources[0]) } }) - } } diff --git a/agent/grpc-external/services/resource/read_test.go b/agent/grpc-external/services/resource/read_test.go index 2601689bc6c4..2afdfeab0e1e 100644 --- a/agent/grpc-external/services/resource/read_test.go +++ b/agent/grpc-external/services/resource/read_test.go @@ -6,6 +6,7 @@ package resource import ( "context" "fmt" + "strings" "sync" "testing" @@ -34,46 +35,114 @@ func TestRead_InputValidation(t *testing.T) { tenancy.RegisterTypes(server.Registry) demo.RegisterTypes(server.Registry) - testCases := map[string]func(artistId, recordlabelId, executiveId *pbresource.ID) *pbresource.ID{ - "no id": func(_, _, _ *pbresource.ID) *pbresource.ID { return nil }, - "no type": func(artistId, _, _ *pbresource.ID) *pbresource.ID { - artistId.Type = nil - return artistId + type testCase struct { + modFn func(artistId, recordlabelId, executiveId *pbresource.ID) *pbresource.ID + errContains string + } + + testCases := map[string]testCase{ + "no id": { + modFn: func(_, _, _ *pbresource.ID) *pbresource.ID { + return nil + }, + errContains: "id is required", + }, + "no type": { + modFn: func(artistId, _, _ *pbresource.ID) *pbresource.ID { + artistId.Type = nil + return artistId + }, + errContains: "id.type is required", + }, + "no name": { + modFn: func(artistId, _, _ *pbresource.ID) *pbresource.ID { + artistId.Name = "" + return artistId + }, + errContains: "id.name invalid", + }, + "name is mixed case": { + modFn: func(artistId, _, _ *pbresource.ID) *pbresource.ID { + artistId.Name = "MixedCaseNotAllowed" + return artistId + }, + errContains: "id.name invalid", }, - "no name": func(artistId, _, _ *pbresource.ID) *pbresource.ID { - artistId.Name = "" - return artistId + "name too long": { + modFn: func(artistId, _, _ *pbresource.ID) *pbresource.ID { + artistId.Name = strings.Repeat("a", resource.MaxNameLength+1) + return artistId + }, + errContains: "id.name invalid", }, - "partition scope with non-empty namespace": func(_, recordLabelId, _ *pbresource.ID) *pbresource.ID { - recordLabelId.Tenancy.Namespace = "ishouldnothaveanamespace" - return recordLabelId + "partition is mixed case": { + modFn: func(artistId, _, _ *pbresource.ID) *pbresource.ID { + artistId.Tenancy.Partition = "Default" + return artistId + }, + errContains: "id.tenancy.partition invalid", }, - "cluster scope with non-empty partition": func(_, _, executiveId *pbresource.ID) *pbresource.ID { - executiveId.Tenancy = &pbresource.Tenancy{Partition: resource.DefaultPartitionName} - return executiveId + "partition too long": { + modFn: func(artistId, _, _ *pbresource.ID) *pbresource.ID { 
+ artistId.Tenancy.Partition = strings.Repeat("p", resource.MaxNameLength+1) + return artistId + }, + errContains: "id.tenancy.partition invalid", }, - "cluster scope with non-empty namespace": func(_, _, executiveId *pbresource.ID) *pbresource.ID { - executiveId.Tenancy = &pbresource.Tenancy{Namespace: resource.DefaultNamespaceName} - return executiveId + "namespace is mixed case": { + modFn: func(artistId, _, _ *pbresource.ID) *pbresource.ID { + artistId.Tenancy.Namespace = "Default" + return artistId + }, + errContains: "id.tenancy.namespace invalid", + }, + "namespace too long": { + modFn: func(artistId, _, _ *pbresource.ID) *pbresource.ID { + artistId.Tenancy.Namespace = strings.Repeat("n", resource.MaxNameLength+1) + return artistId + }, + errContains: "id.tenancy.namespace invalid", + }, + "partition scope with non-empty namespace": { + modFn: func(_, recordLabelId, _ *pbresource.ID) *pbresource.ID { + recordLabelId.Tenancy.Namespace = "ishouldnothaveanamespace" + return recordLabelId + }, + errContains: "cannot have a namespace", + }, + "cluster scope with non-empty partition": { + modFn: func(_, _, executiveId *pbresource.ID) *pbresource.ID { + executiveId.Tenancy = &pbresource.Tenancy{Partition: resource.DefaultPartitionName} + return executiveId + }, + errContains: "cannot have a partition", + }, + "cluster scope with non-empty namespace": { + modFn: func(_, _, executiveId *pbresource.ID) *pbresource.ID { + executiveId.Tenancy = &pbresource.Tenancy{Namespace: resource.DefaultNamespaceName} + return executiveId + }, + errContains: "cannot have a namespace", }, } - for desc, modFn := range testCases { + for desc, tc := range testCases { t.Run(desc, func(t *testing.T) { artist, err := demo.GenerateV2Artist() require.NoError(t, err) - recordLabel, err := demo.GenerateV1RecordLabel("LoonyTunes") + recordLabel, err := demo.GenerateV1RecordLabel("looney-tunes") require.NoError(t, err) - executive, err := demo.GenerateV1Executive("MusicMan", "CEO") + executive, err := demo.GenerateV1Executive("music-man", "CEO") require.NoError(t, err) // Each test case picks which resource to use based on the resource type's scope. 
- req := &pbresource.ReadRequest{Id: modFn(artist.Id, recordLabel.Id, executive.Id)} + req := &pbresource.ReadRequest{Id: tc.modFn(artist.Id, recordLabel.Id, executive.Id)} _, err = client.Read(testContext(t), req) require.Error(t, err) require.Equal(t, codes.InvalidArgument.String(), status.Code(err).String()) + require.ErrorContains(t, err, tc.errContains) }) } } @@ -94,34 +163,50 @@ func TestRead_TypeNotFound(t *testing.T) { func TestRead_ResourceNotFound(t *testing.T) { for desc, tc := range readTestCases() { t.Run(desc, func(t *testing.T) { - tenancyCases := map[string]func(artistId, recordlabelId *pbresource.ID) *pbresource.ID{ - "resource not found by name": func(artistId, _ *pbresource.ID) *pbresource.ID { - artistId.Name = "bogusname" - return artistId + type tenancyCase struct { + modFn func(artistId, recordlabelId *pbresource.ID) *pbresource.ID + errContains string + } + tenancyCases := map[string]tenancyCase{ + "resource not found by name": { + modFn: func(artistId, _ *pbresource.ID) *pbresource.ID { + artistId.Name = "bogusname" + return artistId + }, + errContains: "resource not found", }, - "partition not found when namespace scoped": func(artistId, _ *pbresource.ID) *pbresource.ID { - id := clone(artistId) - id.Tenancy.Partition = "boguspartition" - return id + "partition not found when namespace scoped": { + modFn: func(artistId, _ *pbresource.ID) *pbresource.ID { + id := clone(artistId) + id.Tenancy.Partition = "boguspartition" + return id + }, + errContains: "partition not found", }, - "namespace not found when namespace scoped": func(artistId, _ *pbresource.ID) *pbresource.ID { - id := clone(artistId) - id.Tenancy.Namespace = "bogusnamespace" - return id + "namespace not found when namespace scoped": { + modFn: func(artistId, _ *pbresource.ID) *pbresource.ID { + id := clone(artistId) + id.Tenancy.Namespace = "bogusnamespace" + return id + }, + errContains: "namespace not found", }, - "partition not found when partition scoped": func(_, recordLabelId *pbresource.ID) *pbresource.ID { - id := clone(recordLabelId) - id.Tenancy.Partition = "boguspartition" - return id + "partition not found when partition scoped": { + modFn: func(_, recordLabelId *pbresource.ID) *pbresource.ID { + id := clone(recordLabelId) + id.Tenancy.Partition = "boguspartition" + return id + }, + errContains: "partition not found", }, } - for tenancyDesc, modFn := range tenancyCases { + for tenancyDesc, tenancyCase := range tenancyCases { t.Run(tenancyDesc, func(t *testing.T) { server := testServer(t) demo.RegisterTypes(server.Registry) client := testClient(t, server) - recordLabel, err := demo.GenerateV1RecordLabel("LoonyTunes") + recordLabel, err := demo.GenerateV1RecordLabel("looney-tunes") require.NoError(t, err) recordLabel, err = server.Backend.WriteCAS(tc.ctx, recordLabel) require.NoError(t, err) @@ -132,10 +217,10 @@ func TestRead_ResourceNotFound(t *testing.T) { require.NoError(t, err) // Each tenancy test case picks which resource to use based on the resource type's scope. 
- _, err = client.Read(tc.ctx, &pbresource.ReadRequest{Id: modFn(artist.Id, recordLabel.Id)}) + _, err = client.Read(tc.ctx, &pbresource.ReadRequest{Id: tenancyCase.modFn(artist.Id, recordLabel.Id)}) require.Error(t, err) require.Equal(t, codes.NotFound.String(), status.Code(err).String()) - require.Contains(t, err.Error(), "resource not found") + require.ErrorContains(t, err, tenancyCase.errContains) }) } }) @@ -176,7 +261,7 @@ func TestRead_Success(t *testing.T) { demo.RegisterTypes(server.Registry) client := testClient(t, server) - recordLabel, err := demo.GenerateV1RecordLabel("LoonyTunes") + recordLabel, err := demo.GenerateV1RecordLabel("looney-tunes") require.NoError(t, err) recordLabel, err = server.Backend.WriteCAS(tc.ctx, recordLabel) require.NoError(t, err) diff --git a/agent/grpc-external/services/resource/server.go b/agent/grpc-external/services/resource/server.go index 5fc5a01fafd7..1084fd3860a1 100644 --- a/agent/grpc-external/services/resource/server.go +++ b/agent/grpc-external/services/resource/server.go @@ -5,6 +5,7 @@ package resource import ( "context" + "strings" "github.com/hashicorp/go-hclog" "google.golang.org/grpc" @@ -129,16 +130,12 @@ func isGRPCStatusError(err error) bool { } func validateId(id *pbresource.ID, errorPrefix string) error { - var field string - switch { - case id.Type == nil: - field = "type" - case id.Name == "": - field = "name" + if id.Type == nil { + return status.Errorf(codes.InvalidArgument, "%s.type is required", errorPrefix) } - if field != "" { - return status.Errorf(codes.InvalidArgument, "%s.%s is required", errorPrefix, field) + if err := resource.ValidateName(id.Name); err != nil { + return status.Errorf(codes.InvalidArgument, "%s.name invalid: %v", errorPrefix, err) } // Better UX: Allow callers to pass in nil tenancy. 
Defaulting and inheritance of tenancy @@ -152,7 +149,61 @@ func validateId(id *pbresource.ID, errorPrefix string) error { } } - resource.Normalize(id.Tenancy) + if id.Tenancy.Partition != "" { + if err := resource.ValidateName(id.Tenancy.Partition); err != nil { + return status.Errorf(codes.InvalidArgument, "%s.tenancy.partition invalid: %v", errorPrefix, err) + } + } + if id.Tenancy.Namespace != "" { + if err := resource.ValidateName(id.Tenancy.Namespace); err != nil { + return status.Errorf(codes.InvalidArgument, "%s.tenancy.namespace invalid: %v", errorPrefix, err) + } + } + // TODO(spatel): NET-5475 - Remove as part of peer_name moving to PeerTenancy + if id.Tenancy.PeerName == "" { + id.Tenancy.PeerName = resource.DefaultPeerName + } + + return nil +} + +func validateRef(ref *pbresource.Reference, errorPrefix string) error { + if ref.Type == nil { + return status.Errorf(codes.InvalidArgument, "%s.type is required", errorPrefix) + } + if err := resource.ValidateName(ref.Name); err != nil { + return status.Errorf(codes.InvalidArgument, "%s.name invalid: %v", errorPrefix, err) + } + if err := resource.ValidateName(ref.Tenancy.Partition); err != nil { + return status.Errorf(codes.InvalidArgument, "%s.tenancy.partition invalid: %v", errorPrefix, err) + } + if err := resource.ValidateName(ref.Tenancy.Namespace); err != nil { + return status.Errorf(codes.InvalidArgument, "%s.tenancy.namespace invalid: %v", errorPrefix, err) + } + return nil +} + +func validateWildcardTenancy(tenancy *pbresource.Tenancy, namePrefix string) error { + // Partition has to be a valid name if not wildcard or empty + if tenancy.Partition != "" && tenancy.Partition != "*" { + if err := resource.ValidateName(tenancy.Partition); err != nil { + return status.Errorf(codes.InvalidArgument, "tenancy.partition invalid: %v", err) + } + } + + // Namespace has to be a valid name if not wildcard or empty + if tenancy.Namespace != "" && tenancy.Namespace != "*" { + if err := resource.ValidateName(tenancy.Namespace); err != nil { + return status.Errorf(codes.InvalidArgument, "tenancy.namespace invalid: %v", err) + } + } + + // Not doing a strict resource name validation here because the prefix can be + // something like "foo-" which is a valid prefix but not valid resource name. 
+ // relax validation to just check for lowercasing + if namePrefix != strings.ToLower(namePrefix) { + return status.Errorf(codes.InvalidArgument, "name_prefix invalid: must be lowercase alphanumeric, got: %v", namePrefix) + } return nil } @@ -165,7 +216,7 @@ func v1TenancyExists(reg *resource.Registration, v1Bridge TenancyBridge, tenancy case err != nil: return err case !exists: - return status.Errorf(errCode, "partition resource not found: %v", tenancy.Partition) + return status.Errorf(errCode, "partition not found: %v", tenancy.Partition) } } @@ -175,7 +226,7 @@ func v1TenancyExists(reg *resource.Registration, v1Bridge TenancyBridge, tenancy case err != nil: return err case !exists: - return status.Errorf(errCode, "namespace resource not found: %v", tenancy.Namespace) + return status.Errorf(errCode, "namespace not found: %v", tenancy.Namespace) } } return nil diff --git a/agent/grpc-external/services/resource/server_test.go b/agent/grpc-external/services/resource/server_test.go index 99add6497121..ffe7df52c401 100644 --- a/agent/grpc-external/services/resource/server_test.go +++ b/agent/grpc-external/services/resource/server_test.go @@ -6,7 +6,6 @@ package resource import ( "context" "fmt" - "strings" "testing" "github.com/stretchr/testify/mock" @@ -166,14 +165,6 @@ func wildcardTenancyCases() map[string]struct { PeerName: "local", }, }, - "namespaced type with uppercase partition and namespace": { - typ: demo.TypeV2Artist, - tenancy: &pbresource.Tenancy{ - Partition: "DEFAULT", - Namespace: "DEFAULT", - PeerName: "local", - }, - }, "namespaced type with wildcard partition and empty namespace": { typ: demo.TypeV2Artist, tenancy: &pbresource.Tenancy{ @@ -198,14 +189,6 @@ func wildcardTenancyCases() map[string]struct { PeerName: "local", }, }, - "partitioned type with uppercase partition": { - typ: demo.TypeV1RecordLabel, - tenancy: &pbresource.Tenancy{ - Partition: "DEFAULT", - Namespace: "", - PeerName: "local", - }, - }, "partitioned type with wildcard partition": { typ: demo.TypeV1RecordLabel, tenancy: &pbresource.Tenancy{ @@ -224,12 +207,6 @@ func tenancyCases() map[string]func(artistId, recordlabelId *pbresource.ID) *pbr "namespaced resource provides nonempty partition and namespace": func(artistId, recordLabelId *pbresource.ID) *pbresource.ID { return artistId }, - "namespaced resource provides uppercase partition and namespace": func(artistId, _ *pbresource.ID) *pbresource.ID { - id := clone(artistId) - id.Tenancy.Partition = strings.ToUpper(artistId.Tenancy.Partition) - id.Tenancy.Namespace = strings.ToUpper(artistId.Tenancy.Namespace) - return id - }, "namespaced resource inherits tokens partition when empty": func(artistId, _ *pbresource.ID) *pbresource.ID { id := clone(artistId) id.Tenancy.Partition = "" @@ -254,11 +231,6 @@ func tenancyCases() map[string]func(artistId, recordlabelId *pbresource.ID) *pbr "partitioned resource provides nonempty partition": func(_, recordLabelId *pbresource.ID) *pbresource.ID { return recordLabelId }, - "partitioned resource provides uppercase partition": func(_, recordLabelId *pbresource.ID) *pbresource.ID { - id := clone(recordLabelId) - id.Tenancy.Partition = strings.ToUpper(recordLabelId.Tenancy.Partition) - return id - }, "partitioned resource inherits tokens partition when empty": func(_, recordLabelId *pbresource.ID) *pbresource.ID { id := clone(recordLabelId) id.Tenancy.Partition = "" diff --git a/agent/grpc-external/services/resource/watch.go b/agent/grpc-external/services/resource/watch.go index f20d3f00f875..44b0a83caa94 100644 --- 
a/agent/grpc-external/services/resource/watch.go +++ b/agent/grpc-external/services/resource/watch.go @@ -110,8 +110,9 @@ func (s *Server) validateWatchListRequest(req *pbresource.WatchListRequest) (*re return nil, err } - // Lowercase - resource.Normalize(req.Tenancy) + if err := validateWildcardTenancy(req.Tenancy, req.NamePrefix); err != nil { + return nil, err + } // Error when partition scoped and namespace not empty. if reg.Scope == resource.ScopePartition && req.Tenancy.Namespace != "" { diff --git a/agent/grpc-external/services/resource/watch_test.go b/agent/grpc-external/services/resource/watch_test.go index 051264441bbc..5e5590d3f9fd 100644 --- a/agent/grpc-external/services/resource/watch_test.go +++ b/agent/grpc-external/services/resource/watch_test.go @@ -7,6 +7,7 @@ import ( "context" "errors" "io" + "strings" "testing" "time" @@ -27,24 +28,61 @@ import ( func TestWatchList_InputValidation(t *testing.T) { server := testServer(t) client := testClient(t, server) - demo.RegisterTypes(server.Registry) - testCases := map[string]func(*pbresource.WatchListRequest){ - "no type": func(req *pbresource.WatchListRequest) { req.Type = nil }, - "no tenancy": func(req *pbresource.WatchListRequest) { req.Tenancy = nil }, - "partitioned type provides non-empty namespace": func(req *pbresource.WatchListRequest) { - req.Type = demo.TypeV1RecordLabel - req.Tenancy.Namespace = "bad" + type testCase struct { + modFn func(*pbresource.WatchListRequest) + errContains string + } + + testCases := map[string]testCase{ + "no type": { + modFn: func(req *pbresource.WatchListRequest) { req.Type = nil }, + errContains: "type is required", + }, + "no tenancy": { + modFn: func(req *pbresource.WatchListRequest) { req.Tenancy = nil }, + errContains: "tenancy is required", + }, + "partition mixed case": { + modFn: func(req *pbresource.WatchListRequest) { req.Tenancy.Partition = "Default" }, + errContains: "tenancy.partition invalid", + }, + "partition too long": { + modFn: func(req *pbresource.WatchListRequest) { + req.Tenancy.Partition = strings.Repeat("p", resource.MaxNameLength+1) + }, + errContains: "tenancy.partition invalid", + }, + "namespace mixed case": { + modFn: func(req *pbresource.WatchListRequest) { req.Tenancy.Namespace = "Default" }, + errContains: "tenancy.namespace invalid", + }, + "namespace too long": { + modFn: func(req *pbresource.WatchListRequest) { + req.Tenancy.Namespace = strings.Repeat("n", resource.MaxNameLength+1) + }, + errContains: "tenancy.namespace invalid", + }, + "name_prefix mixed case": { + modFn: func(req *pbresource.WatchListRequest) { req.NamePrefix = "Smashing" }, + errContains: "name_prefix invalid", + }, + "partitioned type provides non-empty namespace": { + modFn: func(req *pbresource.WatchListRequest) { + req.Type = demo.TypeV1RecordLabel + req.Tenancy.Namespace = "bad" + }, + errContains: "cannot have a namespace", }, } - for desc, modFn := range testCases { + for desc, tc := range testCases { t.Run(desc, func(t *testing.T) { req := &pbresource.WatchListRequest{ Type: demo.TypeV2Album, Tenancy: resource.DefaultNamespacedTenancy(), } - modFn(req) + tc.modFn(req) stream, err := client.WatchList(testContext(t), req) require.NoError(t, err) @@ -52,6 +90,7 @@ func TestWatchList_InputValidation(t *testing.T) { _, err = stream.Recv() require.Error(t, err) require.Equal(t, codes.InvalidArgument.String(), status.Code(err).String()) + require.ErrorContains(t, err, tc.errContains) }) } } @@ -136,7 +175,7 @@ func TestWatchList_Tenancy_Defaults_And_Normalization(t *testing.T) { 
rspCh := handleResourceStream(t, stream) // Testcase will pick one of recordLabel or artist based on scope of type. - recordLabel, err := demo.GenerateV1RecordLabel("LooneyTunes") + recordLabel, err := demo.GenerateV1RecordLabel("looney-tunes") require.NoError(t, err) artist, err := demo.GenerateV2Artist() require.NoError(t, err) diff --git a/agent/grpc-external/services/resource/write_status.go b/agent/grpc-external/services/resource/write_status.go index 0d3b68bb0876..993c8382e2c4 100644 --- a/agent/grpc-external/services/resource/write_status.go +++ b/agent/grpc-external/services/resource/write_status.go @@ -178,8 +178,17 @@ func (s *Server) validateWriteStatusRequest(req *pbresource.WriteStatusRequest) } } - // Lowercase - resource.Normalize(req.Id.Tenancy) + if err := validateId(req.Id, "id"); err != nil { + return nil, err + } + + for i, condition := range req.Status.Conditions { + if condition.Resource != nil { + if err := validateRef(condition.Resource, fmt.Sprintf("status.conditions[%d].resource", i)); err != nil { + return nil, err + } + } + } // Check type exists. reg, err := s.resolveType(req.Id.Type) diff --git a/agent/grpc-external/services/resource/write_status_test.go b/agent/grpc-external/services/resource/write_status_test.go index 5b71983475d9..1ddf73863236 100644 --- a/agent/grpc-external/services/resource/write_status_test.go +++ b/agent/grpc-external/services/resource/write_status_test.go @@ -74,64 +74,155 @@ func TestWriteStatus_InputValidation(t *testing.T) { demo.RegisterTypes(server.Registry) testCases := map[string]struct { - typ *pbresource.Type - modFn func(req *pbresource.WriteStatusRequest) + typ *pbresource.Type + modFn func(req *pbresource.WriteStatusRequest) + errContains string }{ "no id": { - typ: demo.TypeV2Artist, - modFn: func(req *pbresource.WriteStatusRequest) { req.Id = nil }, + typ: demo.TypeV2Artist, + modFn: func(req *pbresource.WriteStatusRequest) { req.Id = nil }, + errContains: "id is required", }, "no type": { - typ: demo.TypeV2Artist, - modFn: func(req *pbresource.WriteStatusRequest) { req.Id.Type = nil }, + typ: demo.TypeV2Artist, + modFn: func(req *pbresource.WriteStatusRequest) { req.Id.Type = nil }, + errContains: "id.type is required", }, "no name": { - typ: demo.TypeV2Artist, - modFn: func(req *pbresource.WriteStatusRequest) { req.Id.Name = "" }, + typ: demo.TypeV2Artist, + modFn: func(req *pbresource.WriteStatusRequest) { req.Id.Name = "" }, + errContains: "id.name is required", }, "no uid": { - typ: demo.TypeV2Artist, - modFn: func(req *pbresource.WriteStatusRequest) { req.Id.Uid = "" }, + typ: demo.TypeV2Artist, + modFn: func(req *pbresource.WriteStatusRequest) { req.Id.Uid = "" }, + errContains: "id.uid is required", + }, + "name mixed case": { + typ: demo.TypeV2Artist, + modFn: func(req *pbresource.WriteStatusRequest) { req.Id.Name = "U2" }, + errContains: "id.name invalid", + }, + "name too long": { + typ: demo.TypeV2Artist, + modFn: func(req *pbresource.WriteStatusRequest) { + req.Id.Name = strings.Repeat("a", resource.MaxNameLength+1) + }, + errContains: "id.name invalid", + }, + "partition mixed case": { + typ: demo.TypeV2Artist, + modFn: func(req *pbresource.WriteStatusRequest) { req.Id.Tenancy.Partition = "Default" }, + errContains: "id.tenancy.partition invalid", + }, + "partition too long": { + typ: demo.TypeV2Artist, + modFn: func(req *pbresource.WriteStatusRequest) { + req.Id.Tenancy.Partition = strings.Repeat("p", resource.MaxNameLength+1) + }, + errContains: "id.tenancy.partition invalid", + }, + "namespace mixed 
case": { + typ: demo.TypeV2Artist, + modFn: func(req *pbresource.WriteStatusRequest) { req.Id.Tenancy.Namespace = "Default" }, + errContains: "id.tenancy.namespace invalid", + }, + "namespace too long": { + typ: demo.TypeV2Artist, + modFn: func(req *pbresource.WriteStatusRequest) { + req.Id.Tenancy.Namespace = strings.Repeat("n", resource.MaxNameLength+1) + }, + errContains: "id.tenancy.namespace invalid", }, "no key": { - typ: demo.TypeV2Artist, - modFn: func(req *pbresource.WriteStatusRequest) { req.Key = "" }, + typ: demo.TypeV2Artist, + modFn: func(req *pbresource.WriteStatusRequest) { req.Key = "" }, + errContains: "key is required", }, "no status": { - typ: demo.TypeV2Artist, - modFn: func(req *pbresource.WriteStatusRequest) { req.Status = nil }, + typ: demo.TypeV2Artist, + modFn: func(req *pbresource.WriteStatusRequest) { req.Status = nil }, + errContains: "status is required", }, "no observed generation": { - typ: demo.TypeV2Artist, - modFn: func(req *pbresource.WriteStatusRequest) { req.Status.ObservedGeneration = "" }, + typ: demo.TypeV2Artist, + modFn: func(req *pbresource.WriteStatusRequest) { req.Status.ObservedGeneration = "" }, + errContains: "status.observed_generation is required", }, "bad observed generation": { - typ: demo.TypeV2Artist, - modFn: func(req *pbresource.WriteStatusRequest) { req.Status.ObservedGeneration = "bogus" }, + typ: demo.TypeV2Artist, + modFn: func(req *pbresource.WriteStatusRequest) { req.Status.ObservedGeneration = "bogus" }, + errContains: "status.observed_generation is not valid", }, "no condition type": { - typ: demo.TypeV2Artist, - modFn: func(req *pbresource.WriteStatusRequest) { req.Status.Conditions[0].Type = "" }, + typ: demo.TypeV2Artist, + modFn: func(req *pbresource.WriteStatusRequest) { req.Status.Conditions[0].Type = "" }, + errContains: "status.conditions[0].type is required", }, "no reference type": { - typ: demo.TypeV2Artist, - modFn: func(req *pbresource.WriteStatusRequest) { req.Status.Conditions[0].Resource.Type = nil }, + typ: demo.TypeV2Artist, + modFn: func(req *pbresource.WriteStatusRequest) { req.Status.Conditions[0].Resource.Type = nil }, + errContains: "status.conditions[0].resource.type is required", }, "no reference tenancy": { - typ: demo.TypeV2Artist, - modFn: func(req *pbresource.WriteStatusRequest) { req.Status.Conditions[0].Resource.Tenancy = nil }, + typ: demo.TypeV2Artist, + modFn: func(req *pbresource.WriteStatusRequest) { req.Status.Conditions[0].Resource.Tenancy = nil }, + errContains: "status.conditions[0].resource.tenancy is required", }, "no reference name": { - typ: demo.TypeV2Artist, - modFn: func(req *pbresource.WriteStatusRequest) { req.Status.Conditions[0].Resource.Name = "" }, + typ: demo.TypeV2Artist, + modFn: func(req *pbresource.WriteStatusRequest) { req.Status.Conditions[0].Resource.Name = "" }, + errContains: "status.conditions[0].resource.name is required", + }, + "reference name mixed case": { + typ: demo.TypeV2Artist, + modFn: func(req *pbresource.WriteStatusRequest) { req.Status.Conditions[0].Resource.Name = "U2" }, + errContains: "status.conditions[0].resource.name invalid", + }, + "reference name too long": { + typ: demo.TypeV2Artist, + modFn: func(req *pbresource.WriteStatusRequest) { + req.Status.Conditions[0].Resource.Name = strings.Repeat("r", resource.MaxNameLength+1) + }, + errContains: "status.conditions[0].resource.name invalid", + }, + "reference partition mixed case": { + typ: demo.TypeV2Artist, + modFn: func(req *pbresource.WriteStatusRequest) { + 
req.Status.Conditions[0].Resource.Tenancy.Partition = "Default" + }, + errContains: "status.conditions[0].resource.tenancy.partition invalid", + }, + "reference partition too long": { + typ: demo.TypeV2Artist, + modFn: func(req *pbresource.WriteStatusRequest) { + req.Status.Conditions[0].Resource.Tenancy.Partition = strings.Repeat("p", resource.MaxNameLength+1) + }, + errContains: "status.conditions[0].resource.tenancy.partition invalid", + }, + "reference namespace mixed case": { + typ: demo.TypeV2Artist, + modFn: func(req *pbresource.WriteStatusRequest) { + req.Status.Conditions[0].Resource.Tenancy.Namespace = "Default" + }, + errContains: "status.conditions[0].resource.tenancy.namespace invalid", + }, + "reference namespace too long": { + typ: demo.TypeV2Artist, + modFn: func(req *pbresource.WriteStatusRequest) { + req.Status.Conditions[0].Resource.Tenancy.Namespace = strings.Repeat("n", resource.MaxNameLength+1) + }, + errContains: "status.conditions[0].resource.tenancy.namespace invalid", }, "updated at provided": { - typ: demo.TypeV2Artist, - modFn: func(req *pbresource.WriteStatusRequest) { req.Status.UpdatedAt = timestamppb.Now() }, + typ: demo.TypeV2Artist, + modFn: func(req *pbresource.WriteStatusRequest) { req.Status.UpdatedAt = timestamppb.Now() }, + errContains: "status.updated_at is automatically set and cannot be provided", }, "partition scoped type provides namespace in tenancy": { - typ: demo.TypeV1RecordLabel, - modFn: func(req *pbresource.WriteStatusRequest) { req.Id.Tenancy.Namespace = "bad" }, + typ: demo.TypeV1RecordLabel, + modFn: func(req *pbresource.WriteStatusRequest) { req.Id.Tenancy.Namespace = "bad" }, + errContains: "cannot have a namespace", }, } for desc, tc := range testCases { @@ -142,7 +233,7 @@ func TestWriteStatus_InputValidation(t *testing.T) { case resource.EqualType(demo.TypeV2Artist, tc.typ): res, err = demo.GenerateV2Artist() case resource.EqualType(demo.TypeV1RecordLabel, tc.typ): - res, err = demo.GenerateV1RecordLabel("Looney Tunes") + res, err = demo.GenerateV1RecordLabel("looney-tunes") default: t.Fatal("unsupported type", tc.typ) } @@ -157,6 +248,7 @@ func TestWriteStatus_InputValidation(t *testing.T) { _, err = client.WriteStatus(testContext(t), req) require.Error(t, err) require.Equal(t, codes.InvalidArgument.String(), status.Code(err).String()) + require.ErrorContains(t, err, tc.errContains) }) } } @@ -210,13 +302,6 @@ func TestWriteStatus_Tenancy_Defaults(t *testing.T) { scope: resource.ScopeNamespace, modFn: func(req *pbresource.WriteStatusRequest) {}, }, - "namespaced resource provides uppercase partition and namespace": { - scope: resource.ScopeNamespace, - modFn: func(req *pbresource.WriteStatusRequest) { - req.Id.Tenancy.Partition = strings.ToUpper(req.Id.Tenancy.Partition) - req.Id.Tenancy.Namespace = strings.ToUpper(req.Id.Tenancy.Namespace) - }, - }, "namespaced resource inherits tokens partition when empty": { scope: resource.ScopeNamespace, modFn: func(req *pbresource.WriteStatusRequest) { req.Id.Tenancy.Partition = "" }, @@ -240,12 +325,6 @@ func TestWriteStatus_Tenancy_Defaults(t *testing.T) { scope: resource.ScopePartition, modFn: func(req *pbresource.WriteStatusRequest) {}, }, - "partitioned resource provides uppercase partition": { - scope: resource.ScopePartition, - modFn: func(req *pbresource.WriteStatusRequest) { - req.Id.Tenancy.Partition = strings.ToUpper(req.Id.Tenancy.Partition) - }, - }, "partitioned resource inherits tokens partition when empty": { scope: resource.ScopePartition, modFn: func(req 
*pbresource.WriteStatusRequest) { req.Id.Tenancy.Partition = "" }, @@ -263,7 +342,7 @@ func TestWriteStatus_Tenancy_Defaults(t *testing.T) { case resource.ScopeNamespace: res, err = demo.GenerateV2Artist() case resource.ScopePartition: - res, err = demo.GenerateV1RecordLabel("Looney Tunes") + res, err = demo.GenerateV1RecordLabel("looney-tunes") } require.NoError(t, err) @@ -280,7 +359,7 @@ func TestWriteStatus_Tenancy_Defaults(t *testing.T) { require.NoError(t, err) res = rsp.Resource - // Re-read resoruce and verify status successfully written (not nil) + // Re-read resource and verify status successfully written (not nil) _, err = client.Read(testContext(t), &pbresource.ReadRequest{Id: res.Id}) require.NoError(t, err) res = rsp.Resource @@ -327,7 +406,7 @@ func TestWriteStatus_Tenancy_NotFound(t *testing.T) { case resource.ScopeNamespace: res, err = demo.GenerateV2Artist() case resource.ScopePartition: - res, err = demo.GenerateV1RecordLabel("Looney Tunes") + res, err = demo.GenerateV1RecordLabel("looney-tunes") } require.NoError(t, err) diff --git a/agent/grpc-external/services/resource/write_test.go b/agent/grpc-external/services/resource/write_test.go index 3828ff9753f2..9f7704b52b97 100644 --- a/agent/grpc-external/services/resource/write_test.go +++ b/agent/grpc-external/services/resource/write_test.go @@ -29,54 +29,123 @@ import ( func TestWrite_InputValidation(t *testing.T) { server := testServer(t) client := testClient(t, server) - demo.RegisterTypes(server.Registry) - testCases := map[string]func(artist, recordLabel *pbresource.Resource) *pbresource.Resource{ - "no resource": func(artist, recordLabel *pbresource.Resource) *pbresource.Resource { return nil }, - "no id": func(artist, _ *pbresource.Resource) *pbresource.Resource { - artist.Id = nil - return artist + type testCase struct { + modFn func(artist, recordLabel *pbresource.Resource) *pbresource.Resource + errContains string + } + + testCases := map[string]testCase{ + "no resource": { + modFn: func(_, _ *pbresource.Resource) *pbresource.Resource { + return nil + }, + errContains: "resource is required", }, - "no type": func(artist, _ *pbresource.Resource) *pbresource.Resource { - artist.Id.Type = nil - return artist + "no id": { + modFn: func(artist, _ *pbresource.Resource) *pbresource.Resource { + artist.Id = nil + return artist + }, + errContains: "resource.id is required", }, - "no name": func(artist, _ *pbresource.Resource) *pbresource.Resource { - artist.Id.Name = "" - return artist + "no type": { + modFn: func(artist, _ *pbresource.Resource) *pbresource.Resource { + artist.Id.Type = nil + return artist + }, + errContains: "resource.id.type is required", }, - "wrong data type": func(artist, _ *pbresource.Resource) *pbresource.Resource { - var err error - artist.Data, err = anypb.New(&pbdemov2.Album{}) - require.NoError(t, err) - return artist + "no name": { + modFn: func(artist, _ *pbresource.Resource) *pbresource.Resource { + artist.Id.Name = "" + return artist + }, + errContains: "resource.id.name invalid", + }, + "name is mixed case": { + modFn: func(artist, _ *pbresource.Resource) *pbresource.Resource { + artist.Id.Name = "MixedCaseNotAllowed" + return artist + }, + errContains: "resource.id.name invalid", + }, + "name too long": { + modFn: func(artist, _ *pbresource.Resource) *pbresource.Resource { + artist.Id.Name = strings.Repeat("a", resource.MaxNameLength+1) + return artist + }, + errContains: "resource.id.name invalid", + }, + "wrong data type": { + modFn: func(artist, _ *pbresource.Resource) 
*pbresource.Resource { + var err error + artist.Data, err = anypb.New(&pbdemov2.Album{}) + require.NoError(t, err) + return artist + }, + errContains: "resource.data is of wrong type", + }, + "partition is mixed case": { + modFn: func(artist, _ *pbresource.Resource) *pbresource.Resource { + artist.Id.Tenancy.Partition = "Default" + return artist + }, + errContains: "resource.id.tenancy.partition invalid", + }, + "partition too long": { + modFn: func(artist, _ *pbresource.Resource) *pbresource.Resource { + artist.Id.Tenancy.Partition = strings.Repeat("p", resource.MaxNameLength+1) + return artist + }, + errContains: "resource.id.tenancy.partition invalid", }, - "fail validation hook": func(artist, _ *pbresource.Resource) *pbresource.Resource { - buffer := &pbdemov2.Artist{} - require.NoError(t, artist.Data.UnmarshalTo(buffer)) - buffer.Name = "" // name cannot be empty - require.NoError(t, artist.Data.MarshalFrom(buffer)) - return artist + "namespace is mixed case": { + modFn: func(artist, _ *pbresource.Resource) *pbresource.Resource { + artist.Id.Tenancy.Namespace = "Default" + return artist + }, + errContains: "resource.id.tenancy.namespace invalid", }, - "partition scope with non-empty namespace": func(_, recordLabel *pbresource.Resource) *pbresource.Resource { - recordLabel.Id.Tenancy.Namespace = "bogus" - return recordLabel + "namespace too long": { + modFn: func(artist, _ *pbresource.Resource) *pbresource.Resource { + artist.Id.Tenancy.Namespace = strings.Repeat("n", resource.MaxNameLength+1) + return artist + }, + errContains: "resource.id.tenancy.namespace invalid", + }, + "fail validation hook": { + modFn: func(artist, _ *pbresource.Resource) *pbresource.Resource { + buffer := &pbdemov2.Artist{} + require.NoError(t, artist.Data.UnmarshalTo(buffer)) + buffer.Name = "" // name cannot be empty + require.NoError(t, artist.Data.MarshalFrom(buffer)) + return artist + }, + errContains: "artist.name required", + }, + "partition scope with non-empty namespace": { + modFn: func(_, recordLabel *pbresource.Resource) *pbresource.Resource { + recordLabel.Id.Tenancy.Namespace = "bogus" + return recordLabel + }, + errContains: "cannot have a namespace", }, - // TODO(spatel): add cluster scope tests when we have an actual cluster scoped resource (e.g. 
partition) } - for desc, modFn := range testCases { + for desc, tc := range testCases { t.Run(desc, func(t *testing.T) { artist, err := demo.GenerateV2Artist() require.NoError(t, err) - recordLabel, err := demo.GenerateV1RecordLabel("LoonyTunes") + recordLabel, err := demo.GenerateV1RecordLabel("looney-tunes") require.NoError(t, err) - req := &pbresource.WriteRequest{Resource: modFn(artist, recordLabel)} + req := &pbresource.WriteRequest{Resource: tc.modFn(artist, recordLabel)} _, err = client.Write(testContext(t), req) require.Error(t, err) require.Equal(t, codes.InvalidArgument.String(), status.Code(err).String()) + require.ErrorContains(t, err, tc.errContains) }) } } @@ -84,7 +153,6 @@ func TestWrite_InputValidation(t *testing.T) { func TestWrite_OwnerValidation(t *testing.T) { server := testServer(t) client := testClient(t, server) - demo.RegisterTypes(server.Registry) type testCase struct { @@ -94,15 +162,49 @@ func TestWrite_OwnerValidation(t *testing.T) { testCases := map[string]testCase{ "no owner type": { modReqFn: func(req *pbresource.WriteRequest) { req.Resource.Owner.Type = nil }, - errorContains: "resource.owner.type", + errorContains: "resource.owner.type is required", }, "no owner tenancy": { modReqFn: func(req *pbresource.WriteRequest) { req.Resource.Owner.Tenancy = nil }, - errorContains: "resource.owner", + errorContains: "resource.owner does not exist", }, "no owner name": { modReqFn: func(req *pbresource.WriteRequest) { req.Resource.Owner.Name = "" }, - errorContains: "resource.owner.name", + errorContains: "resource.owner.name invalid", + }, + "mixed case owner name": { + modReqFn: func(req *pbresource.WriteRequest) { req.Resource.Owner.Name = strings.ToUpper(req.Resource.Owner.Name) }, + errorContains: "resource.owner.name invalid", + }, + "owner name too long": { + modReqFn: func(req *pbresource.WriteRequest) { + req.Resource.Owner.Name = strings.Repeat("a", resource.MaxNameLength+1) + }, + errorContains: "resource.owner.name invalid", + }, + "owner partition is mixed case": { + modReqFn: func(req *pbresource.WriteRequest) { + req.Resource.Owner.Tenancy.Partition = "Default" + }, + errorContains: "resource.owner.tenancy.partition invalid", + }, + "owner partition too long": { + modReqFn: func(req *pbresource.WriteRequest) { + req.Resource.Owner.Tenancy.Partition = strings.Repeat("p", resource.MaxNameLength+1) + }, + errorContains: "resource.owner.tenancy.partition invalid", + }, + "owner namespace is mixed case": { + modReqFn: func(req *pbresource.WriteRequest) { + req.Resource.Owner.Tenancy.Namespace = "Default" + }, + errorContains: "resource.owner.tenancy.namespace invalid", + }, + "owner namespace too long": { + modReqFn: func(req *pbresource.WriteRequest) { + req.Resource.Owner.Tenancy.Namespace = strings.Repeat("n", resource.MaxNameLength+1) + }, + errorContains: "resource.owner.tenancy.namespace invalid", }, } for desc, tc := range testCases { @@ -215,14 +317,6 @@ func TestWrite_Create_Success(t *testing.T) { }, expectedTenancy: resource.DefaultNamespacedTenancy(), }, - "namespaced resource provides uppercase partition and namespace": { - modFn: func(artist, _ *pbresource.Resource) *pbresource.Resource { - artist.Id.Tenancy.Partition = strings.ToUpper(artist.Id.Tenancy.Partition) - artist.Id.Tenancy.Namespace = strings.ToUpper(artist.Id.Tenancy.Namespace) - return artist - }, - expectedTenancy: resource.DefaultNamespacedTenancy(), - }, "namespaced resource inherits tokens partition when empty": { modFn: func(artist, _ *pbresource.Resource) *pbresource.Resource 
{ artist.Id.Tenancy.Partition = "" @@ -266,13 +360,6 @@ func TestWrite_Create_Success(t *testing.T) { }, expectedTenancy: resource.DefaultPartitionedTenancy(), }, - "partitioned resource provides uppercase partition": { - modFn: func(_, recordLabel *pbresource.Resource) *pbresource.Resource { - recordLabel.Id.Tenancy.Partition = strings.ToUpper(recordLabel.Id.Tenancy.Partition) - return recordLabel - }, - expectedTenancy: resource.DefaultPartitionedTenancy(), - }, "partitioned resource inherits tokens partition when empty": { modFn: func(_, recordLabel *pbresource.Resource) *pbresource.Resource { recordLabel.Id.Tenancy.Partition = "" @@ -303,7 +390,7 @@ func TestWrite_Create_Success(t *testing.T) { client := testClient(t, server) demo.RegisterTypes(server.Registry) - recordLabel, err := demo.GenerateV1RecordLabel("LoonyTunes") + recordLabel, err := demo.GenerateV1RecordLabel("looney-tunes") require.NoError(t, err) artist, err := demo.GenerateV2Artist() @@ -331,7 +418,7 @@ func TestWrite_Create_Tenancy_NotFound(t *testing.T) { return artist }, errCode: codes.InvalidArgument, - errContains: "partition", + errContains: "partition not found", }, "namespaced resource provides nonexistant namespace": { modFn: func(artist, _ *pbresource.Resource) *pbresource.Resource { @@ -339,7 +426,7 @@ func TestWrite_Create_Tenancy_NotFound(t *testing.T) { return artist }, errCode: codes.InvalidArgument, - errContains: "namespace", + errContains: "namespace not found", }, "partitioned resource provides nonexistant partition": { modFn: func(_, recordLabel *pbresource.Resource) *pbresource.Resource { @@ -347,7 +434,7 @@ func TestWrite_Create_Tenancy_NotFound(t *testing.T) { return recordLabel }, errCode: codes.InvalidArgument, - errContains: "partition", + errContains: "partition not found", }, } for desc, tc := range testCases { @@ -356,7 +443,7 @@ func TestWrite_Create_Tenancy_NotFound(t *testing.T) { client := testClient(t, server) demo.RegisterTypes(server.Registry) - recordLabel, err := demo.GenerateV1RecordLabel("LoonyTunes") + recordLabel, err := demo.GenerateV1RecordLabel("looney-tunes") require.NoError(t, err) artist, err := demo.GenerateV2Artist() @@ -378,22 +465,22 @@ func TestWrite_Tenancy_MarkedForDeletion(t *testing.T) { }{ "namespaced resources partition marked for deletion": { modFn: func(artist, _ *pbresource.Resource, mockTenancyBridge *MockTenancyBridge) *pbresource.Resource { - mockTenancyBridge.On("IsPartitionMarkedForDeletion", "part1").Return(true, nil) + mockTenancyBridge.On("IsPartitionMarkedForDeletion", "ap1").Return(true, nil) return artist }, errContains: "partition marked for deletion", }, "namespaced resources namespace marked for deletion": { modFn: func(artist, _ *pbresource.Resource, mockTenancyBridge *MockTenancyBridge) *pbresource.Resource { - mockTenancyBridge.On("IsPartitionMarkedForDeletion", "part1").Return(false, nil) - mockTenancyBridge.On("IsNamespaceMarkedForDeletion", "part1", "ns1").Return(true, nil) + mockTenancyBridge.On("IsPartitionMarkedForDeletion", "ap1").Return(false, nil) + mockTenancyBridge.On("IsNamespaceMarkedForDeletion", "ap1", "ns1").Return(true, nil) return artist }, errContains: "namespace marked for deletion", }, "partitioned resources partition marked for deletion": { modFn: func(_, recordLabel *pbresource.Resource, mockTenancyBridge *MockTenancyBridge) *pbresource.Resource { - mockTenancyBridge.On("IsPartitionMarkedForDeletion", "part1").Return(true, nil) + mockTenancyBridge.On("IsPartitionMarkedForDeletion", "ap1").Return(true, nil) return 
recordLabel }, errContains: "partition marked for deletion", @@ -404,18 +491,18 @@ func TestWrite_Tenancy_MarkedForDeletion(t *testing.T) { server := testServer(t) client := testClient(t, server) demo.RegisterTypes(server.Registry) - recordLabel, err := demo.GenerateV1RecordLabel("LoonyTunes") + recordLabel, err := demo.GenerateV1RecordLabel("looney-tunes") require.NoError(t, err) - recordLabel.Id.Tenancy.Partition = "part1" + recordLabel.Id.Tenancy.Partition = "ap1" artist, err := demo.GenerateV2Artist() require.NoError(t, err) - artist.Id.Tenancy.Partition = "part1" + artist.Id.Tenancy.Partition = "ap1" artist.Id.Tenancy.Namespace = "ns1" mockTenancyBridge := &MockTenancyBridge{} - mockTenancyBridge.On("PartitionExists", "part1").Return(true, nil) - mockTenancyBridge.On("NamespaceExists", "part1", "ns1").Return(true, nil) + mockTenancyBridge.On("PartitionExists", "ap1").Return(true, nil) + mockTenancyBridge.On("NamespaceExists", "ap1", "ns1").Return(true, nil) server.TenancyBridge = mockTenancyBridge _, err = client.Write(testContext(t), &pbresource.WriteRequest{Resource: tc.modFn(artist, recordLabel, mockTenancyBridge)}) diff --git a/internal/mesh/internal/controllers/xds/controller_test.go b/internal/mesh/internal/controllers/xds/controller_test.go index 6bb5f85c9908..8d4799e2da28 100644 --- a/internal/mesh/internal/controllers/xds/controller_test.go +++ b/internal/mesh/internal/controllers/xds/controller_test.go @@ -237,7 +237,10 @@ func (suite *xdsControllerTestSuite) TestReconcile_ReadEndpointError() { require.Error(suite.T(), err) // Assert on the status reflecting endpoint couldn't be read. - suite.client.RequireStatusCondition(suite.T(), fooProxyStateTemplate.Id, ControllerName, status.ConditionRejectedErrorReadingEndpoints(status.KeyFromID(badID), "rpc error: code = InvalidArgument desc = id.name is required")) + suite.client.RequireStatusCondition(suite.T(), fooProxyStateTemplate.Id, ControllerName, status.ConditionRejectedErrorReadingEndpoints( + status.KeyFromID(badID), + "rpc error: code = InvalidArgument desc = id.name invalid: a resource name must consist of lower case alphanumeric characters or '-', must start and end with an alphanumeric character and be less than 64 characters, got: \"\"", + )) } // This test is a happy path creation test to make sure pbproxystate.Endpoints are created in the computed diff --git a/internal/resource/demo/controller.go b/internal/resource/demo/controller.go index 7f1bba902ea5..a8757fcae262 100644 --- a/internal/resource/demo/controller.go +++ b/internal/resource/demo/controller.go @@ -71,7 +71,7 @@ func (r *artistReconciler) Reconcile(ctx context.Context, rt controller.Runtime, actualAlbums, err := rt.Client.List(ctx, &pbresource.ListRequest{ Type: TypeV2Album, Tenancy: res.Id.Tenancy, - NamePrefix: fmt.Sprintf("%s/", res.Id.Name), + NamePrefix: fmt.Sprintf("%s-", res.Id.Name), }) if err != nil { return err diff --git a/internal/resource/demo/demo.go b/internal/resource/demo/demo.go index 8e978c9fb49a..12fced6718e6 100644 --- a/internal/resource/demo/demo.go +++ b/internal/resource/demo/demo.go @@ -354,7 +354,7 @@ func generateV2Album(artistID *pbresource.ID, rand *rand.Rand) (*pbresource.Reso Id: &pbresource.ID{ Type: TypeV2Album, Tenancy: clone(artistID.Tenancy), - Name: fmt.Sprintf("%s/%s-%s", artistID.Name, strings.ToLower(adjective), strings.ToLower(noun)), + Name: fmt.Sprintf("%s-%s-%s", artistID.Name, strings.ToLower(adjective), strings.ToLower(noun)), }, Owner: artistID, Data: data, diff --git a/internal/resource/http/http_test.go 
b/internal/resource/http/http_test.go index 50f50fbe3948..46d2af363590 100644 --- a/internal/resource/http/http_test.go +++ b/internal/resource/http/http_test.go @@ -42,7 +42,7 @@ func TestResourceHandler_InputValidation(t *testing.T) { request *http.Request response *httptest.ResponseRecorder expectedResponseCode int - expectedErrorMessage string + responseBodyContains string } client := svctest.RunResourceService(t, demo.RegisterTypes) resourceHandler := resourceHandler{ @@ -72,7 +72,7 @@ func TestResourceHandler_InputValidation(t *testing.T) { `)), response: httptest.NewRecorder(), expectedResponseCode: http.StatusBadRequest, - expectedErrorMessage: "rpc error: code = InvalidArgument desc = resource.id.name is required", + responseBodyContains: "resource.id.name invalid", }, { description: "wrong schema", @@ -89,21 +89,21 @@ func TestResourceHandler_InputValidation(t *testing.T) { `)), response: httptest.NewRecorder(), expectedResponseCode: http.StatusBadRequest, - expectedErrorMessage: "Request body didn't follow the resource schema", + responseBodyContains: "Request body didn't follow the resource schema", }, { description: "invalid request body", request: httptest.NewRequest("PUT", "/keith-urban?partition=default&peer_name=local&namespace=default", strings.NewReader("bad-input")), response: httptest.NewRecorder(), expectedResponseCode: http.StatusBadRequest, - expectedErrorMessage: "Request body format is invalid", + responseBodyContains: "Request body format is invalid", }, { description: "no id", request: httptest.NewRequest("DELETE", "/?partition=default&peer_name=local&namespace=default", strings.NewReader("")), response: httptest.NewRecorder(), expectedResponseCode: http.StatusBadRequest, - expectedErrorMessage: "rpc error: code = InvalidArgument desc = id.name is required", + responseBodyContains: "id.name invalid", }, } @@ -119,7 +119,7 @@ func TestResourceHandler_InputValidation(t *testing.T) { require.NoError(t, err) require.Equal(t, tc.expectedResponseCode, tc.response.Result().StatusCode) - require.Equal(t, tc.expectedErrorMessage, string(b)) + require.Contains(t, string(b), tc.responseBodyContains) }) } } diff --git a/internal/resource/resource.go b/internal/resource/resource.go new file mode 100644 index 000000000000..b5100f002955 --- /dev/null +++ b/internal/resource/resource.go @@ -0,0 +1,22 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package resource + +import ( + "fmt" + "strings" + + "github.com/hashicorp/consul/agent/dns" +) + +const MaxNameLength = 63 + +// ValidateName returns an error a name is not a valid resource name. +// The error will contain reference to what constitutes a valid resource name. +func ValidateName(name string) error { + if !dns.IsValidLabel(name) || strings.ToLower(name) != name || len(name) > MaxNameLength { + return fmt.Errorf("a resource name must consist of lower case alphanumeric characters or '-', must start and end with an alphanumeric character and be less than %d characters, got: %q", MaxNameLength+1, name) + } + return nil +} diff --git a/internal/resource/tenancy.go b/internal/resource/tenancy.go index 126e12413f6a..99756d503ff3 100644 --- a/internal/resource/tenancy.go +++ b/internal/resource/tenancy.go @@ -5,7 +5,6 @@ package resource import ( "fmt" - "strings" "google.golang.org/protobuf/proto" @@ -62,20 +61,6 @@ func (s Scope) String() string { panic(fmt.Sprintf("string mapping missing for scope %v", int(s))) } -// Normalize lowercases the partition and namespace. 
-func Normalize(tenancy *pbresource.Tenancy) { - if tenancy == nil { - return - } - tenancy.Partition = strings.ToLower(tenancy.Partition) - tenancy.Namespace = strings.ToLower(tenancy.Namespace) - - // TODO(spatel): NET-5475 - Remove as part of peer_name moving to PeerTenancy - if tenancy.PeerName == "" { - tenancy.PeerName = DefaultPeerName - } -} - // DefaultClusteredTenancy returns the default tenancy for a cluster scoped resource. func DefaultClusteredTenancy() *pbresource.Tenancy { return &pbresource.Tenancy{ @@ -156,7 +141,6 @@ func defaultTenancy(itemTenancy, parentTenancy, scopeTenancy *pbresource.Tenancy if itemTenancy.PeerName == "" { itemTenancy.PeerName = DefaultPeerName } - Normalize(itemTenancy) if parentTenancy != nil { // Recursively normalize this tenancy as well. @@ -167,7 +151,6 @@ func defaultTenancy(itemTenancy, parentTenancy, scopeTenancy *pbresource.Tenancy if parentTenancy == nil { parentTenancy = scopeTenancy } - Normalize(parentTenancy) if !equalOrEmpty(itemTenancy.PeerName, DefaultPeerName) { panic("peering is not supported yet for resource tenancies") diff --git a/internal/tenancy/internal/types/namespace_test.go b/internal/tenancy/internal/types/namespace_test.go index b64f86d5212b..10d70ba82480 100644 --- a/internal/tenancy/internal/types/namespace_test.go +++ b/internal/tenancy/internal/types/namespace_test.go @@ -6,13 +6,14 @@ package types import ( "context" "errors" + "testing" + svctest "github.com/hashicorp/consul/agent/grpc-external/services/resource/testing" rtest "github.com/hashicorp/consul/internal/resource/resourcetest" pbcatalog "github.com/hashicorp/consul/proto-public/pbcatalog/v2beta1" "github.com/hashicorp/consul/proto/private/prototest" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" - "testing" "github.com/stretchr/testify/require" "google.golang.org/protobuf/reflect/protoreflect" @@ -188,19 +189,6 @@ func TestDelete_Success(t *testing.T) { } -func TestRead_MixedCases_Success(t *testing.T) { - client := svctest.RunResourceService(t, Register) - client = rtest.NewClient(client) - - res := rtest.Resource(NamespaceType, "nS1"). - WithData(t, validNamespace()).Write(t, client) - - readRsp, err := client.Read(context.Background(), &pbresource.ReadRequest{Id: res.Id}) - require.NoError(t, err) - prototest.AssertDeepEqual(t, res.Id, readRsp.Resource.Id) - -} - func validNamespace() *pbtenancy.Namespace { return &pbtenancy.Namespace{ Description: "ns namespace", From df8ea430c6845be94aa9ffabfe7c684b9143c430 Mon Sep 17 00:00:00 2001 From: "R.B. Boyer" <4903+rboyer@users.noreply.github.com> Date: Mon, 16 Oct 2023 13:44:24 -0500 Subject: [PATCH 011/130] mesh: add DestinationPolicy ACL hook tenancy tests (#19178) Enhance the DestinationPolicy ACL hook tests to cover tenanted situations. These tests will only execute in enterprise. 
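For reviewers, here is a minimal standalone sketch (not part of the patch) of the allow/deny matrix these tenanted tests encode, assuming the "partition.namespace" tenancy-string convention used below; the `serviceRead` helper in the sketch mirrors the rule-builder pattern added in the test but is an illustrative assumption, not the patch's exact API:

```go
// Illustrative only: distills the expectations of the tenanted ACL tests.
// Tenancies are "partition.namespace" strings; serviceRead builds a scoped
// service:read rule in the same shape the test below generates.
package main

import (
	"fmt"
	"strings"
)

func serviceRead(partition, namespace, name string) string {
	return fmt.Sprintf(`partition %q { namespace %q { service %q { policy = "read" } } }`,
		partition, namespace, name)
}

func main() {
	tenancies := []string{"default.default", "default.foo", "alpha.default", "alpha.foo"}
	for _, policyTenancy := range tenancies {
		for _, aclTenancy := range tenancies {
			p := strings.SplitN(aclTenancy, ".", 2)
			rules := serviceRead(p[0], p[1], "api")

			// A service rule scoped to one tenancy is only expected to grant
			// read on a DestinationPolicy in that same partition/namespace;
			// every cross-tenancy combination is expected to be denied.
			expect := "DENY"
			if policyTenancy == aclTenancy {
				expect = "ALLOW"
			}
			fmt.Printf("policy=%-16s acl=%-16s rule=%q read=%s\n",
				policyTenancy, aclTenancy, rules, expect)
		}
	}
}
```

In short: a rule scoped to one tenancy should never grant read or write on a DestinationPolicy in another tenancy, which is what the enterprise-only cases assert.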
--- .../internal/types/destination_policy_test.go | 151 +++++++++--------- internal/resource/resourcetest/acls.go | 15 +- 2 files changed, 83 insertions(+), 83 deletions(-) diff --git a/internal/mesh/internal/types/destination_policy_test.go b/internal/mesh/internal/types/destination_policy_test.go index c2f89eeb0a9f..8edb51b2d831 100644 --- a/internal/mesh/internal/types/destination_policy_test.go +++ b/internal/mesh/internal/types/destination_policy_test.go @@ -4,13 +4,13 @@ package types import ( + "fmt" "testing" "time" "github.com/stretchr/testify/require" "google.golang.org/protobuf/types/known/durationpb" - "github.com/hashicorp/consul/acl" "github.com/hashicorp/consul/agent/structs" "github.com/hashicorp/consul/internal/resource" "github.com/hashicorp/consul/internal/resource/resourcetest" @@ -518,99 +518,92 @@ func TestDestinationPolicyACLs(t *testing.T) { registry := resource.NewRegistry() Register(registry) - type testcase struct { - rules string - check func(t *testing.T, authz acl.Authorizer, res *pbresource.Resource) - readOK string - writeOK string - listOK string + newPolicy := func(t *testing.T, tenancyStr string) *pbresource.Resource { + res := resourcetest.Resource(pbmesh.DestinationPolicyType, "api"). + WithTenancy(resourcetest.Tenancy(tenancyStr)). + WithData(t, &pbmesh.DestinationPolicy{ + PortConfigs: map[string]*pbmesh.DestinationConfig{ + "http": { + ConnectTimeout: durationpb.New(55 * time.Second), + }, + }, + }). + Build() + resourcetest.ValidateAndNormalize(t, registry, res) + return res } const ( - DENY = "deny" - ALLOW = "allow" - DEFAULT = "default" + DENY = resourcetest.DENY + ALLOW = resourcetest.ALLOW + DEFAULT = resourcetest.DEFAULT ) - checkF := func(t *testing.T, expect string, got error) { - switch expect { - case ALLOW: - if acl.IsErrPermissionDenied(got) { - t.Fatal("should be allowed") - } - case DENY: - if !acl.IsErrPermissionDenied(got) { - t.Fatal("should be denied") - } - case DEFAULT: - require.Nil(t, got, "expected fallthrough decision") - default: - t.Fatalf("unexpected expectation: %q", expect) - } + run := func(t *testing.T, name string, tc resourcetest.ACLTestCase) { + t.Run(name, func(t *testing.T) { + resourcetest.RunACLTestCase(t, tc, registry) + }) } - reg, ok := registry.Resolve(pbmesh.DestinationPolicyType) - require.True(t, ok) + isEnterprise := (structs.NodeEnterpriseMetaInDefaultPartition().PartitionOrEmpty() == "default") - run := func(t *testing.T, tc testcase) { - destData := &pbmesh.DestinationPolicy{ - PortConfigs: map[string]*pbmesh.DestinationConfig{ - "http": { - ConnectTimeout: durationpb.New(55 * time.Second), - }, - }, + serviceRead := func(partition, namespace, name string) string { + if isEnterprise { + return fmt.Sprintf(` partition %q { namespace %q { service %q { policy = "read" } } }`, partition, namespace, name) } - res := resourcetest.Resource(pbmesh.DestinationPolicyType, "api"). - WithTenancy(resource.DefaultNamespacedTenancy()). - WithData(t, destData). 
- Build() - resourcetest.ValidateAndNormalize(t, registry, res) - - config := acl.Config{ - WildcardName: structs.WildcardSpecifier, + return fmt.Sprintf(` service %q { policy = "read" } `, name) + } + serviceWrite := func(partition, namespace, name string) string { + if isEnterprise { + return fmt.Sprintf(` partition %q { namespace %q { service %q { policy = "write" } } }`, partition, namespace, name) } - authz, err := acl.NewAuthorizerFromRules(tc.rules, &config, nil) - require.NoError(t, err) - authz = acl.NewChainedAuthorizer([]acl.Authorizer{authz, acl.DenyAll()}) + return fmt.Sprintf(` service %q { policy = "write" } `, name) + } - t.Run("read", func(t *testing.T) { - err := reg.ACLs.Read(authz, &acl.AuthorizerContext{}, res.Id, nil) - checkF(t, tc.readOK, err) - }) - t.Run("write", func(t *testing.T) { - err := reg.ACLs.Write(authz, &acl.AuthorizerContext{}, res) - checkF(t, tc.writeOK, err) - }) - t.Run("list", func(t *testing.T) { - err := reg.ACLs.List(authz, &acl.AuthorizerContext{}) - checkF(t, tc.listOK, err) - }) + assert := func(t *testing.T, name string, rules string, res *pbresource.Resource, readOK, writeOK string) { + tc := resourcetest.ACLTestCase{ + AuthCtx: resource.AuthorizerContext(res.Id.Tenancy), + Rules: rules, + Res: res, + ReadOK: readOK, + WriteOK: writeOK, + ListOK: DEFAULT, + } + run(t, name, tc) } - cases := map[string]testcase{ - "no rules": { - rules: ``, - readOK: DENY, - writeOK: DENY, - listOK: DEFAULT, - }, - "service api read": { - rules: `service "api" { policy = "read" }`, - readOK: ALLOW, - writeOK: DENY, - listOK: DEFAULT, - }, - "service api write": { - rules: `service "api" { policy = "write" }`, - readOK: ALLOW, - writeOK: ALLOW, - listOK: DEFAULT, - }, + tenancies := []string{"default.default"} + if isEnterprise { + tenancies = append(tenancies, "default.foo", "alpha.default", "alpha.foo") } - for name, tc := range cases { - t.Run(name, func(t *testing.T) { - run(t, tc) + for _, policyTenancyStr := range tenancies { + t.Run("policy tenancy: "+policyTenancyStr, func(t *testing.T) { + for _, aclTenancyStr := range tenancies { + t.Run("acl tenancy: "+aclTenancyStr, func(t *testing.T) { + aclTenancy := resourcetest.Tenancy(aclTenancyStr) + + maybe := func(match string) string { + if policyTenancyStr != aclTenancyStr { + return DENY + } + return match + } + + t.Run("no rules", func(t *testing.T) { + rules := `` + assert(t, "any", rules, newPolicy(t, policyTenancyStr), DENY, DENY) + }) + t.Run("api:read", func(t *testing.T) { + rules := serviceRead(aclTenancy.Partition, aclTenancy.Namespace, "api") + assert(t, "any", rules, newPolicy(t, policyTenancyStr), maybe(ALLOW), DENY) + }) + t.Run("api:write", func(t *testing.T) { + rules := serviceWrite(aclTenancy.Partition, aclTenancy.Namespace, "api") + assert(t, "any", rules, newPolicy(t, policyTenancyStr), maybe(ALLOW), maybe(ALLOW)) + }) + }) + } }) } } diff --git a/internal/resource/resourcetest/acls.go b/internal/resource/resourcetest/acls.go index 4ce8cc9d7ae0..3f77c7fec490 100644 --- a/internal/resource/resourcetest/acls.go +++ b/internal/resource/resourcetest/acls.go @@ -41,6 +41,9 @@ var checkF = func(t *testing.T, expect string, got error) { type ACLTestCase struct { Rules string + // AuthCtx is optional. If not provided an empty one will be used. + AuthCtx *acl.AuthorizerContext + // One of either Res or Data/Owner/Typ should be set. 
Res *pbresource.Resource Data protoreflect.ProtoMessage @@ -92,21 +95,25 @@ func RunACLTestCase(t *testing.T, tc ACLTestCase, registry resource.Registry) { require.NoError(t, err) authz = acl.NewChainedAuthorizer([]acl.Authorizer{authz, acl.DenyAll()}) + if tc.AuthCtx == nil { + tc.AuthCtx = &acl.AuthorizerContext{} + } + if tc.ReadHookRequiresResource { - err = reg.ACLs.Read(authz, &acl.AuthorizerContext{}, res.Id, nil) + err = reg.ACLs.Read(authz, tc.AuthCtx, res.Id, nil) require.ErrorIs(t, err, resource.ErrNeedResource, "read hook should require the data payload") } t.Run("read", func(t *testing.T) { - err := reg.ACLs.Read(authz, &acl.AuthorizerContext{}, res.Id, res) + err := reg.ACLs.Read(authz, tc.AuthCtx, res.Id, res) checkF(t, tc.ReadOK, err) }) t.Run("write", func(t *testing.T) { - err := reg.ACLs.Write(authz, &acl.AuthorizerContext{}, res) + err := reg.ACLs.Write(authz, tc.AuthCtx, res) checkF(t, tc.WriteOK, err) }) t.Run("list", func(t *testing.T) { - err := reg.ACLs.List(authz, &acl.AuthorizerContext{}) + err := reg.ACLs.List(authz, tc.AuthCtx) checkF(t, tc.ListOK, err) }) } From 6741392a4f0fae151682acb10f8e507a492f3230 Mon Sep 17 00:00:00 2001 From: "R.B. Boyer" <4903+rboyer@users.noreply.github.com> Date: Mon, 16 Oct 2023 14:05:39 -0500 Subject: [PATCH 012/130] catalog: add FailoverPolicy ACL hook tenancy test (#19179) --- .../internal/types/failover_policy_test.go | 202 ++++++++++++------ 1 file changed, 140 insertions(+), 62 deletions(-) diff --git a/internal/catalog/internal/types/failover_policy_test.go b/internal/catalog/internal/types/failover_policy_test.go index 923d260580e0..5628ed57741c 100644 --- a/internal/catalog/internal/types/failover_policy_test.go +++ b/internal/catalog/internal/types/failover_policy_test.go @@ -4,12 +4,14 @@ package types import ( - "strings" + "fmt" "testing" "github.com/stretchr/testify/require" "google.golang.org/protobuf/proto" + "github.com/hashicorp/consul/acl" + "github.com/hashicorp/consul/agent/structs" "github.com/hashicorp/consul/internal/resource" "github.com/hashicorp/consul/internal/resource/resourcetest" pbcatalog "github.com/hashicorp/consul/proto-public/pbcatalog/v2beta1" @@ -138,7 +140,7 @@ func TestMutateFailoverPolicy(t *testing.T) { }, }, "dest ref tenancy defaulting": { - policyTenancy: newTestTenancy("foo.bar"), + policyTenancy: resourcetest.Tenancy("foo.bar"), failover: &pbcatalog.FailoverPolicy{ Config: &pbcatalog.FailoverConfig{ Mode: pbcatalog.FailoverMode_FAILOVER_MODE_SEQUENTIAL, @@ -683,54 +685,149 @@ func TestFailoverPolicyACLs(t *testing.T) { registry := resource.NewRegistry() Register(registry) - failoverData := &pbcatalog.FailoverPolicy{ - Config: &pbcatalog.FailoverConfig{ - Destinations: []*pbcatalog.FailoverDestination{ - {Ref: newRef(pbcatalog.ServiceType, "api-backup")}, - }, - }, + newFailover := func(t *testing.T, name, tenancyStr string, destRefs []*pbresource.Reference) []*pbresource.Resource { + var dr []*pbcatalog.FailoverDestination + for _, destRef := range destRefs { + dr = append(dr, &pbcatalog.FailoverDestination{Ref: destRef}) + } + + res1 := resourcetest.Resource(pbcatalog.FailoverPolicyType, name). + WithTenancy(resourcetest.Tenancy(tenancyStr)). + WithData(t, &pbcatalog.FailoverPolicy{ + Config: &pbcatalog.FailoverConfig{Destinations: dr}, + }). + Build() + resourcetest.ValidateAndNormalize(t, registry, res1) + + res2 := resourcetest.Resource(pbcatalog.FailoverPolicyType, name). + WithTenancy(resourcetest.Tenancy(tenancyStr)). 
+ WithData(t, &pbcatalog.FailoverPolicy{ + PortConfigs: map[string]*pbcatalog.FailoverConfig{ + "http": {Destinations: dr}, + }, + }). + Build() + resourcetest.ValidateAndNormalize(t, registry, res2) + + return []*pbresource.Resource{res1, res2} } - cases := map[string]resourcetest.ACLTestCase{ - "no rules": { - Rules: ``, - Data: failoverData, - Typ: pbcatalog.FailoverPolicyType, - ReadOK: resourcetest.DENY, - WriteOK: resourcetest.DENY, - ListOK: resourcetest.DEFAULT, - }, - "service test read": { - Rules: `service "test" { policy = "read" }`, - Data: failoverData, - Typ: pbcatalog.FailoverPolicyType, - ReadOK: resourcetest.ALLOW, - WriteOK: resourcetest.DENY, - ListOK: resourcetest.DEFAULT, - }, - "service test write": { - Rules: `service "test" { policy = "write" }`, - Data: failoverData, - Typ: pbcatalog.FailoverPolicyType, - ReadOK: resourcetest.ALLOW, - WriteOK: resourcetest.DENY, - ListOK: resourcetest.DEFAULT, - }, - "service test write and api-backup read": { - Rules: `service "test" { policy = "write" } service "api-backup" { policy = "read" }`, - Data: failoverData, - Typ: pbcatalog.FailoverPolicyType, - ReadOK: resourcetest.ALLOW, - WriteOK: resourcetest.ALLOW, - ListOK: resourcetest.DEFAULT, - }, + type testcase struct { + res *pbresource.Resource + rules string + check func(t *testing.T, authz acl.Authorizer, res *pbresource.Resource) + readOK string + writeOK string } - for name, tc := range cases { + const ( + DENY = resourcetest.DENY + ALLOW = resourcetest.ALLOW + DEFAULT = resourcetest.DEFAULT + ) + + serviceRef := func(tenancy, name string) *pbresource.Reference { + return newRefWithTenancy(pbcatalog.ServiceType, tenancy, name) + } + + resOneDest := func(tenancy, destTenancy string) []*pbresource.Resource { + return newFailover(t, "api", tenancy, []*pbresource.Reference{ + serviceRef(destTenancy, "dest1"), + }) + } + + resTwoDests := func(tenancy, destTenancy string) []*pbresource.Resource { + return newFailover(t, "api", tenancy, []*pbresource.Reference{ + serviceRef(destTenancy, "dest1"), + serviceRef(destTenancy, "dest2"), + }) + } + + run := func(t *testing.T, name string, tc resourcetest.ACLTestCase) { t.Run(name, func(t *testing.T) { resourcetest.RunACLTestCase(t, tc, registry) }) } + + isEnterprise := (structs.NodeEnterpriseMetaInDefaultPartition().PartitionOrEmpty() == "default") + + serviceRead := func(partition, namespace, name string) string { + if isEnterprise { + return fmt.Sprintf(` partition %q { namespace %q { service %q { policy = "read" } } }`, partition, namespace, name) + } + return fmt.Sprintf(` service %q { policy = "read" } `, name) + } + serviceWrite := func(partition, namespace, name string) string { + if isEnterprise { + return fmt.Sprintf(` partition %q { namespace %q { service %q { policy = "write" } } }`, partition, namespace, name) + } + return fmt.Sprintf(` service %q { policy = "write" } `, name) + } + + assert := func(t *testing.T, name string, rules string, resList []*pbresource.Resource, readOK, writeOK string) { + for i, res := range resList { + tc := resourcetest.ACLTestCase{ + AuthCtx: resource.AuthorizerContext(res.Id.Tenancy), + Res: res, + Rules: rules, + ReadOK: readOK, + WriteOK: writeOK, + ListOK: DEFAULT, + } + run(t, fmt.Sprintf("%s-%d", name, i), tc) + } + } + + tenancies := []string{"default.default"} + if isEnterprise { + tenancies = append(tenancies, "default.foo", "alpha.default", "alpha.foo") + } + + for _, policyTenancyStr := range tenancies { + t.Run("policy tenancy: "+policyTenancyStr, func(t *testing.T) { + for _, 
destTenancyStr := range tenancies { + t.Run("dest tenancy: "+destTenancyStr, func(t *testing.T) { + for _, aclTenancyStr := range tenancies { + t.Run("acl tenancy: "+aclTenancyStr, func(t *testing.T) { + aclTenancy := resourcetest.Tenancy(aclTenancyStr) + + maybe := func(match string, parentOnly bool) string { + if policyTenancyStr != aclTenancyStr { + return DENY + } + if !parentOnly && destTenancyStr != aclTenancyStr { + return DENY + } + return match + } + + t.Run("no rules", func(t *testing.T) { + rules := `` + assert(t, "1dest", rules, resOneDest(policyTenancyStr, destTenancyStr), DENY, DENY) + assert(t, "2dests", rules, resTwoDests(policyTenancyStr, destTenancyStr), DENY, DENY) + }) + t.Run("api:read", func(t *testing.T) { + rules := serviceRead(aclTenancy.Partition, aclTenancy.Namespace, "api") + assert(t, "1dest", rules, resOneDest(policyTenancyStr, destTenancyStr), maybe(ALLOW, true), DENY) + assert(t, "2dests", rules, resTwoDests(policyTenancyStr, destTenancyStr), maybe(ALLOW, true), DENY) + }) + t.Run("api:write", func(t *testing.T) { + rules := serviceWrite(aclTenancy.Partition, aclTenancy.Namespace, "api") + assert(t, "1dest", rules, resOneDest(policyTenancyStr, destTenancyStr), maybe(ALLOW, true), DENY) + assert(t, "2dests", rules, resTwoDests(policyTenancyStr, destTenancyStr), maybe(ALLOW, true), DENY) + }) + t.Run("api:write dest1:read", func(t *testing.T) { + rules := serviceWrite(aclTenancy.Partition, aclTenancy.Namespace, "api") + + serviceRead(aclTenancy.Partition, aclTenancy.Namespace, "dest1") + assert(t, "1dest", rules, resOneDest(policyTenancyStr, destTenancyStr), maybe(ALLOW, true), maybe(ALLOW, false)) + assert(t, "2dests", rules, resTwoDests(policyTenancyStr, destTenancyStr), maybe(ALLOW, true), DENY) + }) + }) + } + }) + } + }) + } } func newRef(typ *pbresource.Type, name string) *pbresource.Reference { @@ -741,7 +838,7 @@ func newRef(typ *pbresource.Type, name string) *pbresource.Reference { func newRefWithTenancy(typ *pbresource.Type, tenancyStr, name string) *pbresource.Reference { return resourcetest.Resource(typ, name). - WithTenancy(newTestTenancy(tenancyStr)). + WithTenancy(resourcetest.Tenancy(tenancyStr)). Reference("") } @@ -750,22 +847,3 @@ func newRefWithPeer(typ *pbresource.Type, name string, peer string) *pbresource. 
ref.Tenancy.PeerName = peer return ref } - -func newTestTenancy(s string) *pbresource.Tenancy { - parts := strings.Split(s, ".") - switch len(parts) { - case 0: - return resource.DefaultClusteredTenancy() - case 1: - v := resource.DefaultPartitionedTenancy() - v.Partition = parts[0] - return v - case 2: - v := resource.DefaultNamespacedTenancy() - v.Partition = parts[0] - v.Namespace = parts[1] - return v - default: - return &pbresource.Tenancy{Partition: "BAD", Namespace: "BAD", PeerName: "BAD"} - } -} From dcd593004ecbdf6dd00f5fe242b7cf998a4d3348 Mon Sep 17 00:00:00 2001 From: Jeff Boruszak <104028618+boruszak@users.noreply.github.com> Date: Mon, 16 Oct 2023 13:52:30 -0700 Subject: [PATCH 013/130] docs: Multi-port corrections (#19224) * typo fixes and instruction corrections * typo * link path correction --- .../content/docs/k8s/multiport/configure.mdx | 41 ++++++++++--------- website/content/docs/k8s/multiport/index.mdx | 8 ++-- 2 files changed, 26 insertions(+), 23 deletions(-) diff --git a/website/content/docs/k8s/multiport/configure.mdx b/website/content/docs/k8s/multiport/configure.mdx index 0cae43d471f3..d53ea521f340 100644 --- a/website/content/docs/k8s/multiport/configure.mdx +++ b/website/content/docs/k8s/multiport/configure.mdx @@ -29,7 +29,7 @@ There are additional requirements for service mesh proxies in transparent proxy ## Enable the v2 catalog -To enable the v2 catalog and its support for multi-port services, set `global.experiments: ["resource-apis"]` and `ui.enabled: false`. The following example includes these parameter in a Helm chart with minimal required configurations for the Consul installation: +To enable the v2 catalog and its support for multi-port services, set `global.experiments: ["resource-apis"]` and `ui.enabled: false`. The following example includes these parameters in a Helm chart with minimal required configurations for the Consul installation: @@ -78,8 +78,8 @@ $ helm install consul hashicorp/consul --create-namespace --namespace consul --v Consul's v2 catalog supports two methods for defining multi-port services in Kubernetes: -- Method 1: Define a single Kubernetes Service that exposes multiple ports -- Method 2: Define multiple Kubernetes Services that expose individual ports +- **Method 1**: Define a single Kubernetes Service that exposes multiple ports +- **Method 2**: Define multiple Kubernetes Services that expose individual ports These methods affect how the Services are addressed in Kubernetes. @@ -338,10 +338,13 @@ spec: To open a shell to the `web` container, you need the name of the Pod it currently runs on. Run the following command to return a list of Pods: ```shell-session -$ kubectl get pods -NAME READY STATUS RESTARTS AGE -api-7dc9d84f-kfnwz 2/2 Running 0 23s -web-7bc5786747-b5pzl 1/1 Running 0 23s +$ kubectl get pods --namespace consul +NAME READY STATUS RESTARTS AGE +api-5784b54bcc-tp98l 3/3 Running 0 6m55s +consul-connect-injector-54865fbcbf-sfjsl 1/1 Running 0 8m33s +consul-server-0 1/1 Running 0 8m33s +consul-webhook-cert-manager-666676bd5b-cdbxc 1/1 Running 0 8m33s +web-6dcbd684bc-gk8n5 2/2 Running 0 6m55s ``` ### Validate both ports @@ -353,14 +356,14 @@ Use the `web` Pod's name to open a shell session and test the `api` service on p ```shell-session -$ kubectl exec -it web-7bc5786747-b5pzl -- curl api:80 +$ kubectl exec -it web-6dcbd684bc-gk8n5 -c web --namespace consul -- curl api:80 hello world ``` Then test the `api` service on port 90. 
```shell-session -$ kubectl exec -it web-7bc5786747-b5pzl -- curl api:90 +$ kubectl exec -it web-6dcbd684bc-gk8n5 -c web --namespace consul -- curl api:90 hello world from 9090 admin ``` @@ -369,14 +372,14 @@ hello world from 9090 admin ```shell-session -$ kubectl exec -it web-7bc5786747-b5pzl -- curl api:80 +$ kubectl exec -it web-6dcbd684bc-gk8n5 -c web --namespace consul -- curl api:80 hello world ``` Then test the `api-admin` service on port 90. ```shell-session -$ kubectl exec -it web-7bc5786747-b5pzl -- curl api-admin:90 +$ kubectl exec -it web-6dcbd684bc-gk8n5 -c web --namespace consul -- curl api-admin:90 hello world from 9090 admin ``` @@ -398,14 +401,14 @@ $ kubectl apply -f deny-90.yaml --namespace consul Then, open a shell session in the `web` container and test the `api` service on port 80. ```shell-session -$ kubectl exec -it web-7bc5786747-b5pzl -- curl api:80 +$ kubectl exec -it web-6dcbd684bc-gk8n5 -c web --namespace consul -- curl api:80 hello world ``` Test the `api` service on port 90. This command should fail, indicating that the traffic permission is in effect. ```shell-session -$ kubectl exec -it web-7bc5786747-b5pzl -- curl api:90 +$ kubectl exec -it web-6dcbd684bc-gk8n5 -c web --namespace consul -- curl api:90 ``` @@ -415,14 +418,14 @@ $ kubectl exec -it web-7bc5786747-b5pzl -- curl api:90 Then, open a shell session in the `web` container and test the `api` service on port 80. ```shell-session -$ kubectl exec -it web-7bc5786747-b5pzl -- curl api:80 +$ kubectl exec -it web-6dcbd684bc-gk8n5 -c web --namespace consul -- curl api:80 hello world ``` Test the `admin` service on port 90. This command should fail, indicating that the traffic permission is in effect. ```shell-session -$ kubectl exec -it web-7bc5786747-b5pzl -- curl api-admin:90 +$ kubectl exec -it web-6dcbd684bc-gk8n5 -c web --namespace consul -- curl api-admin:90 ``` @@ -449,14 +452,14 @@ $ kubectl apply -f deny-80.yaml --namespace consul Then, open a shell session in the `web` container and test the `api` service on port 90. ```shell-session -$ kubectl exec -it web-7bc5786747-b5pzl -- curl api:90 +$ kubectl exec -it web-6dcbd684bc-gk8n5 -c web --namespace consul -- curl api:90 hello world from 9090 admin ``` Test the `api` service on port 80. This command should fail, indicating that the traffic permission is in effect. ```shell-session -$ kubectl exec -it web-7bc5786747-b5pzl -- curl api:80 +$ kubectl exec -it web-6dcbd684bc-gk8n5 -c web --namespace consul -- curl api:80 ``` @@ -466,14 +469,14 @@ $ kubectl exec -it web-7bc5786747-b5pzl -- curl api:80 Then, open a shell session in the `web` container and test the `api-admin` service on port 90. ```shell-session -$ kubectl exec -it web-7bc5786747-b5pzl -- curl api-admin:90 +$ kubectl exec -it web-6dcbd684bc-gk8n5 -c web --namespace consul -- curl api-admin:90 hello world from 9090 admin ``` Test the `api` service on port 80. This command should fail, indicating that the traffic permission is in effect. ```shell-session -$ kubectl exec -it web-7bc5786747-b5pzl -- curl api:80 +$ kubectl exec -it web-6dcbd684bc-gk8n5 -c web --namespace consul -- curl api:80 ``` diff --git a/website/content/docs/k8s/multiport/index.mdx b/website/content/docs/k8s/multiport/index.mdx index 9b8ef4dac9c1..b8ed8e990cfe 100644 --- a/website/content/docs/k8s/multiport/index.mdx +++ b/website/content/docs/k8s/multiport/index.mdx @@ -14,7 +14,7 @@ Multi-port services are part of a beta release. 
This documentation supports test This topic describes changes to Consul's catalog that allow you to register a service with multiple ports on Kubernetes deployments. -## Background +## Introduction When Consul registers services, v1 of its catalog API tracks the following information: @@ -22,7 +22,7 @@ When Consul registers services, v1 of its catalog API tracks the following infor - Locations of the _nodes_ the instances run on - Names of the _services_ the instances are associated with -This catalog API was designed prior to the introduction of Consul’s service mesh features. Services and service instances are coupled in the catalog such that Consul's ACL system requires a Kubernetes ServiceAccount resource to match the Service name. As a result, only one service can represent a Kubernetes Workload in the Consul catalog. +This catalog API was designed prior to the introduction of Consul’s service mesh features. The service mesh uses Consul's ACL system, which requires a Kubernetes ServiceAccount resource to match the Service name. As a result, only one service can represent a Kubernetes Workload in the Consul catalog. Since then, the cloud networking needs for applications have evolved and the Consul catalog adapted to support workarounds for these needs. For example, [Kubernetes Pods with multiple ports](/consul/docs/k8s/connect#kubernetes-pods-with-multiple-ports) demonstrates how you can schedule a service with multiple ports so that Consul registers it in the catalog as distinct services with their own service instances. However, this workaround results in additional resource consumption because Consul requires that each service and port use their own proxy and Consul dataplane so that it can recognize them as distinct services. @@ -31,7 +31,7 @@ Since then, the cloud networking needs for applications have evolved and the Con Consul v1.17 introduces a new version of the catalog API designed to bridge differences between the Consul and Kubernetes data models. The v2 catalog API still tracks services and nodes for Consul, but replaces service instances with _workloads_ and _workload identites_. - `Workload` is an application instance running in a set of one or more Pods scheduled according to a Kubernetes Workload resource such as a Deployment or StatefulSet. It is similar to [Kubernetes Workloads](https://kubernetes.io/docs/concepts/workloads/). -- `WorkloadIdentities` provide a distinct identity for a Workload to assume in a Kubernetes cluster. THey are similar to [Kubernetes Service Accounts](https://kubernetes.io/docs/concepts/security/service-accounts/). +- `WorkloadIdentities` provide a distinct identity for a Workload to assume in a Kubernetes cluster. They are similar to [Kubernetes Service Accounts](https://kubernetes.io/docs/concepts/security/service-accounts/). This catalog structure enables Consul to associate a single Kubernetes Workload with multiple services in its catalog. @@ -61,7 +61,7 @@ For an example configuration and instructions for each of the steps in this work Be aware of the following constraints and technical limitations on using multi-port services and the v2 catalog API: -- The v2 catalog API beta does not support connections with client agents. It is only available for Kubernetes deployments, which use [Consul dataplanes](consul/docs/connect/dataplane) instead of client agents. +- The v2 catalog API beta does not support connections with client agents. 
It is only available for Kubernetes deployments, which use [Consul dataplanes](/consul/docs/connect/dataplane) instead of client agents. - The v1 and v2 catalog APIs cannot run concurrently. - The Consul UI does not support multi-port services or the v2 catalog API in this release. You must disable the UI in the Helm chart in order to use the v2 catalog API. - HCP Consul does not support multi-port services or the v2 catalog API in this release. From b81c8627dbecbec18153440c6ee0e45ed9c3f7ec Mon Sep 17 00:00:00 2001 From: David Yu Date: Mon, 16 Oct 2023 14:21:58 -0700 Subject: [PATCH 014/130] Add reason why port 53 is not used by default (#19222) * Update dns-configuration.mdx * Update website/content/docs/services/discovery/dns-configuration.mdx Co-authored-by: Tu Nguyen --- website/content/docs/services/discovery/dns-configuration.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/content/docs/services/discovery/dns-configuration.mdx b/website/content/docs/services/discovery/dns-configuration.mdx index 0a10edecdd9d..5a0d890e79bf 100644 --- a/website/content/docs/services/discovery/dns-configuration.mdx +++ b/website/content/docs/services/discovery/dns-configuration.mdx @@ -16,7 +16,7 @@ The Consul DNS is the primary interface for querying records when Consul service By default, the Consul DNS listens for queries at `127.0.0.1:8600` and uses the `consul` domain. Specify the following parameters in the agent configuration to determine DNS behavior when querying services: - [`client_addr`](/consul/docs/agent/config/config-files#client_addr) -- [`ports.dns`](/consul/docs/agent/config/config-files#dns_port) +- [`ports.dns`](/consul/docs/agent/config/config-files#dns_port) : Consul does not use port `53`, which is typically reserved for the default port for DNS resolvers, by default because it requires an escalated privilege to bind to. 
- [`recursors`](/consul/docs/agent/config/config-files#recursors) - [`domain`](/consul/docs/agent/config/config-files#domain) - [`alt_domain`](/consul/docs/agent/config/config-files#alt_domain) From 4c5a46e5e128119dd41dbf029328b83b3932c04d Mon Sep 17 00:00:00 2001 From: Semir Patel Date: Mon, 16 Oct 2023 16:43:47 -0500 Subject: [PATCH 015/130] v2tenancy: rename v1alpha1 -> v2beta1 (#19227) --- internal/catalog/catalogtest/run_test.go | 2 +- .../catalogtest/test_integration_v2beta1.go | 4 +- internal/tenancy/exports.go | 10 +- internal/tenancy/internal/types/namespace.go | 17 +- .../tenancy/internal/types/namespace_test.go | 4 +- internal/tenancy/internal/types/types.go | 6 +- .../pbtenancy/v1alpha1/namespace.pb.go | 172 ------------------ .../namespace.pb.binary.go | 4 +- .../pbtenancy/v2beta1/namespace.pb.go | 171 +++++++++++++++++ .../{v1alpha1 => v2beta1}/namespace.proto | 2 +- .../namespace_deepcopy.gen.go | 2 +- .../namespace_json.gen.go | 2 +- .../resource_types.gen.go | 4 +- .../test/catalog/catalog_test.go | 2 +- 14 files changed, 201 insertions(+), 201 deletions(-) delete mode 100644 proto-public/pbtenancy/v1alpha1/namespace.pb.go rename proto-public/pbtenancy/{v1alpha1 => v2beta1}/namespace.pb.binary.go (84%) create mode 100644 proto-public/pbtenancy/v2beta1/namespace.pb.go rename proto-public/pbtenancy/{v1alpha1 => v2beta1}/namespace.proto (91%) rename proto-public/pbtenancy/{v1alpha1 => v2beta1}/namespace_deepcopy.gen.go (97%) rename proto-public/pbtenancy/{v1alpha1 => v2beta1}/namespace_json.gen.go (96%) rename proto-public/pbtenancy/{v1alpha1 => v2beta1}/resource_types.gen.go (87%) diff --git a/internal/catalog/catalogtest/run_test.go b/internal/catalog/catalogtest/run_test.go index 2c12785bbb0b..5a6e1e62e354 100644 --- a/internal/catalog/catalogtest/run_test.go +++ b/internal/catalog/catalogtest/run_test.go @@ -38,7 +38,7 @@ func runInMemResourceServiceAndControllers(t *testing.T, deps controllers.Depend func TestControllers_Integration(t *testing.T) { client := runInMemResourceServiceAndControllers(t, catalog.DefaultControllerDependencies()) - RunCatalogV1Alpha1IntegrationTest(t, client) + RunCatalogV2Beta1IntegrationTest(t, client) } func TestControllers_Lifecycle(t *testing.T) { diff --git a/internal/catalog/catalogtest/test_integration_v2beta1.go b/internal/catalog/catalogtest/test_integration_v2beta1.go index 9f83ab365591..79ffea7e7953 100644 --- a/internal/catalog/catalogtest/test_integration_v2beta1.go +++ b/internal/catalog/catalogtest/test_integration_v2beta1.go @@ -26,7 +26,7 @@ var ( testData embed.FS ) -// RunCatalogV1Alpha1IntegrationTest will push up a bunch of catalog related data and then +// RunCatalogV2Beta1IntegrationTest will push up a bunch of catalog related data and then // verify that all the expected reconciliations happened correctly. This test is // intended to exercise a large swathe of behavior of the overall catalog package. // Besides just controller reconciliation behavior, the intent is also to verify @@ -38,7 +38,7 @@ var ( // is another RunCatalogIntegrationTestLifeCycle function that can be used for those // purposes. The two are distinct so that the data being published and the assertions // made against the system can be reused in upgrade tests. 
-func RunCatalogV1Alpha1IntegrationTest(t *testing.T, client pbresource.ResourceServiceClient) { +func RunCatalogV2Beta1IntegrationTest(t *testing.T, client pbresource.ResourceServiceClient) { t.Helper() PublishCatalogV2Beta1IntegrationTestData(t, client) diff --git a/internal/tenancy/exports.go b/internal/tenancy/exports.go index aadd7efb59be..934f895955d0 100644 --- a/internal/tenancy/exports.go +++ b/internal/tenancy/exports.go @@ -11,14 +11,14 @@ import ( var ( // API Group Information - APIGroup = types.GroupName - VersionV1Alpha1 = types.VersionV1Alpha1 - CurrentVersion = types.CurrentVersion + APIGroup = types.GroupName + VersionV2Beta1 = types.VersionV2Beta1 + CurrentVersion = types.CurrentVersion // Resource Kind Names. - NamespaceKind = types.NamespaceKind - NamespaceV1Alpha1Type = types.NamespaceV1Alpha1Type + NamespaceKind = types.NamespaceKind + NamespaceV2Beta1Type = types.NamespaceV2Beta1Type ) // RegisterTypes adds all resource types within the "tenancy" API group diff --git a/internal/tenancy/internal/types/namespace.go b/internal/tenancy/internal/types/namespace.go index 4bc95d1505f7..1bb016bf3c4d 100644 --- a/internal/tenancy/internal/types/namespace.go +++ b/internal/tenancy/internal/types/namespace.go @@ -5,11 +5,12 @@ package types import ( "fmt" + "strings" + "github.com/hashicorp/consul/agent/dns" "github.com/hashicorp/consul/internal/resource" "github.com/hashicorp/consul/proto-public/pbresource" - tenancyv1alpha1 "github.com/hashicorp/consul/proto-public/pbtenancy/v1alpha1" - "strings" + pbtenancy "github.com/hashicorp/consul/proto-public/pbtenancy/v2beta1" ) const ( @@ -17,18 +18,18 @@ const ( ) var ( - NamespaceV1Alpha1Type = &pbresource.Type{ + NamespaceV2Beta1Type = &pbresource.Type{ Group: GroupName, - GroupVersion: VersionV1Alpha1, + GroupVersion: VersionV2Beta1, Kind: NamespaceKind, } - NamespaceType = NamespaceV1Alpha1Type + NamespaceType = NamespaceV2Beta1Type ) func RegisterNamespace(r resource.Registry) { r.Register(resource.Registration{ - Type: NamespaceV1Alpha1Type, - Proto: &tenancyv1alpha1.Namespace{}, + Type: NamespaceType, + Proto: &pbtenancy.Namespace{}, Scope: resource.ScopePartition, Validate: ValidateNamespace, Mutate: MutateNamespace, @@ -42,7 +43,7 @@ func MutateNamespace(res *pbresource.Resource) error { } func ValidateNamespace(res *pbresource.Resource) error { - var ns tenancyv1alpha1.Namespace + var ns pbtenancy.Namespace if err := res.Data.UnmarshalTo(&ns); err != nil { return resource.NewErrDataParse(&ns, err) diff --git a/internal/tenancy/internal/types/namespace_test.go b/internal/tenancy/internal/types/namespace_test.go index 10d70ba82480..2c68097d4cde 100644 --- a/internal/tenancy/internal/types/namespace_test.go +++ b/internal/tenancy/internal/types/namespace_test.go @@ -21,13 +21,13 @@ import ( "github.com/hashicorp/consul/internal/resource" "github.com/hashicorp/consul/proto-public/pbresource" - pbtenancy "github.com/hashicorp/consul/proto-public/pbtenancy/v1alpha1" + pbtenancy "github.com/hashicorp/consul/proto-public/pbtenancy/v2beta1" ) func createNamespaceResource(t *testing.T, data protoreflect.ProtoMessage) *pbresource.Resource { res := &pbresource.Resource{ Id: &pbresource.ID{ - Type: NamespaceV1Alpha1Type, + Type: NamespaceV2Beta1Type, Tenancy: resource.DefaultPartitionedTenancy(), Name: "ns1234", }, diff --git a/internal/tenancy/internal/types/types.go b/internal/tenancy/internal/types/types.go index be0a615153fd..5955ade8a5d7 100644 --- a/internal/tenancy/internal/types/types.go +++ 
b/internal/tenancy/internal/types/types.go @@ -4,7 +4,7 @@ package types const ( - GroupName = "tenancy" - VersionV1Alpha1 = "v1alpha1" - CurrentVersion = VersionV1Alpha1 + GroupName = "tenancy" + VersionV2Beta1 = "v2beta1" + CurrentVersion = VersionV2Beta1 ) diff --git a/proto-public/pbtenancy/v1alpha1/namespace.pb.go b/proto-public/pbtenancy/v1alpha1/namespace.pb.go deleted file mode 100644 index e7fec0b5d008..000000000000 --- a/proto-public/pbtenancy/v1alpha1/namespace.pb.go +++ /dev/null @@ -1,172 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.30.0 -// protoc (unknown) -// source: pbtenancy/v1alpha1/namespace.proto - -package tenancyv1alpha1 - -import ( - _ "github.com/hashicorp/consul/proto-public/pbresource" - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - reflect "reflect" - sync "sync" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -// The name of the Namespace is in the outer Resource.ID.Name. -// It must be unique within a partition and must be a -// DNS hostname. There are also other reserved names that may not be used. -type Namespace struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Description is where the user puts any information they want - // about the namespace. It is not used internally. - Description string `protobuf:"bytes,1,opt,name=description,proto3" json:"description,omitempty"` -} - -func (x *Namespace) Reset() { - *x = Namespace{} - if protoimpl.UnsafeEnabled { - mi := &file_pbtenancy_v1alpha1_namespace_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Namespace) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Namespace) ProtoMessage() {} - -func (x *Namespace) ProtoReflect() protoreflect.Message { - mi := &file_pbtenancy_v1alpha1_namespace_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Namespace.ProtoReflect.Descriptor instead. 
-func (*Namespace) Descriptor() ([]byte, []int) { - return file_pbtenancy_v1alpha1_namespace_proto_rawDescGZIP(), []int{0} -} - -func (x *Namespace) GetDescription() string { - if x != nil { - return x.Description - } - return "" -} - -var File_pbtenancy_v1alpha1_namespace_proto protoreflect.FileDescriptor - -var file_pbtenancy_v1alpha1_namespace_proto_rawDesc = []byte{ - 0x0a, 0x22, 0x70, 0x62, 0x74, 0x65, 0x6e, 0x61, 0x6e, 0x63, 0x79, 0x2f, 0x76, 0x31, 0x61, 0x6c, - 0x70, 0x68, 0x61, 0x31, 0x2f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x21, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, - 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x74, 0x65, 0x6e, 0x61, 0x6e, 0x63, 0x79, 0x2e, 0x76, - 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x1a, 0x1c, 0x70, 0x62, 0x72, 0x65, 0x73, 0x6f, 0x75, - 0x72, 0x63, 0x65, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x35, 0x0a, 0x09, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, - 0x63, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x3a, 0x06, 0xa2, 0x93, 0x04, 0x02, 0x08, 0x02, 0x42, 0xab, 0x02, 0x0a, - 0x25, 0x63, 0x6f, 0x6d, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, - 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x74, 0x65, 0x6e, 0x61, 0x6e, 0x63, 0x79, 0x2e, 0x76, 0x31, - 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x42, 0x0e, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, - 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x4b, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, - 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2f, 0x63, - 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2d, 0x70, 0x75, 0x62, 0x6c, - 0x69, 0x63, 0x2f, 0x70, 0x62, 0x74, 0x65, 0x6e, 0x61, 0x6e, 0x63, 0x79, 0x2f, 0x76, 0x31, 0x61, - 0x6c, 0x70, 0x68, 0x61, 0x31, 0x3b, 0x74, 0x65, 0x6e, 0x61, 0x6e, 0x63, 0x79, 0x76, 0x31, 0x61, - 0x6c, 0x70, 0x68, 0x61, 0x31, 0xa2, 0x02, 0x03, 0x48, 0x43, 0x54, 0xaa, 0x02, 0x21, 0x48, 0x61, - 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x54, - 0x65, 0x6e, 0x61, 0x6e, 0x63, 0x79, 0x2e, 0x56, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0xca, - 0x02, 0x21, 0x48, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x5c, 0x43, 0x6f, 0x6e, 0x73, - 0x75, 0x6c, 0x5c, 0x54, 0x65, 0x6e, 0x61, 0x6e, 0x63, 0x79, 0x5c, 0x56, 0x31, 0x61, 0x6c, 0x70, - 0x68, 0x61, 0x31, 0xe2, 0x02, 0x2d, 0x48, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x5c, - 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x5c, 0x54, 0x65, 0x6e, 0x61, 0x6e, 0x63, 0x79, 0x5c, 0x56, - 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, - 0x61, 0x74, 0x61, 0xea, 0x02, 0x24, 0x48, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x3a, - 0x3a, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x3a, 0x3a, 0x54, 0x65, 0x6e, 0x61, 0x6e, 0x63, 0x79, - 0x3a, 0x3a, 0x56, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x33, -} - -var ( - file_pbtenancy_v1alpha1_namespace_proto_rawDescOnce sync.Once - file_pbtenancy_v1alpha1_namespace_proto_rawDescData = file_pbtenancy_v1alpha1_namespace_proto_rawDesc -) - -func file_pbtenancy_v1alpha1_namespace_proto_rawDescGZIP() []byte { - file_pbtenancy_v1alpha1_namespace_proto_rawDescOnce.Do(func() { - 
file_pbtenancy_v1alpha1_namespace_proto_rawDescData = protoimpl.X.CompressGZIP(file_pbtenancy_v1alpha1_namespace_proto_rawDescData) - }) - return file_pbtenancy_v1alpha1_namespace_proto_rawDescData -} - -var file_pbtenancy_v1alpha1_namespace_proto_msgTypes = make([]protoimpl.MessageInfo, 1) -var file_pbtenancy_v1alpha1_namespace_proto_goTypes = []interface{}{ - (*Namespace)(nil), // 0: hashicorp.consul.tenancy.v1alpha1.Namespace -} -var file_pbtenancy_v1alpha1_namespace_proto_depIdxs = []int32{ - 0, // [0:0] is the sub-list for method output_type - 0, // [0:0] is the sub-list for method input_type - 0, // [0:0] is the sub-list for extension type_name - 0, // [0:0] is the sub-list for extension extendee - 0, // [0:0] is the sub-list for field type_name -} - -func init() { file_pbtenancy_v1alpha1_namespace_proto_init() } -func file_pbtenancy_v1alpha1_namespace_proto_init() { - if File_pbtenancy_v1alpha1_namespace_proto != nil { - return - } - if !protoimpl.UnsafeEnabled { - file_pbtenancy_v1alpha1_namespace_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Namespace); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_pbtenancy_v1alpha1_namespace_proto_rawDesc, - NumEnums: 0, - NumMessages: 1, - NumExtensions: 0, - NumServices: 0, - }, - GoTypes: file_pbtenancy_v1alpha1_namespace_proto_goTypes, - DependencyIndexes: file_pbtenancy_v1alpha1_namespace_proto_depIdxs, - MessageInfos: file_pbtenancy_v1alpha1_namespace_proto_msgTypes, - }.Build() - File_pbtenancy_v1alpha1_namespace_proto = out.File - file_pbtenancy_v1alpha1_namespace_proto_rawDesc = nil - file_pbtenancy_v1alpha1_namespace_proto_goTypes = nil - file_pbtenancy_v1alpha1_namespace_proto_depIdxs = nil -} diff --git a/proto-public/pbtenancy/v1alpha1/namespace.pb.binary.go b/proto-public/pbtenancy/v2beta1/namespace.pb.binary.go similarity index 84% rename from proto-public/pbtenancy/v1alpha1/namespace.pb.binary.go rename to proto-public/pbtenancy/v2beta1/namespace.pb.binary.go index f6097062d32d..1884a0943b0d 100644 --- a/proto-public/pbtenancy/v1alpha1/namespace.pb.binary.go +++ b/proto-public/pbtenancy/v2beta1/namespace.pb.binary.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go-binary. DO NOT EDIT. -// source: pbtenancy/v1alpha1/namespace.proto +// source: pbtenancy/v2beta1/namespace.proto -package tenancyv1alpha1 +package tenancyv2beta1 import ( "google.golang.org/protobuf/proto" diff --git a/proto-public/pbtenancy/v2beta1/namespace.pb.go b/proto-public/pbtenancy/v2beta1/namespace.pb.go new file mode 100644 index 000000000000..2118814a68a7 --- /dev/null +++ b/proto-public/pbtenancy/v2beta1/namespace.pb.go @@ -0,0 +1,171 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc (unknown) +// source: pbtenancy/v2beta1/namespace.proto + +package tenancyv2beta1 + +import ( + _ "github.com/hashicorp/consul/proto-public/pbresource" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// The name of the Namespace is in the outer Resource.ID.Name. +// It must be unique within a partition and must be a +// DNS hostname. There are also other reserved names that may not be used. +type Namespace struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Description is where the user puts any information they want + // about the namespace. It is not used internally. + Description string `protobuf:"bytes,1,opt,name=description,proto3" json:"description,omitempty"` +} + +func (x *Namespace) Reset() { + *x = Namespace{} + if protoimpl.UnsafeEnabled { + mi := &file_pbtenancy_v2beta1_namespace_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Namespace) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Namespace) ProtoMessage() {} + +func (x *Namespace) ProtoReflect() protoreflect.Message { + mi := &file_pbtenancy_v2beta1_namespace_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Namespace.ProtoReflect.Descriptor instead. +func (*Namespace) Descriptor() ([]byte, []int) { + return file_pbtenancy_v2beta1_namespace_proto_rawDescGZIP(), []int{0} +} + +func (x *Namespace) GetDescription() string { + if x != nil { + return x.Description + } + return "" +} + +var File_pbtenancy_v2beta1_namespace_proto protoreflect.FileDescriptor + +var file_pbtenancy_v2beta1_namespace_proto_rawDesc = []byte{ + 0x0a, 0x21, 0x70, 0x62, 0x74, 0x65, 0x6e, 0x61, 0x6e, 0x63, 0x79, 0x2f, 0x76, 0x32, 0x62, 0x65, + 0x74, 0x61, 0x31, 0x2f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x12, 0x20, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, + 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x74, 0x65, 0x6e, 0x61, 0x6e, 0x63, 0x79, 0x2e, 0x76, 0x32, + 0x62, 0x65, 0x74, 0x61, 0x31, 0x1a, 0x1c, 0x70, 0x62, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x22, 0x35, 0x0a, 0x09, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, + 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x3a, 0x06, 0xa2, 0x93, 0x04, 0x02, 0x08, 0x02, 0x42, 0xa4, 0x02, 0x0a, 0x24, 0x63, + 0x6f, 0x6d, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, + 0x73, 0x75, 0x6c, 0x2e, 0x74, 0x65, 0x6e, 0x61, 0x6e, 0x63, 0x79, 0x2e, 0x76, 0x32, 0x62, 0x65, + 0x74, 0x61, 0x31, 0x42, 0x0e, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x50, 0x72, + 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x49, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, + 0x6d, 0x2f, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2f, 0x63, 0x6f, 0x6e, 0x73, + 0x75, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2d, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x2f, + 0x70, 0x62, 0x74, 0x65, 0x6e, 0x61, 0x6e, 0x63, 0x79, 0x2f, 0x76, 0x32, 0x62, 0x65, 0x74, 0x61, + 0x31, 0x3b, 0x74, 0x65, 0x6e, 0x61, 0x6e, 0x63, 0x79, 0x76, 0x32, 0x62, 0x65, 0x74, 0x61, 0x31, + 0xa2, 0x02, 0x03, 0x48, 0x43, 0x54, 0xaa, 0x02, 0x20, 
0x48, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, + 0x72, 0x70, 0x2e, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x54, 0x65, 0x6e, 0x61, 0x6e, 0x63, + 0x79, 0x2e, 0x56, 0x32, 0x62, 0x65, 0x74, 0x61, 0x31, 0xca, 0x02, 0x20, 0x48, 0x61, 0x73, 0x68, + 0x69, 0x63, 0x6f, 0x72, 0x70, 0x5c, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x5c, 0x54, 0x65, 0x6e, + 0x61, 0x6e, 0x63, 0x79, 0x5c, 0x56, 0x32, 0x62, 0x65, 0x74, 0x61, 0x31, 0xe2, 0x02, 0x2c, 0x48, + 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x5c, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x5c, + 0x54, 0x65, 0x6e, 0x61, 0x6e, 0x63, 0x79, 0x5c, 0x56, 0x32, 0x62, 0x65, 0x74, 0x61, 0x31, 0x5c, + 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, 0x02, 0x23, 0x48, 0x61, + 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x3a, 0x3a, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x3a, + 0x3a, 0x54, 0x65, 0x6e, 0x61, 0x6e, 0x63, 0x79, 0x3a, 0x3a, 0x56, 0x32, 0x62, 0x65, 0x74, 0x61, + 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_pbtenancy_v2beta1_namespace_proto_rawDescOnce sync.Once + file_pbtenancy_v2beta1_namespace_proto_rawDescData = file_pbtenancy_v2beta1_namespace_proto_rawDesc +) + +func file_pbtenancy_v2beta1_namespace_proto_rawDescGZIP() []byte { + file_pbtenancy_v2beta1_namespace_proto_rawDescOnce.Do(func() { + file_pbtenancy_v2beta1_namespace_proto_rawDescData = protoimpl.X.CompressGZIP(file_pbtenancy_v2beta1_namespace_proto_rawDescData) + }) + return file_pbtenancy_v2beta1_namespace_proto_rawDescData +} + +var file_pbtenancy_v2beta1_namespace_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_pbtenancy_v2beta1_namespace_proto_goTypes = []interface{}{ + (*Namespace)(nil), // 0: hashicorp.consul.tenancy.v2beta1.Namespace +} +var file_pbtenancy_v2beta1_namespace_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_pbtenancy_v2beta1_namespace_proto_init() } +func file_pbtenancy_v2beta1_namespace_proto_init() { + if File_pbtenancy_v2beta1_namespace_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_pbtenancy_v2beta1_namespace_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Namespace); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_pbtenancy_v2beta1_namespace_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_pbtenancy_v2beta1_namespace_proto_goTypes, + DependencyIndexes: file_pbtenancy_v2beta1_namespace_proto_depIdxs, + MessageInfos: file_pbtenancy_v2beta1_namespace_proto_msgTypes, + }.Build() + File_pbtenancy_v2beta1_namespace_proto = out.File + file_pbtenancy_v2beta1_namespace_proto_rawDesc = nil + file_pbtenancy_v2beta1_namespace_proto_goTypes = nil + file_pbtenancy_v2beta1_namespace_proto_depIdxs = nil +} diff --git a/proto-public/pbtenancy/v1alpha1/namespace.proto b/proto-public/pbtenancy/v2beta1/namespace.proto similarity index 91% rename from proto-public/pbtenancy/v1alpha1/namespace.proto rename to proto-public/pbtenancy/v2beta1/namespace.proto index e90b10c1e573..6d4a739f6e21 100644 --- 
a/proto-public/pbtenancy/v1alpha1/namespace.proto +++ b/proto-public/pbtenancy/v2beta1/namespace.proto @@ -1,6 +1,6 @@ syntax = "proto3"; -package hashicorp.consul.tenancy.v1alpha1; +package hashicorp.consul.tenancy.v2beta1; import "pbresource/annotations.proto"; diff --git a/proto-public/pbtenancy/v1alpha1/namespace_deepcopy.gen.go b/proto-public/pbtenancy/v2beta1/namespace_deepcopy.gen.go similarity index 97% rename from proto-public/pbtenancy/v1alpha1/namespace_deepcopy.gen.go rename to proto-public/pbtenancy/v2beta1/namespace_deepcopy.gen.go index 97af531ab333..2384004c869f 100644 --- a/proto-public/pbtenancy/v1alpha1/namespace_deepcopy.gen.go +++ b/proto-public/pbtenancy/v2beta1/namespace_deepcopy.gen.go @@ -1,5 +1,5 @@ // Code generated by protoc-gen-deepcopy. DO NOT EDIT. -package tenancyv1alpha1 +package tenancyv2beta1 import ( proto "google.golang.org/protobuf/proto" diff --git a/proto-public/pbtenancy/v1alpha1/namespace_json.gen.go b/proto-public/pbtenancy/v2beta1/namespace_json.gen.go similarity index 96% rename from proto-public/pbtenancy/v1alpha1/namespace_json.gen.go rename to proto-public/pbtenancy/v2beta1/namespace_json.gen.go index 9df4de9df719..4ad7901c16c3 100644 --- a/proto-public/pbtenancy/v1alpha1/namespace_json.gen.go +++ b/proto-public/pbtenancy/v2beta1/namespace_json.gen.go @@ -1,5 +1,5 @@ // Code generated by protoc-json-shim. DO NOT EDIT. -package tenancyv1alpha1 +package tenancyv2beta1 import ( protojson "google.golang.org/protobuf/encoding/protojson" diff --git a/proto-public/pbtenancy/v1alpha1/resource_types.gen.go b/proto-public/pbtenancy/v2beta1/resource_types.gen.go similarity index 87% rename from proto-public/pbtenancy/v1alpha1/resource_types.gen.go rename to proto-public/pbtenancy/v2beta1/resource_types.gen.go index f1b6f70cf103..b0c304040823 100644 --- a/proto-public/pbtenancy/v1alpha1/resource_types.gen.go +++ b/proto-public/pbtenancy/v2beta1/resource_types.gen.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-resource-types. DO NOT EDIT. -package tenancyv1alpha1 +package tenancyv2beta1 import ( "github.com/hashicorp/consul/proto-public/pbresource" @@ -8,7 +8,7 @@ import ( const ( GroupName = "tenancy" - Version = "v1alpha1" + Version = "v2beta1" NamespaceKind = "Namespace" ) diff --git a/test/integration/consul-container/test/catalog/catalog_test.go b/test/integration/consul-container/test/catalog/catalog_test.go index b6e821e1f1f7..5be52792d821 100644 --- a/test/integration/consul-container/test/catalog/catalog_test.go +++ b/test/integration/consul-container/test/catalog/catalog_test.go @@ -29,7 +29,7 @@ func TestCatalog(t *testing.T) { client := pbresource.NewResourceServiceClient(followers[0].GetGRPCConn()) t.Run("one-shot", func(t *testing.T) { - catalogtest.RunCatalogV1Alpha1IntegrationTest(t, client) + catalogtest.RunCatalogV2Beta1IntegrationTest(t, client) }) t.Run("lifecycle", func(t *testing.T) { From 8eb074e7c17a0a2077436637c06f521b036407f5 Mon Sep 17 00:00:00 2001 From: Michael Zalimeni Date: Mon, 16 Oct 2023 17:49:04 -0400 Subject: [PATCH 016/130] [NET-5944] security: Update Go version to 1.20.10 and `x/net` to 0.17.0 (#19225) * Bump golang.org/x/net to 0.17.0 This resolves [CVE-2023-39325](https://nvd.nist.gov/vuln/detail/CVE-2023-39325) / [CVE-2023-44487](https://nvd.nist.gov/vuln/detail/CVE-2023-44487). * Update Go version to 1.20.10 This resolves [CVE-2023-39325](https://nvd.nist.gov/vuln/detail/CVE-2023-39325) / [CVE-2023-44487](https://nvd.nist.gov/vuln/detail/CVE-2023-44487) (`net/http`). 
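
For reference, a bump like this one is typically carried out per Go module with the standard toolchain; the commands below are an assumed sketch of that workflow rather than something recorded in this patch:

```shell-session
$ go get golang.org/x/net@v0.17.0
$ go mod tidy
```

Because the repository contains several Go modules (for example `api`, `sdk`, `envoyextensions`, `proto-public`, and the integration-test modules), the sequence is repeated in each module directory, while the Go release itself is picked up by raising the pinned versions in `.github/workflows/build.yml` and the `GOLANG_VERSION` argument in `build-support/docker/Build-Go.dockerfile`.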
--- .changelog/19225.txt | 9 ++++++++ .github/workflows/build.yml | 22 +++++++++---------- api/go.mod | 4 ++-- api/go.sum | 8 +++---- build-support/docker/Build-Go.dockerfile | 2 +- envoyextensions/go.mod | 2 +- envoyextensions/go.sum | 6 ++--- go.mod | 10 ++++----- go.sum | 20 ++++++++--------- proto-public/go.mod | 6 ++--- proto-public/go.sum | 12 +++++----- sdk/go.mod | 2 +- sdk/go.sum | 4 ++-- test-integ/go.mod | 10 ++++----- test-integ/go.sum | 20 ++++++++--------- .../connect/envoy/test-sds-server/go.mod | 2 +- .../connect/envoy/test-sds-server/go.sum | 16 +++++++------- test/integration/consul-container/go.mod | 10 ++++----- test/integration/consul-container/go.sum | 20 ++++++++--------- testing/deployer/go.mod | 7 +++--- testing/deployer/go.sum | 15 +++++++------ troubleshoot/go.mod | 6 ++--- troubleshoot/go.sum | 12 +++++----- 23 files changed, 118 insertions(+), 107 deletions(-) create mode 100644 .changelog/19225.txt diff --git a/.changelog/19225.txt b/.changelog/19225.txt new file mode 100644 index 000000000000..d4c4d156d454 --- /dev/null +++ b/.changelog/19225.txt @@ -0,0 +1,9 @@ +```release-note:security +Upgrade Go to 1.20.10. +This resolves vulnerability [CVE-2023-39325](https://nvd.nist.gov/vuln/detail/CVE-2023-39325) +/ [CVE-2023-44487](https://nvd.nist.gov/vuln/detail/CVE-2023-44487)(`net/http`). +``` +```release-note:security +Update `golang.org/x/net` to v0.17.0 to address [CVE-2023-39325](https://nvd.nist.gov/vuln/detail/CVE-2023-39325) +/ [CVE-2023-44487](https://nvd.nist.gov/vuln/detail/CVE-2023-44487)(`x/net/http2`). +``` diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index a6bade292ea6..5aade8fadfd6 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -85,15 +85,15 @@ jobs: strategy: matrix: include: - - {go: "1.20.8", goos: "linux", goarch: "386"} - - {go: "1.20.8", goos: "linux", goarch: "amd64"} - - {go: "1.20.8", goos: "linux", goarch: "arm"} - - {go: "1.20.8", goos: "linux", goarch: "arm64"} - - {go: "1.20.8", goos: "freebsd", goarch: "386"} - - {go: "1.20.8", goos: "freebsd", goarch: "amd64"} - - {go: "1.20.8", goos: "windows", goarch: "386"} - - {go: "1.20.8", goos: "windows", goarch: "amd64"} - - {go: "1.20.8", goos: "solaris", goarch: "amd64"} + - {go: "1.20.10", goos: "linux", goarch: "386"} + - {go: "1.20.10", goos: "linux", goarch: "amd64"} + - {go: "1.20.10", goos: "linux", goarch: "arm"} + - {go: "1.20.10", goos: "linux", goarch: "arm64"} + - {go: "1.20.10", goos: "freebsd", goarch: "386"} + - {go: "1.20.10", goos: "freebsd", goarch: "amd64"} + - {go: "1.20.10", goos: "windows", goarch: "386"} + - {go: "1.20.10", goos: "windows", goarch: "amd64"} + - {go: "1.20.10", goos: "solaris", goarch: "amd64"} fail-fast: true name: Go ${{ matrix.go }} ${{ matrix.goos }} ${{ matrix.goarch }} build @@ -182,7 +182,7 @@ jobs: strategy: matrix: include: - - {go: "1.20.8", goos: "linux", goarch: "s390x"} + - {go: "1.20.10", goos: "linux", goarch: "s390x"} fail-fast: true name: Go ${{ matrix.go }} ${{ matrix.goos }} ${{ matrix.goarch }} build @@ -233,7 +233,7 @@ jobs: matrix: goos: [ darwin ] goarch: [ "amd64", "arm64" ] - go: [ "1.20.8" ] + go: [ "1.20.10" ] fail-fast: true name: Go ${{ matrix.go }} ${{ matrix.goos }} ${{ matrix.goarch }} build diff --git a/api/go.mod b/api/go.mod index a9e1b8e353a8..33f77a1d250f 100644 --- a/api/go.mod +++ b/api/go.mod @@ -39,8 +39,8 @@ require ( github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 // 
indirect github.com/stretchr/objx v0.5.0 // indirect - golang.org/x/net v0.13.0 // indirect + golang.org/x/net v0.17.0 // indirect golang.org/x/sync v0.2.0 // indirect - golang.org/x/sys v0.11.0 // indirect + golang.org/x/sys v0.13.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/api/go.sum b/api/go.sum index 6411c8af5446..bfc3fb8a11fb 100644 --- a/api/go.sum +++ b/api/go.sum @@ -182,8 +182,8 @@ golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210410081132-afb366fc7cd1/go.mod h1:9tjilg8BloeKEkVJvy7fQ90B1CfIiPueXVOjqfkSzI8= -golang.org/x/net v0.13.0 h1:Nvo8UFsZ8X3BhAC9699Z1j7XQ3rsZnUUm7jfBEk1ueY= -golang.org/x/net v0.13.0/go.mod h1:zEVYFnQC7m/vmpQFELhcD1EWkZlX69l4oqgmer6hfKA= +golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM= +golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -211,8 +211,8 @@ golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.11.0 h1:eG7RXZHdqOJ1i+0lgLgCpSXAp6M3LYlAo6osgSi0xOM= -golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE= +golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= diff --git a/build-support/docker/Build-Go.dockerfile b/build-support/docker/Build-Go.dockerfile index c0186842fb76..884bc4894af3 100644 --- a/build-support/docker/Build-Go.dockerfile +++ b/build-support/docker/Build-Go.dockerfile @@ -1,7 +1,7 @@ # Copyright (c) HashiCorp, Inc. 
# SPDX-License-Identifier: BUSL-1.1 -ARG GOLANG_VERSION=1.20.8 +ARG GOLANG_VERSION=1.20.10 FROM golang:${GOLANG_VERSION} WORKDIR /consul diff --git a/envoyextensions/go.mod b/envoyextensions/go.mod index 4d4301841ea3..9a03ccda3679 100644 --- a/envoyextensions/go.mod +++ b/envoyextensions/go.mod @@ -41,7 +41,7 @@ require ( github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect golang.org/x/exp v0.0.0-20230817173708-d852ddb80c63 // indirect - golang.org/x/sys v0.11.0 // indirect + golang.org/x/sys v0.13.0 // indirect google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/envoyextensions/go.sum b/envoyextensions/go.sum index de21ca6c0b42..b611aa2308ee 100644 --- a/envoyextensions/go.sum +++ b/envoyextensions/go.sum @@ -200,7 +200,7 @@ golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210410081132-afb366fc7cd1/go.mod h1:9tjilg8BloeKEkVJvy7fQ90B1CfIiPueXVOjqfkSzI8= -golang.org/x/net v0.13.0 h1:Nvo8UFsZ8X3BhAC9699Z1j7XQ3rsZnUUm7jfBEk1ueY= +golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -229,8 +229,8 @@ golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.11.0 h1:eG7RXZHdqOJ1i+0lgLgCpSXAp6M3LYlAo6osgSi0xOM= -golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE= +golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= diff --git a/go.mod b/go.mod index b89b5d420c24..40bbc3dd9901 100644 --- a/go.mod +++ b/go.mod @@ -109,12 +109,12 @@ require ( go.opentelemetry.io/otel/sdk/metric v0.39.0 go.opentelemetry.io/proto/otlp v0.19.0 go.uber.org/goleak v1.1.10 - golang.org/x/crypto v0.12.0 + golang.org/x/crypto v0.14.0 golang.org/x/exp v0.0.0-20230817173708-d852ddb80c63 - golang.org/x/net v0.14.0 + golang.org/x/net v0.17.0 golang.org/x/oauth2 v0.6.0 golang.org/x/sync v0.3.0 - golang.org/x/sys v0.11.0 + golang.org/x/sys v0.13.0 golang.org/x/time v0.3.0 google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 google.golang.org/grpc v1.55.0 @@ -262,8 +262,8 @@ require ( go.uber.org/atomic v1.9.0 // indirect golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 // indirect golang.org/x/mod v0.12.0 // indirect - 
golang.org/x/term v0.11.0 // indirect - golang.org/x/text v0.12.0 // indirect + golang.org/x/term v0.13.0 // indirect + golang.org/x/text v0.13.0 // indirect golang.org/x/tools v0.12.1-0.20230815132531-74c255bcf846 // indirect google.golang.org/api v0.114.0 // indirect google.golang.org/appengine v1.6.7 // indirect diff --git a/go.sum b/go.sum index a809cbba8070..b6a8d9e61906 100644 --- a/go.sum +++ b/go.sum @@ -1037,8 +1037,8 @@ golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5y golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.12.0 h1:tFM/ta59kqch6LlvYnPa0yx5a83cL2nHflFhYKvv9Yk= -golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98yw= +golang.org/x/crypto v0.14.0 h1:wBqGXzWJW6m1XrIKlAH0Hs1JJ7+9KBwnIO8v66Q9cHc= +golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -1133,8 +1133,8 @@ golang.org/x/net v0.0.0-20210726213435-c6fcb2dbf985/go.mod h1:9nx3DQGgdP8bBQD5qx golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= -golang.org/x/net v0.14.0 h1:BONx9s002vGdD9umnlX1Po8vOZmrgH34qlHcD1MfK14= -golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI= +golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM= +golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -1251,13 +1251,13 @@ golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.11.0 h1:eG7RXZHdqOJ1i+0lgLgCpSXAp6M3LYlAo6osgSi0xOM= -golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE= +golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.11.0 
h1:F9tnn/DA/Im8nCwm+fX+1/eBwi4qFjRT++MhtVC4ZX0= -golang.org/x/term v0.11.0/go.mod h1:zC9APTIj3jG3FdV/Ons+XE1riIZXG4aZ4GTHiPZJPIU= +golang.org/x/term v0.13.0 h1:bb+I9cTfFazGW51MZqBVmZy7+JEJMouUHTUSKVQLBek= +golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U= golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -1269,8 +1269,8 @@ golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.12.0 h1:k+n5B8goJNdU7hSvEtMUz3d1Q6D/XW4COJSJR6fN0mc= -golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k= +golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= diff --git a/proto-public/go.mod b/proto-public/go.mod index b77474df64bb..f784df5ff1e5 100644 --- a/proto-public/go.mod +++ b/proto-public/go.mod @@ -16,9 +16,9 @@ require ( github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect github.com/rogpeppe/go-internal v1.10.0 // indirect github.com/stretchr/objx v0.5.0 // indirect - golang.org/x/net v0.13.0 // indirect - golang.org/x/sys v0.11.0 // indirect - golang.org/x/text v0.11.0 // indirect + golang.org/x/net v0.17.0 // indirect + golang.org/x/sys v0.13.0 // indirect + golang.org/x/text v0.13.0 // indirect google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 // indirect gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect gopkg.in/yaml.v3 v3.0.1 // indirect diff --git a/proto-public/go.sum b/proto-public/go.sum index ffb9a0538ca9..d54ebb9fa118 100644 --- a/proto-public/go.sum +++ b/proto-public/go.sum @@ -32,12 +32,12 @@ github.com/stretchr/testify v1.8.3 h1:RP3t2pwF7cMEbC1dqtB6poj3niw/9gnV4Cjg5oW5gt github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= golang.org/x/exp v0.0.0-20230817173708-d852ddb80c63 h1:m64FZMko/V45gv0bNmrNYoDEq8U5YUhetc9cBWKS1TQ= golang.org/x/exp v0.0.0-20230817173708-d852ddb80c63/go.mod h1:0v4NqG35kSWCMzLaMeX+IQrlSnVE/bqGSyC2cz/9Le8= -golang.org/x/net v0.13.0 h1:Nvo8UFsZ8X3BhAC9699Z1j7XQ3rsZnUUm7jfBEk1ueY= -golang.org/x/net v0.13.0/go.mod h1:zEVYFnQC7m/vmpQFELhcD1EWkZlX69l4oqgmer6hfKA= -golang.org/x/sys v0.11.0 h1:eG7RXZHdqOJ1i+0lgLgCpSXAp6M3LYlAo6osgSi0xOM= -golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/text v0.11.0 h1:LAntKIrcmeSKERyiOh0XMV39LXS8IE9UL2yP7+f5ij4= -golang.org/x/text v0.11.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM= +golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= +golang.org/x/sys v0.13.0 
h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE= +golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k= +golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 h1:KpwkzHKEF7B9Zxg18WzOa7djJ+Ha5DzthMyZYQfEn2A= google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1/go.mod h1:nKE/iIaLqn2bQwXBg8f1g2Ylh6r5MN5CmZvuzZCgsCU= diff --git a/sdk/go.mod b/sdk/go.mod index 9fd95927b085..f4adaf73ba79 100644 --- a/sdk/go.mod +++ b/sdk/go.mod @@ -9,7 +9,7 @@ require ( github.com/hashicorp/go-version v1.2.1 github.com/pkg/errors v0.9.1 github.com/stretchr/testify v1.8.3 - golang.org/x/sys v0.10.0 + golang.org/x/sys v0.13.0 ) require ( diff --git a/sdk/go.sum b/sdk/go.sum index 170d4464fc55..490559d90093 100644 --- a/sdk/go.sum +++ b/sdk/go.sum @@ -49,8 +49,8 @@ golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.10.0 h1:SqMFp9UcQJZa+pmYuAKjd9xq1f0j5rLcDIk0mj4qAsA= -golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE= +golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= diff --git a/test-integ/go.mod b/test-integ/go.mod index f86dfd043330..c303fc0f04f4 100644 --- a/test-integ/go.mod +++ b/test-integ/go.mod @@ -203,15 +203,15 @@ require ( go.opentelemetry.io/otel/trace v1.16.0 // indirect go.opentelemetry.io/proto/otlp v0.19.0 // indirect go.uber.org/atomic v1.9.0 // indirect - golang.org/x/crypto v0.12.0 // indirect + golang.org/x/crypto v0.14.0 // indirect golang.org/x/exp v0.0.0-20230817173708-d852ddb80c63 // indirect golang.org/x/mod v0.12.0 // indirect - golang.org/x/net v0.14.0 // indirect + golang.org/x/net v0.17.0 // indirect golang.org/x/oauth2 v0.8.0 // indirect golang.org/x/sync v0.3.0 // indirect - golang.org/x/sys v0.11.0 // indirect - golang.org/x/term v0.11.0 // indirect - golang.org/x/text v0.12.0 // indirect + golang.org/x/sys v0.13.0 // indirect + golang.org/x/term v0.13.0 // indirect + golang.org/x/text v0.13.0 // indirect golang.org/x/time v0.3.0 // indirect golang.org/x/tools v0.12.1-0.20230815132531-74c255bcf846 // indirect google.golang.org/api v0.126.0 // indirect diff --git a/test-integ/go.sum b/test-integ/go.sum index e5edc4188f32..d3620eb9ecb9 100644 --- a/test-integ/go.sum +++ b/test-integ/go.sum @@ -843,8 +843,8 @@ golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5y golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20220314234659-1baeb1ce4c0b/go.mod 
h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.12.0 h1:tFM/ta59kqch6LlvYnPa0yx5a83cL2nHflFhYKvv9Yk= -golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98yw= +golang.org/x/crypto v0.14.0 h1:wBqGXzWJW6m1XrIKlAH0Hs1JJ7+9KBwnIO8v66Q9cHc= +golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -932,8 +932,8 @@ golang.org/x/net v0.0.0-20210726213435-c6fcb2dbf985/go.mod h1:9nx3DQGgdP8bBQD5qx golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= -golang.org/x/net v0.14.0 h1:BONx9s002vGdD9umnlX1Po8vOZmrgH34qlHcD1MfK14= -golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI= +golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM= +golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -1040,13 +1040,13 @@ golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.11.0 h1:eG7RXZHdqOJ1i+0lgLgCpSXAp6M3LYlAo6osgSi0xOM= -golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE= +golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.11.0 h1:F9tnn/DA/Im8nCwm+fX+1/eBwi4qFjRT++MhtVC4ZX0= -golang.org/x/term v0.11.0/go.mod h1:zC9APTIj3jG3FdV/Ons+XE1riIZXG4aZ4GTHiPZJPIU= +golang.org/x/term v0.13.0 h1:bb+I9cTfFazGW51MZqBVmZy7+JEJMouUHTUSKVQLBek= +golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -1058,8 +1058,8 @@ golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text 
v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.12.0 h1:k+n5B8goJNdU7hSvEtMUz3d1Q6D/XW4COJSJR6fN0mc= -golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k= +golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= diff --git a/test/integration/connect/envoy/test-sds-server/go.mod b/test/integration/connect/envoy/test-sds-server/go.mod index 916d9b7211ac..2f0902875514 100644 --- a/test/integration/connect/envoy/test-sds-server/go.mod +++ b/test/integration/connect/envoy/test-sds-server/go.mod @@ -9,7 +9,7 @@ require ( github.com/hashicorp/go-hclog v1.5.0 github.com/hashicorp/go-uuid v1.0.3 // indirect github.com/stretchr/testify v1.8.3 // indirect - golang.org/x/net v0.13.0 // indirect + golang.org/x/net v0.17.0 // indirect google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 // indirect google.golang.org/grpc v1.55.0 ) diff --git a/test/integration/connect/envoy/test-sds-server/go.sum b/test/integration/connect/envoy/test-sds-server/go.sum index 1a851374f877..798aacc0b98e 100644 --- a/test/integration/connect/envoy/test-sds-server/go.sum +++ b/test/integration/connect/envoy/test-sds-server/go.sum @@ -1730,7 +1730,7 @@ golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5y golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.11.0/go.mod h1:xgJhtzW8F9jGdVFWZESrid1U1bjeNy4zgy5cRr/CIio= +golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -1875,8 +1875,8 @@ golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= -golang.org/x/net v0.13.0 h1:Nvo8UFsZ8X3BhAC9699Z1j7XQ3rsZnUUm7jfBEk1ueY= -golang.org/x/net v0.13.0/go.mod h1:zEVYFnQC7m/vmpQFELhcD1EWkZlX69l4oqgmer6hfKA= +golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM= +golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod 
h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190402181905-9f3314589c9a/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -2044,8 +2044,8 @@ golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.10.0 h1:SqMFp9UcQJZa+pmYuAKjd9xq1f0j5rLcDIk0mj4qAsA= -golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE= +golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= @@ -2055,7 +2055,7 @@ golang.org/x/term v0.4.0/go.mod h1:9P2UbLfCdcvo3p/nzKvsmas4TnlujnuoV9hGgYzW1lQ= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= -golang.org/x/term v0.10.0/go.mod h1:lpqdcUyK/oCiQxvxVrppt5ggO2KCZ5QblwqPnfZ6d5o= +golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U= golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -2074,8 +2074,8 @@ golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= -golang.org/x/text v0.11.0 h1:LAntKIrcmeSKERyiOh0XMV39LXS8IE9UL2yP7+f5ij4= -golang.org/x/text v0.11.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k= +golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/time v0.0.0-20161028155119-f51c12702a4d/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= diff --git a/test/integration/consul-container/go.mod b/test/integration/consul-container/go.mod index 3bca51fc2193..4d01c60ce6f9 100644 --- a/test/integration/consul-container/go.mod +++ b/test/integration/consul-container/go.mod @@ -199,14 +199,14 @@ require ( go.opentelemetry.io/otel/trace v1.16.0 // indirect go.opentelemetry.io/proto/otlp v0.19.0 // indirect go.uber.org/atomic v1.9.0 // indirect - golang.org/x/crypto v0.12.0 // indirect + golang.org/x/crypto v0.14.0 // indirect golang.org/x/exp v0.0.0-20230817173708-d852ddb80c63 // indirect - golang.org/x/net v0.14.0 // indirect + golang.org/x/net v0.17.0 // indirect 
golang.org/x/oauth2 v0.8.0 // indirect golang.org/x/sync v0.3.0 // indirect - golang.org/x/sys v0.11.0 // indirect - golang.org/x/term v0.11.0 // indirect - golang.org/x/text v0.12.0 // indirect + golang.org/x/sys v0.13.0 // indirect + golang.org/x/term v0.13.0 // indirect + golang.org/x/text v0.13.0 // indirect golang.org/x/time v0.3.0 // indirect golang.org/x/tools v0.12.1-0.20230815132531-74c255bcf846 // indirect google.golang.org/api v0.126.0 // indirect diff --git a/test/integration/consul-container/go.sum b/test/integration/consul-container/go.sum index eeb6f251dd03..bee49ec44b27 100644 --- a/test/integration/consul-container/go.sum +++ b/test/integration/consul-container/go.sum @@ -829,8 +829,8 @@ golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5y golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20220314234659-1baeb1ce4c0b/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.12.0 h1:tFM/ta59kqch6LlvYnPa0yx5a83cL2nHflFhYKvv9Yk= -golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98yw= +golang.org/x/crypto v0.14.0 h1:wBqGXzWJW6m1XrIKlAH0Hs1JJ7+9KBwnIO8v66Q9cHc= +golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -918,8 +918,8 @@ golang.org/x/net v0.0.0-20210726213435-c6fcb2dbf985/go.mod h1:9nx3DQGgdP8bBQD5qx golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= -golang.org/x/net v0.14.0 h1:BONx9s002vGdD9umnlX1Po8vOZmrgH34qlHcD1MfK14= -golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI= +golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM= +golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -1025,13 +1025,13 @@ golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.11.0 h1:eG7RXZHdqOJ1i+0lgLgCpSXAp6M3LYlAo6osgSi0xOM= -golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE= +golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term 
v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.11.0 h1:F9tnn/DA/Im8nCwm+fX+1/eBwi4qFjRT++MhtVC4ZX0= -golang.org/x/term v0.11.0/go.mod h1:zC9APTIj3jG3FdV/Ons+XE1riIZXG4aZ4GTHiPZJPIU= +golang.org/x/term v0.13.0 h1:bb+I9cTfFazGW51MZqBVmZy7+JEJMouUHTUSKVQLBek= +golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -1043,8 +1043,8 @@ golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.12.0 h1:k+n5B8goJNdU7hSvEtMUz3d1Q6D/XW4COJSJR6fN0mc= -golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k= +golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= diff --git a/testing/deployer/go.mod b/testing/deployer/go.mod index df6cb924f087..4b139b5edf03 100644 --- a/testing/deployer/go.mod +++ b/testing/deployer/go.mod @@ -13,7 +13,7 @@ require ( github.com/mitchellh/copystructure v1.2.0 github.com/rboyer/safeio v0.2.2 github.com/stretchr/testify v1.8.3 - golang.org/x/crypto v0.11.0 + golang.org/x/crypto v0.14.0 ) require ( @@ -39,8 +39,9 @@ require ( github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect github.com/zclconf/go-cty v1.12.1 // indirect golang.org/x/exp v0.0.0-20230321023759-10a507213a29 // indirect - golang.org/x/sys v0.10.0 // indirect - golang.org/x/text v0.11.0 // indirect + golang.org/x/net v0.17.0 // indirect + golang.org/x/sys v0.13.0 // indirect + golang.org/x/text v0.13.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/testing/deployer/go.sum b/testing/deployer/go.sum index fb90eb4a6a33..1db52e2ddbbd 100644 --- a/testing/deployer/go.sum +++ b/testing/deployer/go.sum @@ -182,8 +182,8 @@ github.com/zclconf/go-cty v1.12.1/go.mod h1:s9IfD1LK5ccNMSWCVFCE2rJfHiZgi7JijgeW golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY= -golang.org/x/crypto v0.11.0 h1:6Ewdq3tDic1mg5xRO4milcWCfMVQhI4NkqWWvqejpuA= -golang.org/x/crypto v0.11.0/go.mod h1:xgJhtzW8F9jGdVFWZESrid1U1bjeNy4zgy5cRr/CIio= +golang.org/x/crypto v0.14.0 h1:wBqGXzWJW6m1XrIKlAH0Hs1JJ7+9KBwnIO8v66Q9cHc= +golang.org/x/crypto 
v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= golang.org/x/exp v0.0.0-20230321023759-10a507213a29 h1:ooxPy7fPvB4kwsA2h+iBNHkAbp/4JxTSwCmvdjEYmug= golang.org/x/exp v0.0.0-20230321023759-10a507213a29/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -193,7 +193,8 @@ golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210410081132-afb366fc7cd1/go.mod h1:9tjilg8BloeKEkVJvy7fQ90B1CfIiPueXVOjqfkSzI8= -golang.org/x/net v0.13.0 h1:Nvo8UFsZ8X3BhAC9699Z1j7XQ3rsZnUUm7jfBEk1ueY= +golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM= +golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -219,15 +220,15 @@ golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.10.0 h1:SqMFp9UcQJZa+pmYuAKjd9xq1f0j5rLcDIk0mj4qAsA= -golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE= +golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.11.0 h1:LAntKIrcmeSKERyiOh0XMV39LXS8IE9UL2yP7+f5ij4= -golang.org/x/text v0.11.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k= +golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= diff --git a/troubleshoot/go.mod b/troubleshoot/go.mod index a12fbde8a1fa..04714c696267 100644 --- a/troubleshoot/go.mod +++ b/troubleshoot/go.mod @@ -49,9 +49,9 @@ require ( github.com/rogpeppe/go-internal v1.10.0 // indirect go.opentelemetry.io/proto/otlp v0.19.0 // indirect golang.org/x/exp v0.0.0-20230817173708-d852ddb80c63 // indirect - golang.org/x/net v0.13.0 // indirect - 
golang.org/x/sys v0.11.0 // indirect - golang.org/x/text v0.11.0 // indirect + golang.org/x/net v0.17.0 // indirect + golang.org/x/sys v0.13.0 // indirect + golang.org/x/text v0.13.0 // indirect google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 // indirect google.golang.org/grpc v1.55.0 // indirect gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect diff --git a/troubleshoot/go.sum b/troubleshoot/go.sum index b5f44a5e8f16..b1a898785448 100644 --- a/troubleshoot/go.sum +++ b/troubleshoot/go.sum @@ -378,8 +378,8 @@ golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81R golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20210410081132-afb366fc7cd1/go.mod h1:9tjilg8BloeKEkVJvy7fQ90B1CfIiPueXVOjqfkSzI8= -golang.org/x/net v0.13.0 h1:Nvo8UFsZ8X3BhAC9699Z1j7XQ3rsZnUUm7jfBEk1ueY= -golang.org/x/net v0.13.0/go.mod h1:zEVYFnQC7m/vmpQFELhcD1EWkZlX69l4oqgmer6hfKA= +golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM= +golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -438,8 +438,8 @@ golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.11.0 h1:eG7RXZHdqOJ1i+0lgLgCpSXAp6M3LYlAo6osgSi0xOM= -golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE= +golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -448,8 +448,8 @@ golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.11.0 h1:LAntKIrcmeSKERyiOh0XMV39LXS8IE9UL2yP7+f5ij4= -golang.org/x/text v0.11.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k= +golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod 
h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= From 9f4f99c62699930cb1182c15104036159372a670 Mon Sep 17 00:00:00 2001 From: John Murret Date: Mon, 16 Oct 2023 19:45:54 -0600 Subject: [PATCH 017/130] NET-6097 - sidecar proxy controller - give name to first failover policy target (#19239) --- .../controllers/sidecarproxy/builder/destinations.go | 12 ++++++++++-- .../destination/mixed-multi-destination.golden | 7 ++++--- 2 files changed, 14 insertions(+), 5 deletions(-) diff --git a/internal/mesh/internal/controllers/sidecarproxy/builder/destinations.go b/internal/mesh/internal/controllers/sidecarproxy/builder/destinations.go index 185f56c851c2..f72f6ec314c9 100644 --- a/internal/mesh/internal/controllers/sidecarproxy/builder/destinations.go +++ b/internal/mesh/internal/controllers/sidecarproxy/builder/destinations.go @@ -287,7 +287,12 @@ func (b *Builder) buildDestination( ) clusterName := fmt.Sprintf("%s.%s", portName, sni) - egBase := b.newClusterEndpointGroup("", sni, portName, details.IdentityRefs, connectTimeout, loadBalancer) + egName := "" + + if details.FailoverConfig != nil { + egName = fmt.Sprintf("%s%d~%s", xdscommon.FailoverClusterNamePrefix, 0, clusterName) + } + egBase := b.newClusterEndpointGroup(egName, sni, portName, details.IdentityRefs, connectTimeout, loadBalancer) var endpointGroups []*pbproxystate.EndpointGroup @@ -319,7 +324,10 @@ func (b *Builder) buildDestination( destDC, b.trustDomain, ) - destClusterName := fmt.Sprintf("%s%d~%s", xdscommon.FailoverClusterNamePrefix, i, clusterName) + + // index 0 was already given to non-fail original + failoverGroupIndex := i + 1 + destClusterName := fmt.Sprintf("%s%d~%s", xdscommon.FailoverClusterNamePrefix, failoverGroupIndex, clusterName) egDest := b.newClusterEndpointGroup(destClusterName, destSNI, destPortName, destDetails.IdentityRefs, destConnectTimeout, destLoadBalancer) diff --git a/internal/mesh/internal/controllers/sidecarproxy/builder/testdata/destination/mixed-multi-destination.golden b/internal/mesh/internal/controllers/sidecarproxy/builder/testdata/destination/mixed-multi-destination.golden index 77f4181c317f..000f94ea34a4 100644 --- a/internal/mesh/internal/controllers/sidecarproxy/builder/testdata/destination/mixed-multi-destination.golden +++ b/internal/mesh/internal/controllers/sidecarproxy/builder/testdata/destination/mixed-multi-destination.golden @@ -30,7 +30,8 @@ } } } - } + }, + "name": "failover-target~0~http.api-1.default.dc1.internal.foo.consul" }, { "dynamic": { @@ -54,7 +55,7 @@ } } }, - "name": "failover-target~0~http.api-1.default.dc1.internal.foo.consul" + "name": "failover-target~1~http.api-1.default.dc1.internal.foo.consul" } ] }, @@ -310,7 +311,7 @@ } }, "requiredEndpoints": { - "failover-target~0~http.api-1.default.dc1.internal.foo.consul": { + "failover-target~1~http.api-1.default.dc1.internal.foo.consul": { "id": { "name": "backup-1", "tenancy": { From a6c990c6fe618c10040d64b8f3f7e0e0463e7b7a Mon Sep 17 00:00:00 2001 From: Chris Hut Date: Tue, 17 Oct 2023 06:27:42 -0700 Subject: [PATCH 018/130] Cc 5545: Upgrade HDS packages and modifiers (#19226) * Upgrade @hashicorp/design-system-tokens to 1.9.0 * Upgrade @hashicorp/design-system-components to 1.8.1 * Upgrade @hashicorp/design-system-components and ember-in-viewport * Explicitly install ember-modifier@4.1.0 * rename copy-button * Fix how cleanup is done in with-copyable * Update aria-menu modifier for new structure * Update css-prop modifier to new structure * Convert did-upsert to regular class modifier * Update notification modifier for 
new structure * Update on-oustside modifier for new structure * Move destroy handler registration in with-copyable * Update style modifier for new structure * Update validate modifier for new structure * Guard against setting on destroyed object * Upgrade @hashicorp/design-system-components to 2.14.1 * Remove debugger * Guard against null in aria-menu * Fix undefined hash in validate addon * Upgrade ember-on-resize-modifier * Fix copy button import, missing import and array destructuring --------- Co-authored-by: wenincode --- .../consul/lock-session/list/index.hbs | 4 +- .../consul/peer/address/list/index.hbs | 2 +- .../app/components/code-editor/README.mdx | 2 +- .../app/components/code-editor/index.hbs | 2 +- .../app/components/code-editor/index.js | 32 +- .../README.mdx | 8 +- .../chart.xstate.js | 0 .../index.hbs | 4 +- .../index.js | 2 +- .../index.scss | 0 .../layout.scss | 0 .../skin.scss | 0 .../consul/exposed-path/list/index.hbs | 2 +- .../consul/health-check/list/index.hbs | 2 +- .../app/components/consul/node/list/index.hbs | 2 +- .../components/consul/token/list/index.hbs | 4 +- .../consul/upstream-instance/list/index.hbs | 4 +- .../components/consul/upstream/list/index.hbs | 2 +- .../app/components/copyable-code/index.hbs | 4 +- .../components/disclosure-menu/menu/index.hbs | 18 +- .../components/horizontal-kv-list/README.mdx | 8 +- .../consul-ui/app/components/menu/index.hbs | 25 +- .../consul-ui/app/modifiers/aria-menu.js | 46 +- .../consul-ui/app/modifiers/css-prop.js | 9 +- .../consul-ui/app/modifiers/did-upsert.js | 46 +- .../consul-ui/app/modifiers/notification.js | 29 +- .../consul-ui/app/modifiers/on-outside.js | 43 +- ui/packages/consul-ui/app/modifiers/style.js | 14 +- .../consul-ui/app/modifiers/validate.js | 57 ++- .../consul-ui/app/modifiers/with-copyable.js | 32 +- .../consul-ui/app/styles/components.scss | 2 +- .../consul-ui/app/templates/dc/nodes/show.hbs | 2 +- .../app/templates/dc/services/instance.hbs | 2 +- ui/packages/consul-ui/package.json | 14 +- .../en-us.yaml | 0 ui/yarn.lock | 410 ++++++++++++++---- 36 files changed, 525 insertions(+), 308 deletions(-) rename ui/packages/consul-ui/app/components/{copy-button => consul-copy-button}/README.mdx (93%) rename ui/packages/consul-ui/app/components/{copy-button => consul-copy-button}/chart.xstate.js (100%) rename ui/packages/consul-ui/app/components/{copy-button => consul-copy-button}/index.hbs (76%) rename ui/packages/consul-ui/app/components/{copy-button => consul-copy-button}/index.js (80%) rename ui/packages/consul-ui/app/components/{copy-button => consul-copy-button}/index.scss (100%) rename ui/packages/consul-ui/app/components/{copy-button => consul-copy-button}/layout.scss (100%) rename ui/packages/consul-ui/app/components/{copy-button => consul-copy-button}/skin.scss (100%) rename ui/packages/consul-ui/translations/components/{copy-button => consul-copy-button}/en-us.yaml (100%) diff --git a/ui/packages/consul-lock-sessions/app/components/consul/lock-session/list/index.hbs b/ui/packages/consul-lock-sessions/app/components/consul/lock-session/list/index.hbs index 247b0dd8fc9d..465c0721ec7b 100644 --- a/ui/packages/consul-lock-sessions/app/components/consul/lock-session/list/index.hbs +++ b/ui/packages/consul-lock-sessions/app/components/consul/lock-session/list/index.hbs @@ -14,7 +14,7 @@ as |item index|> {{else}} {{item.ID}} - @@ -28,7 +28,7 @@ as |item index|> ID
- diff --git a/ui/packages/consul-peerings/app/components/consul/peer/address/list/index.hbs b/ui/packages/consul-peerings/app/components/consul/peer/address/list/index.hbs index bc6294e015f0..aabada41ad10 100644 --- a/ui/packages/consul-peerings/app/components/consul/peer/address/list/index.hbs +++ b/ui/packages/consul-peerings/app/components/consul/peer/address/list/index.hbs @@ -18,7 +18,7 @@
{{address}}
-
- diff --git a/ui/packages/consul-ui/app/components/code-editor/index.js b/ui/packages/consul-ui/app/components/code-editor/index.js index 72e3abf8eb8a..65566d159b76 100644 --- a/ui/packages/consul-ui/app/components/code-editor/index.js +++ b/ui/packages/consul-ui/app/components/code-editor/index.js @@ -36,22 +36,24 @@ export default Component.extend({ } }, setMode: function (mode) { - let options = { - ...DEFAULTS, - mode: mode.mime, - readOnly: this.readonly, - }; - if (mode.name === 'XML') { - options.htmlMode = mode.htmlMode; - options.matchClosing = mode.matchClosing; - options.alignCDATA = mode.alignCDATA; - } - set(this, 'options', options); + if (!this.isDestroying && !this.isDestroyed) { + let options = { + ...DEFAULTS, + mode: mode.mime, + readOnly: this.readonly, + }; + if (mode.name === 'XML') { + options.htmlMode = mode.htmlMode; + options.matchClosing = mode.matchClosing; + options.alignCDATA = mode.alignCDATA; + } + set(this, 'options', options); - const editor = this.editor; - editor.setOption('mode', mode.mime); - this.helper.lint(editor, mode.mode); - set(this, 'mode', mode); + const editor = this.editor; + editor.setOption('mode', mode.mime); + this.helper.lint(editor, mode.mode); + set(this, 'mode', mode); + } }, willDestroyElement: function () { this._super(...arguments); diff --git a/ui/packages/consul-ui/app/components/copy-button/README.mdx b/ui/packages/consul-ui/app/components/consul-copy-button/README.mdx similarity index 93% rename from ui/packages/consul-ui/app/components/copy-button/README.mdx rename to ui/packages/consul-ui/app/components/consul-copy-button/README.mdx index 7f45737fdb78..c207545f30f1 100644 --- a/ui/packages/consul-ui/app/components/copy-button/README.mdx +++ b/ui/packages/consul-ui/app/components/consul-copy-button/README.mdx @@ -1,4 +1,4 @@ -# CopyButton +# ConsulCopyButton Button component used for copy-to-clipboard functionality so the user can easily copy specified text to their clipboard, along with tooltip-like notifications so the user has some sort of feedback to know the value has been copied. @@ -10,7 +10,7 @@ Can be used inline to render only a small icon for the button with no other text
Icon only
- @@ -20,12 +20,12 @@ Can be used inline to render only a small icon for the button with no other text
Icon and text
- Copy me! - +
``` diff --git a/ui/packages/consul-ui/app/components/copy-button/chart.xstate.js b/ui/packages/consul-ui/app/components/consul-copy-button/chart.xstate.js similarity index 100% rename from ui/packages/consul-ui/app/components/copy-button/chart.xstate.js rename to ui/packages/consul-ui/app/components/consul-copy-button/chart.xstate.js diff --git a/ui/packages/consul-ui/app/components/copy-button/index.hbs b/ui/packages/consul-ui/app/components/consul-copy-button/index.hbs similarity index 76% rename from ui/packages/consul-ui/app/components/copy-button/index.hbs rename to ui/packages/consul-ui/app/components/consul-copy-button/index.hbs index ed3b42388cd3..ace1ff323223 100644 --- a/ui/packages/consul-ui/app/components/copy-button/index.hbs +++ b/ui/packages/consul-ui/app/components/consul-copy-button/index.hbs @@ -14,12 +14,12 @@ {{#let (fn dispatch 'SUCCESS') (fn dispatch 'ERROR') (fn dispatch 'RESET') as |success error reset|}}