Skip to content

Commit

Permalink
Merge branch 'main' into NET-4519
Browse files Browse the repository at this point in the history
  • Loading branch information
absolutelightning authored Sep 19, 2023
2 parents c52adb4 + 212793a commit 738e78d
Show file tree
Hide file tree
Showing 143 changed files with 4,393 additions and 1,945 deletions.
3 changes: 3 additions & 0 deletions .changelog/17107.txt
Original file line number Diff line number Diff line change
@@ -0,0 +1,3 @@
```release-note:breaking-change
api: RaftLeaderTransfer now requires an id string. An empty string can be specified to keep the old behavior.
```
3 changes: 3 additions & 0 deletions .changelog/18831.txt
Original file line number Diff line number Diff line change
@@ -0,0 +1,3 @@
```release-note:bug
gateways: Fix a bug where gateway to service mappings weren't being cleaned up properly when externally registered proxies were being deregistered.
```
3 changes: 3 additions & 0 deletions .copywrite.hcl
Original file line number Diff line number Diff line change
Expand Up @@ -27,6 +27,9 @@ project {
"agent/grpc-middleware/rate_limit_mappings.gen.go",
"agent/uiserver/dist/**",

# ignoring policy embedded files
"agent/structs/acltemplatedpolicy/policies/ce/**",

# licensed under MPL - ignoring for now until the copywrite tool can support
# multiple licenses per repo.
"sdk/**",
Expand Down
8 changes: 4 additions & 4 deletions .github/scripts/get_runner_classes_windows.sh
Original file line number Diff line number Diff line change
Expand Up @@ -10,11 +10,11 @@ set -euo pipefail
case "$GITHUB_REPOSITORY" in
*-enterprise)
# shellcheck disable=SC2129
echo "compute-small=['self-hosted', 'ondemand', 'os=windows-2019', 'type=m6a.4xlarge']" >>"$GITHUB_OUTPUT"
echo "compute-medium=['self-hosted', 'ondemand', 'os=windows-2019', 'type=m6a.8xlarge']" >>"$GITHUB_OUTPUT"
echo "compute-large=['self-hosted', 'ondemand', 'os=windows-2019', 'type=m6a.12xlarge']" >>"$GITHUB_OUTPUT"
echo "compute-small=['self-hosted', 'ondemand', 'os=windows-2019', 'type=m6a.2xlarge']" >>"$GITHUB_OUTPUT"
echo "compute-medium=['self-hosted', 'ondemand', 'os=windows-2019', 'type=m6a.4xlarge']" >>"$GITHUB_OUTPUT"
echo "compute-large=['self-hosted', 'ondemand', 'os=windows-2019', 'type=m6a.8xlarge']" >>"$GITHUB_OUTPUT"
# m5d.8xlarge is equivalent to our xl custom runner in CE
echo "compute-xl=['self-hosted', 'ondemand', 'os=windows-2019', 'type=m6a.16xlarge']" >>"$GITHUB_OUTPUT"
echo "compute-xl=['self-hosted', 'ondemand', 'os=windows-2019', 'type=m6a.12xlarge']" >>"$GITHUB_OUTPUT"
;;
*)
# shellcheck disable=SC2129
Expand Down
5 changes: 5 additions & 0 deletions .github/workflows/test-integrations-windows.yml
Original file line number Diff line number Diff line change
Expand Up @@ -76,6 +76,11 @@ jobs:
name: '${{ env.CONSUL_BINARY_UPLOAD_NAME }}'
path: ${{ github.workspace }}

- name: Create dist folder and copy binary
run: |
mkdir dist
cp ${{ github.workspace }}\consul.exe dist\
- name: Restore mode+x
run: icacls ${{ github.workspace }}\consul.exe /grant:rx Everyone:RX

Expand Down
4 changes: 2 additions & 2 deletions agent/acl_endpoint_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -1378,7 +1378,7 @@ func TestACL_HTTP(t *testing.T) {

require.Equal(t, api.ACLTemplatedPolicyResponse{
TemplateName: api.ACLTemplatedPolicyServiceName,
Schema: structs.ACLTemplatedPolicyIdentitiesSchema,
Schema: structs.ACLTemplatedPolicyServiceSchema,
Template: structs.ACLTemplatedPolicyService,
}, list[api.ACLTemplatedPolicyServiceName])
})
Expand All @@ -1401,7 +1401,7 @@ func TestACL_HTTP(t *testing.T) {

var templatedPolicy api.ACLTemplatedPolicyResponse
require.NoError(t, json.NewDecoder(resp.Body).Decode(&templatedPolicy))
require.Equal(t, structs.ACLTemplatedPolicyDNSSchema, templatedPolicy.Schema)
require.Equal(t, structs.ACLTemplatedPolicyNoRequiredVariablesSchema, templatedPolicy.Schema)
require.Equal(t, api.ACLTemplatedPolicyDNSName, templatedPolicy.TemplateName)
require.Equal(t, structs.ACLTemplatedPolicyDNS, templatedPolicy.Template)
})
Expand Down
5 changes: 1 addition & 4 deletions agent/agent_endpoint_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -737,9 +737,6 @@ func TestAgent_Service(t *testing.T) {
if tt.wantWait != 0 {
assert.True(t, elapsed >= tt.wantWait, "should have waited at least %s, "+
"took %s", tt.wantWait, elapsed)
} else {
assert.True(t, elapsed < 10*time.Millisecond, "should not have waited, "+
"took %s", elapsed)
}

if tt.wantResp != nil {
Expand Down Expand Up @@ -4448,7 +4445,7 @@ func testAgent_RegisterServiceDeregisterService_Sidecar(t *testing.T, extraHCL s
}
`,
enableACL: true,
policies: ``, // No policy means no valid token
policies: ``, // No policies means no valid token
wantNS: nil,
wantErr: "Permission denied",
},
Expand Down
17 changes: 6 additions & 11 deletions agent/consul/leader_connect_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -52,22 +52,23 @@ func TestConnectCA_ConfigurationSet_ChangeKeyConfig_Primary(t *testing.T) {
src := src
dst := dst
t.Run(fmt.Sprintf("%s-%d to %s-%d", src.keyType, src.keyBits, dst.keyType, dst.keyBits), func(t *testing.T) {
t.Parallel()
// TODO(flaky): making this test parallel seems to create performance problems
// in CI. Until we spend time optimizing this test, it's best to take the runtime hit.
// t.Parallel()

providerState := map[string]string{"foo": "dc1-value"}

// Initialize primary as the primary DC
_, srv := testServerWithConfig(t, func(c *Config) {
c.Datacenter = "dc1"
c.PrimaryDatacenter = "dc1"
c.Build = "1.6.0"
c.CAConfig.Config["PrivateKeyType"] = src.keyType
c.CAConfig.Config["PrivateKeyBits"] = src.keyBits
c.CAConfig.Config["test_state"] = providerState
})
codec := rpcClient(t, srv)

waitForLeaderEstablishment(t, srv)
testrpc.WaitForLeader(t, srv.RPC, "dc1")
testrpc.WaitForActiveCARoot(t, srv.RPC, "dc1", nil)

var (
Expand Down Expand Up @@ -557,23 +558,17 @@ func TestConnectCA_ConfigurationSet_RootRotation_Secondary(t *testing.T) {

t.Parallel()

dir1, s1 := testServerWithConfig(t, func(c *Config) {
c.Build = "1.6.0"
_, s1 := testServerWithConfig(t, func(c *Config) {
c.PrimaryDatacenter = "dc1"
})
defer os.RemoveAll(dir1)
defer s1.Shutdown()

testrpc.WaitForLeader(t, s1.RPC, "dc1")

// dc2 as a secondary DC
dir2, s2 := testServerWithConfig(t, func(c *Config) {
_, s2 := testServerWithConfig(t, func(c *Config) {
c.Datacenter = "dc2"
c.PrimaryDatacenter = "dc1"
c.Build = "1.6.0"
})
defer os.RemoveAll(dir2)
defer s2.Shutdown()

// Create the WAN link
joinWAN(t, s2, s1)
Expand Down
34 changes: 22 additions & 12 deletions agent/consul/server.go
Original file line number Diff line number Diff line change
Expand Up @@ -137,6 +137,7 @@ const (
LeaderTransferMinVersion = "1.6.0"

CatalogResourceExperimentName = "resource-apis"
V2TenancyExperimentName = "v2tenancy"
)

const (
Expand Down Expand Up @@ -819,7 +820,7 @@ func NewServer(config *Config, flat Deps, externalGRPCServer *grpc.Server,
go s.reportingManager.Run(&lib.StopChannelContext{StopCh: s.shutdownCh})

// Setup insecure resource service client.
if err := s.setupInsecureResourceServiceClient(flat.Registry, logger); err != nil {
if err := s.setupInsecureResourceServiceClient(flat.Registry, logger, flat); err != nil {
return nil, err
}

Expand Down Expand Up @@ -1393,29 +1394,38 @@ func (s *Server) setupExternalGRPC(config *Config, deps Deps, logger hclog.Logge
})
s.peerStreamServer.Register(s.externalGRPCServer)

tenancyBridge := NewV1TenancyBridge(s)
if stringslice.Contains(deps.Experiments, V2TenancyExperimentName) {
tenancyBridge = resource.NewV2TenancyBridge()
}

s.resourceServiceServer = resourcegrpc.NewServer(resourcegrpc.Config{
Registry: deps.Registry,
Backend: s.raftStorageBackend,
ACLResolver: s.ACLResolver,
Logger: logger.Named("grpc-api.resource"),
V1TenancyBridge: NewV1TenancyBridge(s),
Registry: deps.Registry,
Backend: s.raftStorageBackend,
ACLResolver: s.ACLResolver,
Logger: logger.Named("grpc-api.resource"),
TenancyBridge: tenancyBridge,
})
s.resourceServiceServer.Register(s.externalGRPCServer)

reflection.Register(s.externalGRPCServer)
}

func (s *Server) setupInsecureResourceServiceClient(typeRegistry resource.Registry, logger hclog.Logger) error {
func (s *Server) setupInsecureResourceServiceClient(typeRegistry resource.Registry, logger hclog.Logger, deps Deps) error {
if s.raftStorageBackend == nil {
return fmt.Errorf("raft storage backend cannot be nil")
}

tenancyBridge := NewV1TenancyBridge(s)
if stringslice.Contains(deps.Experiments, V2TenancyExperimentName) {
tenancyBridge = resource.NewV2TenancyBridge()
}
server := resourcegrpc.NewServer(resourcegrpc.Config{
Registry: typeRegistry,
Backend: s.raftStorageBackend,
ACLResolver: resolver.DANGER_NO_AUTH{},
Logger: logger.Named("grpc-api.resource"),
V1TenancyBridge: NewV1TenancyBridge(s),
Registry: typeRegistry,
Backend: s.raftStorageBackend,
ACLResolver: resolver.DANGER_NO_AUTH{},
Logger: logger.Named("grpc-api.resource"),
TenancyBridge: tenancyBridge,
})

conn, err := s.runInProcessGRPCServer(server.Register)
Expand Down
6 changes: 6 additions & 0 deletions agent/consul/state/catalog.go
Original file line number Diff line number Diff line change
Expand Up @@ -2065,6 +2065,12 @@ func (s *Store) deleteServiceTxn(tx WriteTxn, idx uint64, nodeName, serviceID st
if err := cleanupKindServiceName(tx, idx, sn, structs.ServiceKindConnectEnabled); err != nil {
return fmt.Errorf("failed to cleanup connect-enabled service name: %v", err)
}
			// Clean up gateway wildcard associations here as well: when a
			// proxy is deregistered after the service it fronts, the check
			// below may not map 1:1 between proxy and service names, so the
			// mapping would otherwise be leaked.
if err := cleanupGatewayWildcards(tx, idx, sn, false); err != nil {
return fmt.Errorf("failed to clean up gateway-service associations for %q: %v", psn.String(), err)
}
}
}

Expand Down
79 changes: 79 additions & 0 deletions agent/consul/state/catalog_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -8311,6 +8311,85 @@ func TestCatalog_cleanupGatewayWildcards_panic(t *testing.T) {
require.NoError(t, s.DeleteNode(6, "foo", nil, ""))
}

// TestCatalog_cleanupGatewayWildcards_proxy verifies that a wildcard
// ingress-gateway mapping is kept alive while a service's connect proxy is
// still registered, and is cleaned up only once both the service and its
// proxy have been deregistered.
func TestCatalog_cleanupGatewayWildcards_proxy(t *testing.T) {
	store := testStateStore(t)
	entMeta := structs.DefaultEnterpriseMetaInDefaultPartition()

	// Node that hosts every service registered in this test.
	require.NoError(t, store.EnsureNode(0, &structs.Node{
		ID:   "c73b8fdf-4ef8-4e43-9aa2-59e85cc6a70c",
		Node: "foo",
	}))

	// Global proxy defaults so services speak the http protocol required by
	// the ingress listener below.
	require.NoError(t, store.EnsureConfigEntry(1, &structs.ProxyConfigEntry{
		Kind: structs.ProxyDefaults,
		Name: structs.ProxyConfigGlobal,
		Config: map[string]interface{}{
			"protocol": "http",
		},
	}))

	// Ingress gateway exposing all http services through a wildcard entry.
	require.NoError(t, store.EnsureConfigEntry(3, &structs.IngressGatewayConfigEntry{
		Kind: "ingress-gateway",
		Name: "my-gateway-2-ingress",
		Listeners: []structs.IngressListener{
			{
				Port:     1111,
				Protocol: "http",
				Services: []structs.IngressService{
					{
						Name:           "*",
						EnterpriseMeta: *entMeta,
					},
				},
			},
		},
	}))

	// Register a regular service and a sidecar proxy that fronts it.
	svc := structs.NodeService{
		ID:             "api",
		Service:        "api",
		Address:        "127.0.0.2",
		Port:           443,
		EnterpriseMeta: *entMeta,
	}
	require.NoError(t, store.EnsureService(4, "foo", &svc))

	sidecar := structs.NodeService{
		Kind:    structs.ServiceKindConnectProxy,
		ID:      "api-proxy",
		Service: "api-proxy",
		Address: "127.0.0.3",
		Port:    443,
		Proxy: structs.ConnectProxyConfig{
			DestinationServiceName: "api",
			DestinationServiceID:   "api",
		},
		EnterpriseMeta: *entMeta,
	}
	require.NoError(t, store.EnsureService(5, "foo", &sidecar))

	// The wildcard should have produced exactly one gateway-service mapping.
	_, mappings, err := store.GatewayServices(nil, "my-gateway-2-ingress", entMeta)
	require.NoError(t, err)
	require.Len(t, mappings, 1)

	// Deleting only the target service must NOT drop the mapping, because
	// the connect proxy still references it.
	require.NoError(t, store.DeleteService(6, "foo", "api", nil, ""))
	_, mappings, err = store.GatewayServices(nil, "my-gateway-2-ingress", entMeta)
	require.NoError(t, err)
	require.Len(t, mappings, 1)

	// Once the proxy is deregistered as well, the mapping must be gone.
	require.NoError(t, store.DeleteService(7, "foo", "api-proxy", nil, ""))
	_, mappings, err = store.GatewayServices(nil, "my-gateway-2-ingress", entMeta)
	require.NoError(t, err)
	require.Len(t, mappings, 0)
}

func TestCatalog_DownstreamsForService(t *testing.T) {
defaultMeta := structs.DefaultEnterpriseMetaInDefaultPartition()

Expand Down
4 changes: 3 additions & 1 deletion agent/consul/tenancy_bridge.go
Original file line number Diff line number Diff line change
Expand Up @@ -3,13 +3,15 @@

package consul

import "github.com/hashicorp/consul/agent/grpc-external/services/resource"

// V1TenancyBridge is used by the resource service to access V1 implementations of
// partitions and namespaces. This bridge will be removed when V2 implemenations
// of partitions and namespaces are available.
type V1TenancyBridge struct {
server *Server
}

func NewV1TenancyBridge(server *Server) *V1TenancyBridge {
func NewV1TenancyBridge(server *Server) resource.TenancyBridge {
return &V1TenancyBridge{server: server}
}
2 changes: 1 addition & 1 deletion agent/grpc-external/services/resource/delete.go
Original file line number Diff line number Diff line change
Expand Up @@ -20,7 +20,7 @@ import (
"github.com/hashicorp/consul/proto-public/pbresource"
)

// Deletes a resource.
// Delete deletes a resource.
// - To delete a resource regardless of the stored version, set Version = ""
// - Supports deleting a resource by name, hence Id.Uid may be empty.
// - Delete of a previously deleted or non-existent resource is a no-op to support idempotency.
Expand Down
2 changes: 1 addition & 1 deletion agent/grpc-external/services/resource/list_by_owner.go
Original file line number Diff line number Diff line change
Expand Up @@ -43,7 +43,7 @@ func (s *Server) ListByOwner(ctx context.Context, req *pbresource.ListByOwnerReq
}

// Check v1 tenancy exists for the v2 resource.
if err = v1TenancyExists(reg, s.V1TenancyBridge, req.Owner.Tenancy, codes.InvalidArgument); err != nil {
if err = v1TenancyExists(reg, s.TenancyBridge, req.Owner.Tenancy, codes.InvalidArgument); err != nil {
return nil, err
}

Expand Down
2 changes: 1 addition & 1 deletion agent/grpc-external/services/resource/read.go
Original file line number Diff line number Diff line change
Expand Up @@ -54,7 +54,7 @@ func (s *Server) Read(ctx context.Context, req *pbresource.ReadRequest) (*pbreso
}

// Check V1 tenancy exists for the V2 resource.
if err = v1TenancyExists(reg, s.V1TenancyBridge, req.Id.Tenancy, codes.NotFound); err != nil {
if err = v1TenancyExists(reg, s.TenancyBridge, req.Id.Tenancy, codes.NotFound); err != nil {
return nil, err
}

Expand Down
6 changes: 3 additions & 3 deletions agent/grpc-external/services/resource/server.go
Original file line number Diff line number Diff line change
Expand Up @@ -31,9 +31,9 @@ type Config struct {
// Backend is the storage backend that will be used for resource persistence.
Backend Backend
ACLResolver ACLResolver
// V1TenancyBridge temporarily allows us to use V1 implementations of
// TenancyBridge temporarily allows us to use V1 implementations of
// partitions and namespaces until V2 implementations are available.
V1TenancyBridge TenancyBridge
TenancyBridge TenancyBridge
}

//go:generate mockery --name Registry --inpackage
Expand Down Expand Up @@ -147,7 +147,7 @@ func validateId(id *pbresource.ID, errorPrefix string) error {
id.Tenancy = &pbresource.Tenancy{
Partition: "",
Namespace: "",
// TODO(spatel): Remove when peerTenancy introduced.
// TODO(spatel): NET-5475 - Remove as part of peer_name moving to PeerTenancy
PeerName: "local",
}
}
Expand Down
10 changes: 5 additions & 5 deletions agent/grpc-external/services/resource/server_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -86,11 +86,11 @@ func testServer(t *testing.T) *Server {
mockTenancyBridge.On("IsNamespaceMarkedForDeletion", resource.DefaultPartitionName, resource.DefaultNamespaceName).Return(false, nil)

return NewServer(Config{
Logger: testutil.Logger(t),
Registry: resource.NewRegistry(),
Backend: backend,
ACLResolver: mockACLResolver,
V1TenancyBridge: mockTenancyBridge,
Logger: testutil.Logger(t),
Registry: resource.NewRegistry(),
Backend: backend,
ACLResolver: mockACLResolver,
TenancyBridge: mockTenancyBridge,
})
}

Expand Down
Loading

0 comments on commit 738e78d

Please sign in to comment.