ci: fix runner calculation to exclude the top level directory as part of the calculation #17090

Merged · 7 commits · Apr 24, 2023
3 changes: 3 additions & 0 deletions .changelog/5200.txt
@@ -0,0 +1,3 @@
+ ```release-note:improvement
+ connect: update supported envoy versions to 1.23.8, 1.24.6, 1.25.4, 1.26.0
+ ```
38 changes: 10 additions & 28 deletions .github/workflows/test-integrations.yml
@@ -153,16 +153,17 @@ jobs:
# this is further going to multiplied in envoy-integration tests by the
# other dimensions in the matrix. Currently TOTAL_RUNNERS would be
# multiplied by 8 based on these values:
# envoy-version: ["1.22.11", "1.23.8", "1.24.6", "1.25.4"]
# envoy-version: ["1.23.8", "1.24.6", "1.25.4", "1.26.0"]
# xds-target: ["server", "client"]
TOTAL_RUNNERS: 4
JQ_SLICER: '[ inputs ] | [_nwise(length / $runnercount | floor)]'
run: |
NUM_RUNNERS=$TOTAL_RUNNERS
- NUM_DIRS=$(find ./test/integration/connect/envoy -maxdepth 1 -type d | wc -l)
+ NUM_DIRS=$(find ./test/integration/connect/envoy -mindepth 1 -maxdepth 1 -type d | wc -l)
+
if [ "$NUM_DIRS" -lt "$NUM_RUNNERS" ]; then
echo "TOTAL_RUNNERS is larger than the number of tests/packages to split."
- NUM_RUNNERS=$NUM_DIRS
+ NUM_RUNNERS=$((NUM_DIRS-1))
fi
# fix issue where test splitting calculation generates 1 more split than TOTAL_RUNNERS.
NUM_RUNNERS=$((NUM_RUNNERS-1))
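The `-mindepth 1` matters because `find DIR -maxdepth 1 -type d` also prints `DIR` itself, so the old count was always one higher than the number of test directories. A minimal sketch of the difference, using hypothetical paths:

```shell
mkdir -p /tmp/envoy-demo/case1 /tmp/envoy-demo/case2 /tmp/envoy-demo/case3

# Without -mindepth 1 the search root itself is included: prints 4.
find /tmp/envoy-demo -maxdepth 1 -type d | wc -l

# With -mindepth 1 only the test case directories are counted: prints 3.
find /tmp/envoy-demo -mindepth 1 -maxdepth 1 -type d | wc -l
```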
@@ -176,42 +177,21 @@ jobs:

envoy-integration-test:
runs-on: ${{ fromJSON(needs.setup.outputs.compute-xl) }}
- permissions:
- id-token: write # NOTE: this permission is explicitly required for Vault auth.
- contents: read
needs:
- setup
- generate-envoy-job-matrices
- dev-build
strategy:
fail-fast: false
matrix:
envoy-version: ["1.22.11", "1.23.8", "1.24.6", "1.25.4"]
envoy-version: ["1.23.8", "1.24.6", "1.25.4", "1.26.0"]
xds-target: ["server", "client"]
test-cases: ${{ fromJSON(needs.generate-envoy-job-matrices.outputs.envoy-matrix) }}
env:
ENVOY_VERSION: ${{ matrix.envoy-version }}
XDS_TARGET: ${{ matrix.xds-target }}
AWS_LAMBDA_REGION: us-west-2
steps:
- # NOTE: ENT specific step as we store secrets in Vault.
- - name: Authenticate to Vault
- if: ${{ endsWith(github.repository, '-enterprise') }}
- id: vault-auth
- run: vault-auth
-
- # NOTE: ENT specific step as we store secrets in Vault.
- - name: Fetch Secrets
- if: ${{ endsWith(github.repository, '-enterprise') }}
- id: secrets
- uses: hashicorp/vault-action@v2.5.0
- with:
- url: ${{ steps.vault-auth.outputs.addr }}
- caCertificate: ${{ steps.vault-auth.outputs.ca_certificate }}
- token: ${{ steps.vault-auth.outputs.token }}
- secrets: |
- kv/data/github/${{ github.repository }}/aws arn | AWS_ROLE_ARN ;
-
- uses: actions/checkout@24cb9080177205b6e8c946b17badbe402adc938f # v3.4.0
- uses: actions/setup-go@6edd4406fa81c3da01a34fa6f6343087c207a568 # v3.5.0
with:
@@ -273,10 +253,11 @@ jobs:
run: |
cd ./test/integration/consul-container
NUM_RUNNERS=$TOTAL_RUNNERS
- NUM_DIRS=$(find ./test -maxdepth 2 -type d | wc -l)
+ NUM_DIRS=$(find ./test -mindepth 1 -maxdepth 2 -type d | wc -l)
+
if [ "$NUM_DIRS" -lt "$NUM_RUNNERS" ]; then
echo "TOTAL_RUNNERS is larger than the number of tests/packages to split."
- NUM_RUNNERS=$NUM_DIRS
+ NUM_RUNNERS=$((NUM_DIRS-1))
fi
# fix issue where test splitting calculation generates 1 more split than TOTAL_RUNNERS.
NUM_RUNNERS=$((NUM_RUNNERS-1))
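The trailing `NUM_RUNNERS=$((NUM_RUNNERS-1))` compensates for the `JQ_SLICER` defined earlier in this workflow (`[ inputs ] | [_nwise(length / $runnercount | floor)]`): flooring the slice size generally leaves a remainder group, so `_nwise` emits one more slice than `$runnercount`. A rough illustration with 7 items split for 3 runners (the invocation style is assumed; the workflow pipes its directory list through the same filter):

```shell
# floor(7 / 3) = 2, so _nwise(2) chunks 7 items into 4 slices:
# [["a","b"],["c","d"],["e","f"],["g"]] -- one more than the 3 requested.
printf '"%s"\n' a b c d e f g \
  | jq -n --argjson runnercount 3 '[ inputs ] | [_nwise(length / $runnercount | floor)]'
```

Aiming one runner low therefore lands on the intended slice count in the common case where the items do not divide evenly.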
@@ -379,9 +360,10 @@ jobs:
cd ./test/integration/consul-container/test/upgrade
NUM_RUNNERS=$TOTAL_RUNNERS
NUM_DIRS=$(go test ./... -list=. -json | jq -r '.Output | select (. !=null) | select(. | startswith("Test")) | gsub("[\\n\\t]"; "")' | wc -l)
+
if [ "$NUM_DIRS" -lt "$NUM_RUNNERS" ]; then
echo "TOTAL_RUNNERS is larger than the number of tests/packages to split."
- NUM_RUNNERS=$NUM_DIRS
+ NUM_RUNNERS=$((NUM_DIRS-1))
fi
# fix issue where test splitting calculation generates 1 more split than TOTAL_RUNNERS.
NUM_RUNNERS=$((NUM_RUNNERS-1))
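The upgrade job splits on test functions instead of directories: `go test -list=. -json` emits one JSON event per line of output, and the jq filter keeps only events whose `Output` field is a test name. A hedged sketch of what the filter sees and keeps (the sample events are illustrative, not captured from CI):

```shell
printf '%s\n' \
  '{"Action":"output","Package":"upgrade","Output":"TestBasicUpgrade\n"}' \
  '{"Action":"output","Package":"upgrade","Output":"ok  \tupgrade\t0.01s\n"}' \
  | jq -r '.Output | select(. != null) | select(. | startswith("Test")) | gsub("[\\n\\t]"; "")'
# prints only: TestBasicUpgrade
```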
25 changes: 15 additions & 10 deletions agent/consul/state/intention.go
@@ -1079,16 +1079,21 @@ func (s *Store) intentionTopologyTxn(
}
addSvcs(tempServices)

- // Query the virtual ip table as well to include virtual services that don't have a registered instance yet.
- vipIndex, vipServices, err := servicesVirtualIPsTxn(tx)
- if err != nil {
- return index, nil, fmt.Errorf("failed to list service virtual IPs: %v", err)
- }
- for _, svc := range vipServices {
- services[svc.Service.ServiceName] = struct{}{}
- }
- if vipIndex > index {
- index = vipIndex
+ if !downstreams {
+ // Query the virtual ip table as well to include virtual services that don't have a registered instance yet.
+ // We only need to do this for upstreams currently, so that tproxy can find which discovery chains should be
+ // contacted for failover scenarios. Virtual services technically don't need to be considered as downstreams,
+ // because they will take on the identity of the calling service, rather than the chain itself.
+ vipIndex, vipServices, err := servicesVirtualIPsTxn(tx)
+ if err != nil {
+ return index, nil, fmt.Errorf("failed to list service virtual IPs: %v", err)
+ }
+ for _, svc := range vipServices {
+ services[svc.Service.ServiceName] = struct{}{}
+ }
+ if vipIndex > index {
+ index = vipIndex
+ }
}
} else {
// destinations can only ever be upstream, since they are only allowed as intention destination.
12 changes: 6 additions & 6 deletions agent/grpc-external/services/resource/delete_test.go
@@ -30,17 +30,17 @@ func TestDelete_InputValidation(t *testing.T) {
"no tenancy": func(req *pbresource.DeleteRequest) { req.Id.Tenancy = nil },
"no name": func(req *pbresource.DeleteRequest) { req.Id.Name = "" },
// clone necessary to not pollute DefaultTenancy
"tenancy partition wildcard": func(req *pbresource.DeleteRequest) {
"tenancy partition not default": func(req *pbresource.DeleteRequest) {
req.Id.Tenancy = clone(req.Id.Tenancy)
req.Id.Tenancy.Partition = storage.Wildcard
req.Id.Tenancy.Partition = ""
},
"tenancy namespace wildcard": func(req *pbresource.DeleteRequest) {
"tenancy namespace not default": func(req *pbresource.DeleteRequest) {
req.Id.Tenancy = clone(req.Id.Tenancy)
req.Id.Tenancy.Namespace = storage.Wildcard
req.Id.Tenancy.Namespace = ""
},
"tenancy peername wildcard": func(req *pbresource.DeleteRequest) {
"tenancy peername not local": func(req *pbresource.DeleteRequest) {
req.Id.Tenancy = clone(req.Id.Tenancy)
req.Id.Tenancy.PeerName = storage.Wildcard
req.Id.Tenancy.PeerName = ""
},
}
for desc, modFn := range testCases {
12 changes: 6 additions & 6 deletions agent/grpc-external/services/resource/read_test.go
@@ -33,17 +33,17 @@ func TestRead_InputValidation(t *testing.T) {
"no tenancy": func(req *pbresource.ReadRequest) { req.Id.Tenancy = nil },
"no name": func(req *pbresource.ReadRequest) { req.Id.Name = "" },
// clone necessary to not pollute DefaultTenancy
"tenancy partition wildcard": func(req *pbresource.ReadRequest) {
"tenancy partition not default": func(req *pbresource.ReadRequest) {
req.Id.Tenancy = clone(req.Id.Tenancy)
req.Id.Tenancy.Partition = storage.Wildcard
req.Id.Tenancy.Partition = ""
},
"tenancy namespace wildcard": func(req *pbresource.ReadRequest) {
"tenancy namespace not default": func(req *pbresource.ReadRequest) {
req.Id.Tenancy = clone(req.Id.Tenancy)
req.Id.Tenancy.Namespace = storage.Wildcard
req.Id.Tenancy.Namespace = ""
},
"tenancy peername wildcard": func(req *pbresource.ReadRequest) {
"tenancy peername not local": func(req *pbresource.ReadRequest) {
req.Id.Tenancy = clone(req.Id.Tenancy)
req.Id.Tenancy.PeerName = storage.Wildcard
req.Id.Tenancy.PeerName = ""
},
}
for desc, modFn := range testCases {
16 changes: 9 additions & 7 deletions agent/grpc-external/services/resource/server.go
@@ -131,17 +131,19 @@ func validateId(id *pbresource.ID, errorPrefix string) error {
return status.Errorf(codes.InvalidArgument, "%s.%s is required", errorPrefix, field)
}

+ // Revisit defaulting and non-namespaced resources post-1.16
+ var expected string
switch {
- case id.Tenancy.Namespace == storage.Wildcard:
- field = "tenancy.namespace"
- case id.Tenancy.Partition == storage.Wildcard:
- field = "tenancy.partition"
- case id.Tenancy.PeerName == storage.Wildcard:
- field = "tenancy.peername"
+ case id.Tenancy.Partition != "default":
+ field, expected = "partition", "default"
+ case id.Tenancy.Namespace != "default":
+ field, expected = "namespace", "default"
+ case id.Tenancy.PeerName != "local":
+ field, expected = "peername", "local"
}

if field != "" {
- return status.Errorf(codes.InvalidArgument, "%s.%s cannot be a wildcard", errorPrefix, field)
+ return status.Errorf(codes.InvalidArgument, "%s.tenancy.%s must be %s", errorPrefix, field, expected)
}
return nil
}
5 changes: 1 addition & 4 deletions agent/grpc-external/services/resource/write.go
@@ -119,10 +119,7 @@ func (s *Server) Write(ctx context.Context, req *pbresource.WriteRequest) (*pbre
return errUseWriteStatus
}

- // Enforce same tenancy for owner
- if input.Owner != nil && !proto.Equal(input.Id.Tenancy, input.Owner.Tenancy) {
- return status.Errorf(codes.InvalidArgument, "owner and resource tenancy must be the same")
- }
+ // TODO(spatel): Revisit owner<->resource tenancy rules post-1.16

// Update path.
case err == nil:
51 changes: 12 additions & 39 deletions agent/grpc-external/services/resource/write_test.go
@@ -36,17 +36,17 @@ func TestWrite_InputValidation(t *testing.T) {
"no name": func(req *pbresource.WriteRequest) { req.Resource.Id.Name = "" },
"no data": func(req *pbresource.WriteRequest) { req.Resource.Data = nil },
// clone necessary to not pollute DefaultTenancy
"tenancy partition wildcard": func(req *pbresource.WriteRequest) {
"tenancy partition not default": func(req *pbresource.WriteRequest) {
req.Resource.Id.Tenancy = clone(req.Resource.Id.Tenancy)
req.Resource.Id.Tenancy.Partition = storage.Wildcard
req.Resource.Id.Tenancy.Partition = ""
},
"tenancy namespace wildcard": func(req *pbresource.WriteRequest) {
"tenancy namespace not default": func(req *pbresource.WriteRequest) {
req.Resource.Id.Tenancy = clone(req.Resource.Id.Tenancy)
req.Resource.Id.Tenancy.Namespace = storage.Wildcard
req.Resource.Id.Tenancy.Namespace = ""
},
"tenancy peername wildcard": func(req *pbresource.WriteRequest) {
"tenancy peername not local": func(req *pbresource.WriteRequest) {
req.Resource.Id.Tenancy = clone(req.Resource.Id.Tenancy)
req.Resource.Id.Tenancy.PeerName = storage.Wildcard
req.Resource.Id.Tenancy.PeerName = ""
},
"wrong data type": func(req *pbresource.WriteRequest) {
var err error
@@ -99,24 +99,24 @@ func TestWrite_OwnerValidation(t *testing.T) {
errorContains: "resource.owner.name",
},
// clone necessary to not pollute DefaultTenancy
"owner tenancy partition wildcard": {
"owner tenancy partition not default": {
modReqFn: func(req *pbresource.WriteRequest) {
req.Resource.Owner.Tenancy = clone(req.Resource.Owner.Tenancy)
req.Resource.Owner.Tenancy.Partition = storage.Wildcard
req.Resource.Owner.Tenancy.Partition = ""
},
errorContains: "resource.owner.tenancy.partition",
},
"owner tenancy namespace wildcard": {
"owner tenancy namespace not default": {
modReqFn: func(req *pbresource.WriteRequest) {
req.Resource.Owner.Tenancy = clone(req.Resource.Owner.Tenancy)
req.Resource.Owner.Tenancy.Namespace = storage.Wildcard
req.Resource.Owner.Tenancy.Namespace = ""
},
errorContains: "resource.owner.tenancy.namespace",
},
"owner tenancy peername wildcard": {
"owner tenancy peername not local": {
modReqFn: func(req *pbresource.WriteRequest) {
req.Resource.Owner.Tenancy = clone(req.Resource.Owner.Tenancy)
req.Resource.Owner.Tenancy.PeerName = storage.Wildcard
req.Resource.Owner.Tenancy.PeerName = ""
},
errorContains: "resource.owner.tenancy.peername",
},
@@ -491,33 +491,6 @@ func TestWrite_Owner_Immutable(t *testing.T) {
require.ErrorContains(t, err, "owner cannot be changed")
}

- func TestWrite_Owner_RequireSameTenancy(t *testing.T) {
- server := testServer(t)
- client := testClient(t, server)
-
- demo.Register(server.Registry)
-
- artist, err := demo.GenerateV2Artist()
- require.NoError(t, err)
- rsp1, err := client.Write(testContext(t), &pbresource.WriteRequest{Resource: artist})
- require.NoError(t, err)
-
- // change album tenancy to be different from artist tenancy
- album, err := demo.GenerateV2Album(rsp1.Resource.Id)
- require.NoError(t, err)
- album.Owner.Tenancy = &pbresource.Tenancy{
- Partition: "some",
- Namespace: "other",
- PeerName: "tenancy",
- }
-
- // verify create fails
- _, err = client.Write(testContext(t), &pbresource.WriteRequest{Resource: album})
- require.Error(t, err)
- require.Equal(t, codes.InvalidArgument.String(), status.Code(err).String())
- require.ErrorContains(t, err, "tenancy must be the same")
- }

type blockOnceBackend struct {
storage.Backend
