From 3adc4916103a4d21ab1a8d23d37e83e284294e66 Mon Sep 17 00:00:00 2001 From: Nick Irvine <115657443+nfi-hashicorp@users.noreply.github.com> Date: Mon, 28 Aug 2023 17:06:18 -0700 Subject: [PATCH] test-integ/peering: peer through mesh gateway [NET-4609] (#6747) Co-authored-by: Matt Keeler --- .../ac7_2_rotate_leader_test.go | 3 +- test-integ/peering_commontopo/commontopo.go | 13 ++- testing/deployer/sprawl/acl_rules.go | 83 +++++++++++++------ testing/deployer/sprawl/peering.go | 11 ++- 4 files changed, 80 insertions(+), 30 deletions(-) diff --git a/test-integ/peering_commontopo/ac7_2_rotate_leader_test.go b/test-integ/peering_commontopo/ac7_2_rotate_leader_test.go index 986e015a0244..b1ba326741c9 100644 --- a/test-integ/peering_commontopo/ac7_2_rotate_leader_test.go +++ b/test-integ/peering_commontopo/ac7_2_rotate_leader_test.go @@ -196,7 +196,8 @@ func (s *ac7_2RotateLeaderSuite) test(t *testing.T, ct *commonTopo) { func rotateLeader(t *testing.T, cl *api.Client) { t.Helper() oldLeader := findLeader(t, cl) - cl.Operator().RaftLeaderTransfer(nil) + _, err := cl.Operator().RaftLeaderTransfer(nil) + require.NoError(t, err) retry.RunWith(&retry.Timer{Timeout: 30 * time.Second, Wait: time.Second}, t, func(r *retry.R) { newLeader := findLeader(r, cl) require.NotEqual(r, oldLeader.ID, newLeader.ID) diff --git a/test-integ/peering_commontopo/commontopo.go b/test-integ/peering_commontopo/commontopo.go index d0e2c8e55dcb..8ab452eebed3 100644 --- a/test-integ/peering_commontopo/commontopo.go +++ b/test-integ/peering_commontopo/commontopo.go @@ -57,12 +57,14 @@ func NewCommonTopo(t *testing.T) *commonTopo { ct := commonTopo{} + const nServers = 3 + // Make 3-server clusters in dc1 and dc2 // For simplicity, the Name and Datacenter of the clusters are the same. // dc1 and dc2 should be symmetric. 
- dc1 := clusterWithJustServers("dc1", 3) + dc1 := clusterWithJustServers("dc1", nServers) ct.DC1 = dc1 - dc2 := clusterWithJustServers("dc2", 3) + dc2 := clusterWithJustServers("dc2", nServers) ct.DC2 = dc2 // dc3 is a failover cluster for both dc1 and dc2 dc3 := clusterWithJustServers("dc3", 1) @@ -366,6 +368,11 @@ func setupGlobals(clu *topology.Cluster) { Mode: api.MeshGatewayModeLocal, }, }, + &api.MeshConfigEntry{ + Peering: &api.PeeringMeshConfig{ + PeerThroughMeshGateways: true, + }, + }, ) } } @@ -393,7 +400,7 @@ func clusterWithJustServers(name string, numServers int) *topology.Cluster { Nodes: newTopologyServerSet( name+"-server", numServers, - []string{name, "wan"}, + []string{name}, nil, ), } diff --git a/testing/deployer/sprawl/acl_rules.go b/testing/deployer/sprawl/acl_rules.go index 036149cdfd6c..1c8224fc4164 100644 --- a/testing/deployer/sprawl/acl_rules.go +++ b/testing/deployer/sprawl/acl_rules.go @@ -107,20 +107,26 @@ func tokenForService(svc *topology.Service, overridePolicy *api.ACLPolicy, enter return token } -func policyForMeshGateway(svc *topology.Service, enterprise bool) *api.ACLPolicy { - policyName := "mesh-gateway--" + svc.ID.ACLString() - - policy := &api.ACLPolicy{ - Name: policyName, - Description: policyName, - } - if enterprise { - policy.Partition = svc.ID.Partition - policy.Namespace = "default" - } +const ( + meshGatewayCommunityRules = ` +service "mesh-gateway" { + policy = "write" +} +service_prefix "" { + policy = "read" +} +node_prefix "" { + policy = "read" +} +agent_prefix "" { + policy = "read" +} +# for peering +mesh = "write" +peering = "read" +` - if enterprise { - policy.Rules = ` + meshGatewayEntDefaultRules = ` namespace_prefix "" { service "mesh-gateway" { policy = "write" @@ -137,26 +143,53 @@ agent_prefix "" { } # for peering mesh = "write" -peering = "read" -` - } else { - policy.Rules = ` -service "mesh-gateway" { - policy = "write" -} -service_prefix "" { - policy = "read" + +partition_prefix "" { + peering = "read" } -node_prefix "" { - policy = "read" +` + + meshGatewayEntNonDefaultRules = ` +namespace_prefix "" { + service "mesh-gateway" { + policy = "write" + } + service_prefix "" { + policy = "read" + } + node_prefix "" { + policy = "read" + } } agent_prefix "" { policy = "read" } # for peering mesh = "write" -peering = "read" ` +) + +func policyForMeshGateway(svc *topology.Service, enterprise bool) *api.ACLPolicy { + policyName := "mesh-gateway--" + svc.ID.ACLString() + + policy := &api.ACLPolicy{ + Name: policyName, + Description: policyName, + } + if enterprise { + fmt.Printf("Enterprise mgw ACLS - Partition: %s, Namespace: default", svc.ID.Partition) + policy.Partition = svc.ID.Partition + policy.Namespace = "default" + } + + if enterprise { + if svc.ID.Partition == "default" { + policy.Rules = meshGatewayEntDefaultRules + } else { + policy.Rules = meshGatewayEntNonDefaultRules + } + } else { + policy.Rules = meshGatewayCommunityRules } return policy diff --git a/testing/deployer/sprawl/peering.go b/testing/deployer/sprawl/peering.go index 5275161cfd9a..94d04b0f29db 100644 --- a/testing/deployer/sprawl/peering.go +++ b/testing/deployer/sprawl/peering.go @@ -5,7 +5,9 @@ package sprawl import ( "context" + "errors" "fmt" + "net/http" "strings" "time" @@ -89,7 +91,14 @@ func (s *Sprawl) initPeerings() error { time.Sleep(50 * time.Millisecond) goto ESTABLISH } - return fmt.Errorf("error establishing peering with token for %q: %w", peering.String(), err) + // Establish and friends return an api.StatusError value, not pointer + // 
so errors.As below matches it by value, not via *api.StatusError.
+			var asStatusError api.StatusError
+			if errors.As(err, &asStatusError) && asStatusError.Code == http.StatusGatewayTimeout {
+				time.Sleep(50 * time.Millisecond)
+				goto ESTABLISH
+			}
+			return fmt.Errorf("error establishing peering with token for %q: %#v", peering.String(), err)
 		}
 
 		logger.Info("peering established", "peering", peering.String())
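
Not part of the patch above, but for context: the api.MeshConfigEntry added in setupGlobals (commontopo.go) is what turns on peering through mesh gateways for each cluster; the topology seeds it as an initial config entry at cluster creation. A minimal standalone sketch of writing the same cluster-wide "mesh" config entry through the Consul API client follows. The agent address and token are whatever api.DefaultConfig() resolves; everything outside the entry itself is illustrative.

package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/consul/api"
)

func main() {
	// Talk to whatever agent api.DefaultConfig() resolves
	// (address/token via the usual CONSUL_* environment variables).
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// Cluster-wide "mesh" config entry: route peering control-plane
	// traffic through the mesh gateways, the same entry the topology
	// above seeds via its initial config entries.
	entry := &api.MeshConfigEntry{
		Peering: &api.PeeringMeshConfig{
			PeerThroughMeshGateways: true,
		},
	}

	ok, _, err := client.ConfigEntries().Set(entry, nil)
	if err != nil || !ok {
		log.Fatalf("writing mesh config entry: ok=%v err=%v", ok, err)
	}
	fmt.Println("peering through mesh gateways enabled")
}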
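
Also not part of the patch: the retry added to initPeerings (peering.go) matches api.StatusError by value because the Consul API client returns it as a value, so a *api.StatusError target would never match. A small self-contained sketch of that errors.As pattern is below; the wrapped 504 is constructed by hand purely for illustration (only the Code field, which the patch itself relies on, is assumed here), and retryableStatus is a hypothetical helper, not something in the deployer.

package main

import (
	"errors"
	"fmt"
	"net/http"

	"github.com/hashicorp/consul/api"
)

// retryableStatus reports whether err wraps an api.StatusError carrying the
// given HTTP status code. The target is a value, not *api.StatusError,
// because the Consul API client returns StatusError by value.
func retryableStatus(err error, code int) bool {
	var statusErr api.StatusError
	return errors.As(err, &statusErr) && statusErr.Code == code
}

func main() {
	// Hand-built stand-in for a failed Establish call that wrapped a 504.
	err := fmt.Errorf("establishing peering: %w", api.StatusError{Code: http.StatusGatewayTimeout})

	fmt.Println(retryableStatus(err, http.StatusGatewayTimeout)) // true
	fmt.Println(retryableStatus(err, http.StatusNotFound))       // false
}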