peering: support peering establishment over mesh gateways, remove ability to pass external addresses to token generation endpoint #1610
@@ -96,7 +96,7 @@ func CheckStaticServerConnectionMultipleFailureMessages(t *testing.T, options *k
         expectedOutput = expectedSuccessOutput
     }

-    retrier := &retry.Timer{Timeout: 160 * time.Second, Wait: 2 * time.Second}
+    retrier := &retry.Timer{Timeout: 320 * time.Second, Wait: 2 * time.Second}

    args := []string{"exec", "deploy/" + sourceApp, "-c", sourceApp, "--", "curl", "-vvvsSf"}
    args = append(args, curlArgs...)

Author: This was because I noticed that by the time the peering was established and the exported service was actually replicated to the other side, this curl was timing out. It takes a bit longer for the peering to get set up; the causes are dialing a follower node and needing to retry the peering, and a delta xDS bug that causes the mesh gateways to take extra time to get their config.
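For context, here is a minimal sketch of the retry pattern this helper relies on, assuming the Consul SDK's testutil/retry package; the checkWithRetry wrapper and its check argument are hypothetical names, not part of this PR. With Timeout: 320s and Wait: 2s, the check gets roughly 160 attempts before the test fails:

package k8s

import (
    "testing"
    "time"

    "github.com/hashicorp/consul/sdk/testutil/retry"
)

// checkWithRetry (hypothetical) re-runs check until it succeeds or the timer
// expires. Doubling the timeout to 320s leaves headroom for slow peering
// establishment (dialing a follower node, mesh gateways waiting on config).
func checkWithRetry(t *testing.T, check func() error) {
    retrier := &retry.Timer{Timeout: 320 * time.Second, Wait: 2 * time.Second}
    retry.RunWith(retrier, t, func(r *retry.R) {
        // r.Check fails this attempt and schedules another one while the
        // retry budget lasts.
        r.Check(check())
    })
}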
@@ -0,0 +1,2 @@
+resources:
+  - meshpeering.yaml
@@ -0,0 +1,7 @@
+apiVersion: consul.hashicorp.com/v1alpha1
+kind: Mesh
+metadata:
+  name: mesh
+spec:
+  peering:
+    peerThroughMeshGateways: true
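Setting peerThroughMeshGateways: true routes the peering control plane through the mesh gateways, which is why the peering token no longer needs externally supplied server addresses. As a hedged sketch (not part of this PR), the synced config entry could be checked with the Consul API client; verifyPeeringThroughGateways and its client parameter are hypothetical names:

package example

import (
    "fmt"

    "github.com/hashicorp/consul/api"
)

// verifyPeeringThroughGateways reads the cluster-wide mesh config entry
// (kind "mesh", name "mesh") and confirms peering traffic is set to flow
// through the mesh gateways.
func verifyPeeringThroughGateways(client *api.Client) error {
    entry, _, err := client.ConfigEntries().Get("mesh", "mesh", nil)
    if err != nil {
        return err
    }
    mesh, ok := entry.(*api.MeshConfigEntry)
    if !ok || mesh.Peering == nil || !mesh.Peering.PeerThroughMeshGateways {
        return fmt.Errorf("mesh config entry does not peer through mesh gateways")
    }
    return nil
}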
@@ -53,10 +53,14 @@ func TestPeering_Connect(t *testing.T) {

     for _, c := range cases {
         t.Run(c.name, func(t *testing.T) {
+            fmt.Println("starting test")

Reviewer: Should we replace the Println's with logger.Log or t.Log or something?

Author: Whoops, thanks, I forgot to remove those. I was just using them locally so I could see what was going on while running the test!
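A small sketch of the suggested swap, assuming the terratest logger package this file already uses elsewhere (logProgress is a hypothetical name):

package example

import (
    "testing"

    "github.com/gruntwork-io/terratest/modules/logger"
)

// logProgress routes progress messages through the test's logger instead of
// fmt.Println, so output is attributed to the running test and honors -v.
func logProgress(t *testing.T) {
    logger.Log(t, "starting test") // terratest logger, as used for "creating mesh config" below
    t.Log("starting test")         // standard-library alternative
}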
            staticServerPeerClusterContext := env.DefaultContext(t)
            staticClientPeerClusterContext := env.Context(t, environment.SecondaryContextName)

            commonHelmValues := map[string]string{
+                "global.imageK8S": "ndhanushkodi/consul-k8s-dev:pmgw1",

Reviewer: Don't forget these before merge!

Author: Thanks!

+                "global.image": "ndhanushkodi/consul-dev:peermeshgw2",
                "global.peering.enabled": "true",
                "global.tls.enabled": "true",
@@ -73,6 +77,7 @@ func TestPeering_Connect(t *testing.T) {
                "dns.enabled": "true",
                "dns.enableRedirection": strconv.FormatBool(cfg.EnableTransparentProxy),
+                "peering.tokenGeneration.serverAddresses.source": "consul",
            }

            staticServerPeerHelmValues := map[string]string{

@@ -90,14 +95,13 @@ func TestPeering_Connect(t *testing.T) {
                staticServerPeerHelmValues["server.exposeGossipAndRPCPorts"] = "true"
                staticServerPeerHelmValues["meshGateway.service.type"] = "NodePort"
                staticServerPeerHelmValues["meshGateway.service.nodePort"] = "30100"
-                staticServerPeerHelmValues["server.exposeService.type"] = "NodePort"

Reviewer: QQ: why are we removing the grpc port configs for each of these?

Author: We no longer need the exposeService at all for peering connections, so this removes any configuration of it.
-                staticServerPeerHelmValues["server.exposeService.nodePort.grpc"] = "30200"
            }

            releaseName := helpers.RandomName()

            helpers.MergeMaps(staticServerPeerHelmValues, commonHelmValues)

+            fmt.Println("install first peer")
            // Install the first peer where static-server will be deployed in the static-server kubernetes context.
            staticServerPeerCluster := consul.NewHelmCluster(t, staticServerPeerHelmValues, staticServerPeerClusterContext, cfg, releaseName)
            staticServerPeerCluster.Create(t)
@@ -107,23 +111,38 @@ func TestPeering_Connect(t *testing.T) {
            }

            if !cfg.UseKind {
                staticServerPeerHelmValues["server.replicas"] = "3"
                staticClientPeerHelmValues["server.replicas"] = "3"
            }

            if cfg.UseKind {
                staticClientPeerHelmValues["server.exposeGossipAndRPCPorts"] = "true"
                staticClientPeerHelmValues["meshGateway.service.type"] = "NodePort"
                staticClientPeerHelmValues["meshGateway.service.nodePort"] = "30100"
-                staticClientPeerHelmValues["server.exposeService.type"] = "NodePort"
-                staticClientPeerHelmValues["server.exposeService.nodePort.grpc"] = "30200"
            }

            helpers.MergeMaps(staticClientPeerHelmValues, commonHelmValues)

+            fmt.Println("install second (client) peer")
            // Install the second peer where static-client will be deployed in the static-client kubernetes context.
            staticClientPeerCluster := consul.NewHelmCluster(t, staticClientPeerHelmValues, staticClientPeerClusterContext, cfg, releaseName)
            staticClientPeerCluster.Create(t)

+            fmt.Println("configure mesh resource so peering token will have mesh gateway addrs")
+            // Create Mesh resource to use mesh gateways.
+            logger.Log(t, "creating mesh config")
+            kustomizeMeshDir := "../fixtures/bases/mesh-peering"
+
+            k8s.KubectlApplyK(t, staticServerPeerClusterContext.KubectlOptions(t), kustomizeMeshDir)
+            helpers.Cleanup(t, cfg.NoCleanupOnFailure, func() {
+                k8s.KubectlDeleteK(t, staticServerPeerClusterContext.KubectlOptions(t), kustomizeMeshDir)
+            })
+
+            k8s.KubectlApplyK(t, staticClientPeerClusterContext.KubectlOptions(t), kustomizeMeshDir)
+            helpers.Cleanup(t, cfg.NoCleanupOnFailure, func() {
+                k8s.KubectlDeleteK(t, staticClientPeerClusterContext.KubectlOptions(t), kustomizeMeshDir)
+            })
+            fmt.Println("create acceptor on (client) peer")
            // Create the peering acceptor on the client peer.
            k8s.KubectlApply(t, staticClientPeerClusterContext.KubectlOptions(t), "../fixtures/bases/peering/peering-acceptor.yaml")
            helpers.Cleanup(t, cfg.NoCleanupOnFailure, func() {

@@ -138,9 +157,13 @@ func TestPeering_Connect(t *testing.T) {
                require.NotEmpty(r, acceptorSecretName)
            })

+            fmt.Println("copying secret from client peer to server peer, printing contents:")
            // Copy secret from client peer to server peer.
            k8s.CopySecret(t, staticClientPeerClusterContext, staticServerPeerClusterContext, "api-token")
+            resp, _ := k8s.RunKubectlAndGetOutputE(t, staticClientPeerClusterContext.KubectlOptions(t), "secret", "api-token", "-o", "yaml")
+            fmt.Println(resp)

+            fmt.Println("creating peering dialer on server peer")
            // Create the peering dialer on the server peer.
            k8s.KubectlApply(t, staticServerPeerClusterContext.KubectlOptions(t), "../fixtures/bases/peering/peering-dialer.yaml")
            helpers.Cleanup(t, cfg.NoCleanupOnFailure, func() {

@@ -261,6 +284,7 @@ func TestPeering_Connect(t *testing.T) {
            } else {
                k8s.CheckStaticServerConnectionSuccessful(t, staticClientOpts, staticClientName, "http://localhost:1234")
            }
+
        })
    }
}
@@ -237,7 +237,7 @@ spec:
          {{- if .Values.global.adminPartitions.enabled }}
            -service-partition={{ .Values.global.adminPartitions.name }} \
          {{- end }}
-            -log-level={{ default .Values.global.logLevel }} \

Reviewer: Was this intentional?

Reviewer: Nitya mentioned above that she will remove it before merge. I have no idea why it didn't show up in the diffs for review. 🤷

Reviewer: Weird!

+            -log-level="trace" \

Author: Will remove.

            -log-json={{ .Values.global.logJSON }}
        livenessProbe:
          tcpSocket:
Reviewer: I'm not sure, but should the token changes from the Helm chart be listed as a breaking change?

Author: Yup, added it to breaking instead!