peering: expose servers over K8s service #1371
Diff: peering acceptance test (Go)

@@ -5,6 +5,7 @@ import (
 	"fmt"
 	"strconv"
 	"testing"
+	"time"

 	terratestk8s "github.com/gruntwork-io/terratest/modules/k8s"
 	"github.com/hashicorp/consul-k8s/acceptance/framework/consul"
@@ -13,6 +14,7 @@ import (
 	"github.com/hashicorp/consul-k8s/acceptance/framework/k8s"
 	"github.com/hashicorp/consul-k8s/acceptance/framework/logger"
 	"github.com/hashicorp/consul/api"
+	"github.com/hashicorp/consul/sdk/testutil/retry"
 	"github.com/hashicorp/go-version"
 	"github.com/stretchr/testify/require"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -76,7 +78,7 @@ func TestPeering_ConnectNamespaces(t *testing.T) {
 		"global.peering.enabled":        "true",
 		"global.enableConsulNamespaces": "true",

-		"global.image": "thisisnotashwin/consul@sha256:446aad6e02f66e3027756dfc0d34e8e6e2b11ac6ec5637b134b34644ca7cda64",
+		"global.image": "ndhanushkodi/consul-dev@sha256:61b02ac369cc13db6b9af8808b7e3a811bcdc9a09c95ddac0da931f81743091c",

 		"global.tls.enabled":   "false",
 		"global.tls.httpsOnly": strconv.FormatBool(c.ACLsAndAutoEncryptEnabled),
@@ -95,8 +97,10 @@ func TestPeering_ConnectNamespaces(t *testing.T) {
 		"controller.enabled": "true",

-		"dns.enabled":           "true",
-		"dns.enableRedirection": strconv.FormatBool(cfg.EnableTransparentProxy),
+		"dns.enabled":            "true",
+		"dns.enableRedirection":  strconv.FormatBool(cfg.EnableTransparentProxy),
+		"server.replicas":        "3",
+		"server.bootstrapExpect": "3",
 	}

 	staticServerPeerHelmValues := map[string]string{
@@ -110,14 +114,18 @@ func TestPeering_ConnectNamespaces(t *testing.T) {
 		staticServerPeerHelmValues["server.exposeGossipAndRPCPorts"] = "true"
 		staticServerPeerHelmValues["meshGateway.service.type"] = "NodePort"
 		staticServerPeerHelmValues["meshGateway.service.nodePort"] = "30100"
+		staticServerPeerHelmValues["server.exposeService.type"] = "NodePort"
+		staticServerPeerHelmValues["server.exposeService.nodePort.grpc"] = "30200"
+		staticServerPeerHelmValues["server.replicas"] = "1"
+		staticServerPeerHelmValues["server.bootstrapExpect"] = "1"
 	}

 	releaseName := helpers.RandomName()

-	helpers.MergeMaps(staticServerPeerHelmValues, commonHelmValues)
+	helpers.MergeMaps(commonHelmValues, staticServerPeerHelmValues)
Inline review comment (on the MergeMaps swap above):
Reviewer: Can we swap the order here so that we deploy static server cluster and the static client cluster with […]
Author: I removed the replicas 3 for servers, and made it a non-kind-specific config instead!
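A minimal sketch of the merge semantics this swap relies on, assuming helpers.MergeMaps copies the second map's entries into the first so the second argument wins on conflicting keys; the function body below is an illustration, not the actual helper:

package main

import "fmt"

// mergeMaps mirrors the assumed behavior of helpers.MergeMaps: entries
// from src overwrite matching keys in dst.
func mergeMaps(dst, src map[string]string) {
	for k, v := range src {
		dst[k] = v
	}
}

func main() {
	common := map[string]string{"server.replicas": "3"}
	kindOverrides := map[string]string{"server.replicas": "1"}

	// After the swap, the kind-specific values are merged into the common
	// values, so the override ("1") is what NewHelmCluster receives.
	mergeMaps(common, kindOverrides)
	fmt.Println(common["server.replicas"]) // prints "1"
}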
 	// Install the first peer where static-server will be deployed in the static-server kubernetes context.
-	staticServerPeerCluster := consul.NewHelmCluster(t, staticServerPeerHelmValues, staticServerPeerClusterContext, cfg, releaseName)
+	staticServerPeerCluster := consul.NewHelmCluster(t, commonHelmValues, staticServerPeerClusterContext, cfg, releaseName)
 	staticServerPeerCluster.Create(t)

 	staticClientPeerHelmValues := map[string]string{
@@ -128,12 +136,16 @@ func TestPeering_ConnectNamespaces(t *testing.T) {
 		staticClientPeerHelmValues["server.exposeGossipAndRPCPorts"] = "true"
 		staticClientPeerHelmValues["meshGateway.service.type"] = "NodePort"
 		staticClientPeerHelmValues["meshGateway.service.nodePort"] = "30100"
+		staticClientPeerHelmValues["server.exposeService.type"] = "NodePort"
+		staticClientPeerHelmValues["server.exposeService.nodePort.grpc"] = "30200"
+		staticClientPeerHelmValues["server.replicas"] = "1"
+		staticClientPeerHelmValues["server.bootstrapExpect"] = "1"
 	}

-	helpers.MergeMaps(staticClientPeerHelmValues, commonHelmValues)
+	helpers.MergeMaps(commonHelmValues, staticClientPeerHelmValues)

 	// Install the second peer where static-client will be deployed in the static-client kubernetes context.
-	staticClientPeerCluster := consul.NewHelmCluster(t, staticClientPeerHelmValues, staticClientPeerClusterContext, cfg, releaseName)
+	staticClientPeerCluster := consul.NewHelmCluster(t, commonHelmValues, staticClientPeerClusterContext, cfg, releaseName)
 	staticClientPeerCluster.Create(t)

 	// Create the peering acceptor on the client peer.
@@ -142,6 +154,14 @@ func TestPeering_ConnectNamespaces(t *testing.T) {
 		k8s.KubectlDelete(t, staticClientPeerClusterContext.KubectlOptions(t), "../fixtures/bases/peering/peering-acceptor.yaml")
 	})

+	// Ensure the secret is created.
+	timer := &retry.Timer{Timeout: 1 * time.Minute, Wait: 1 * time.Second}
+	retry.RunWith(timer, t, func(r *retry.R) {
+		acceptorSecretResourceVersion, err := k8s.RunKubectlAndGetOutputE(t, staticClientPeerClusterContext.KubectlOptions(t), "get", "peeringacceptor", "server", "-o", "jsonpath={.status.secret.resourceVersion}")
+		require.NoError(r, err)
+		require.NotEmpty(r, acceptorSecretResourceVersion)
+	})

 	// Copy secret from client peer to server peer.
 	k8s.CopySecret(t, staticClientPeerClusterContext, staticServerPeerClusterContext, "api-token")
Diff: new Helm template for the expose-servers Service (all lines added)

@@ -0,0 +1,63 @@
{{- $serverEnabled := (or (and (ne (.Values.server.enabled | toString) "-") .Values.server.enabled) (and (eq (.Values.server.enabled | toString) "-") .Values.global.enabled)) -}}
{{- $serverExposeServiceEnabled := (or (and (ne (.Values.server.exposeService.enabled | toString) "-") .Values.server.exposeService.enabled) (and (eq (.Values.server.exposeService.enabled | toString) "-") (or .Values.global.peering.enabled .Values.global.adminPartitions.enabled))) -}}
{{- if (and $serverEnabled $serverExposeServiceEnabled) }}
# Service with an external IP to reach Consul servers.
# Used for exposing the gRPC port for peering and the ports clients in admin partitions use to discover servers.
apiVersion: v1
kind: Service
metadata:
  name: {{ template "consul.fullname" . }}-expose-servers
  namespace: {{ .Release.Namespace }}
  labels:
    app: {{ template "consul.name" . }}
    chart: {{ template "consul.chart" . }}
    heritage: {{ .Release.Service }}
    release: {{ .Release.Name }}
    component: server
  annotations:
    {{- if .Values.server.exposeService.annotations }}
    {{ tpl .Values.server.exposeService.annotations . | nindent 4 | trim }}
    {{- end }}
spec:
  type: "{{ .Values.server.exposeService.type }}"
  ports:
  {{- if (or (not .Values.global.tls.enabled) (not .Values.global.tls.httpsOnly)) }}
Inline review comment (on the 8500/8501 port blocks below):
Reviewer: Do we want to include port 8500? On EKS, load balancers health check the first port in the service. So previously, when only 8501 was here and TLS was disabled, the load balancer would kick all the servers off as unhealthy endpoints. Unless we are always going to require TLS, we need to keep port 8500.
Author: I still need to add TLS helm tests based on a decision on this comment.
  - name: http
    port: 8500
    targetPort: 8500
    {{- if (and (eq .Values.server.exposeService.type "NodePort") .Values.server.exposeService.nodePort.http) }}
    nodePort: {{ .Values.server.exposeService.nodePort.http }}
    {{- end }}
  {{- end }}
  {{- if .Values.global.tls.enabled }}
  - name: https
    port: 8501
    targetPort: 8501
    {{- if (and (eq .Values.server.exposeService.type "NodePort") .Values.server.exposeService.nodePort.https) }}
    nodePort: {{ .Values.server.exposeService.nodePort.https }}
    {{- end }}
  {{- end }}
  - name: serflan
    port: 8301
    targetPort: 8301
    {{- if (and (eq .Values.server.exposeService.type "NodePort") .Values.server.exposeService.nodePort.serf) }}
    nodePort: {{ .Values.server.exposeService.nodePort.serf }}
    {{- end }}
  - name: rpc
    port: 8300
    targetPort: 8300
    {{- if (and (eq .Values.server.exposeService.type "NodePort") .Values.server.exposeService.nodePort.rpc) }}
    nodePort: {{ .Values.server.exposeService.nodePort.rpc }}
    {{- end }}
  - name: grpc
    port: 8503
    targetPort: 8503
    {{- if (and (eq .Values.server.exposeService.type "NodePort") .Values.server.exposeService.nodePort.grpc) }}
    nodePort: {{ .Values.server.exposeService.nodePort.grpc }}
    {{- end }}
  selector:
    app: {{ template "consul.name" . }}
    release: "{{ .Release.Name }}"
    component: server
{{- end }}
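For illustration, a hypothetical values map in the same style as the acceptance test above that turns this Service on explicitly. The keys come from the template; note that server.exposeService.enabled defaults to "-", which the template resolves from global.peering.enabled or global.adminPartitions.enabled:

package main

import "fmt"

func main() {
	// Hypothetical Helm values: force the expose-servers Service on as a
	// NodePort. The port mirrors the kind-specific test configuration above.
	exposeServersHelmValues := map[string]string{
		"server.exposeService.enabled":       "true",
		"server.exposeService.type":          "NodePort",
		"server.exposeService.nodePort.grpc": "30200",
	}
	fmt.Println(exposeServersHelmValues)
}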
Review comment (on the cross-peer connection check in the test): We need to try this connection for a bit longer, because the peering connection needs to get set up, the exported service needs to make it over to the importing side, and the dialer is likely retrying a few times to reach the leader, so it makes sense that this takes a bit longer than it usually does.
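A sketch of what acting on this comment could look like, reusing the retry helpers the test already imports. The timeout, the deployment name, and the curl target are assumptions for illustration, not part of this PR:

// Retry the cross-peer request for longer than a local check would need,
// since peering setup, service export, and leader discovery all take time.
timer := &retry.Timer{Timeout: 3 * time.Minute, Wait: 5 * time.Second}
retry.RunWith(timer, t, func(r *retry.R) {
	// Hypothetical check: curl the exported static-server from the
	// static-client pod and require a successful response. The address
	// below is illustrative only.
	out, err := k8s.RunKubectlAndGetOutputE(t, staticClientPeerClusterContext.KubectlOptions(t),
		"exec", "deploy/static-client", "-c", "static-client", "--",
		"curl", "-sf", "http://static-server.virtual.consul")
	require.NoError(r, err)
	require.NotEmpty(r, out)
})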