Integration test for permissive mTLS #17205

Merged · 4 commits · May 23, 2023
Changes from all commits
17 changes: 11 additions & 6 deletions test/integration/consul-container/libs/service/helpers.go
@@ -68,10 +68,13 @@ func createAndRegisterStaticServerAndSidecar(node libcluster.Agent, httpPort int
 		_ = serverService.Terminate()
 	})
 	sidecarCfg := SidecarConfig{
-		Name:         fmt.Sprintf("%s-sidecar", svc.ID),
-		ServiceID:    svc.ID,
-		Namespace:    svc.Namespace,
-		EnableTProxy: svc.Proxy != nil && svc.Proxy.Mode == "transparent",
+		Name:      fmt.Sprintf("%s-sidecar", svc.ID),
+		ServiceID: svc.ID,
+		Namespace: svc.Namespace,
+		EnableTProxy: svc.Connect != nil &&
+			svc.Connect.SidecarService != nil &&
+			svc.Connect.SidecarService.Proxy != nil &&
+			svc.Connect.SidecarService.Proxy.Mode == api.ProxyModeTransparent,
 	}
 	serverConnectProxy, err := NewConnectService(context.Background(), sidecarCfg, []int{svc.Port}, node) // bindPort not used
 	if err != nil {
@@ -129,8 +132,10 @@ func CreateAndRegisterStaticServerAndSidecarWithChecks(node libcluster.Agent, se
 		Port: serviceOpts.HTTPPort,
 		Connect: &api.AgentServiceConnect{
 			SidecarService: &api.AgentServiceRegistration{
-				Proxy: &api.AgentServiceConnectProxyConfig{},
-				Port:  serviceOpts.Connect.Port,
+				Proxy: &api.AgentServiceConnectProxyConfig{
+					Mode: api.ProxyMode(serviceOpts.Connect.Proxy.Mode),
+				},
+				Port: serviceOpts.Connect.Port,
 			},
 		},
 		Checks: api.AgentServiceChecks{
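The old EnableTProxy check read the top-level svc.Proxy field and compared the mode against a raw string; the new one follows the registration down through Connect.SidecarService.Proxy, guarding each link against nil, and compares against the api.ProxyModeTransparent constant. A minimal sketch (hypothetical registration, not part of the diff) of a service this helper now treats as transparent-proxy enabled:

package main

import "github.com/hashicorp/consul/api"

func main() {
	// Hypothetical registration: only the fields the EnableTProxy check
	// inspects are shown.
	svc := &api.AgentServiceRegistration{
		Name: "static-server",
		Port: 8080,
		Connect: &api.AgentServiceConnect{
			SidecarService: &api.AgentServiceRegistration{
				Proxy: &api.AgentServiceConnectProxyConfig{
					// Any nil link in Connect -> SidecarService -> Proxy,
					// or any other Mode value, leaves EnableTProxy false.
					Mode: api.ProxyModeTransparent,
				},
			},
		},
	}
	_ = svc
}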
141 changes: 117 additions & 24 deletions test/integration/consul-container/test/tproxy/tproxy_test.go
@@ -5,20 +5,23 @@ package tproxy

 import (
 	"context"
+	"fmt"
 	"testing"
 	"time"
 
 	"github.com/stretchr/testify/require"
 
+	"github.com/hashicorp/consul/api"
 	"github.com/hashicorp/consul/sdk/testutil/retry"
 	libassert "github.com/hashicorp/consul/test/integration/consul-container/libs/assert"
 	libcluster "github.com/hashicorp/consul/test/integration/consul-container/libs/cluster"
 	libservice "github.com/hashicorp/consul/test/integration/consul-container/libs/service"
 	"github.com/hashicorp/consul/test/integration/consul-container/libs/topology"
 )
 
-// TestTProxyService
-// This test makes sure two services in the same datacenter have connectivity
+var requestRetryTimer = &retry.Timer{Timeout: 120 * time.Second, Wait: 500 * time.Millisecond}
+
+// TestTProxyService makes sure two services in the same datacenter have connectivity
 // with transparent proxy enabled.
 //
 // Steps:
@@ -30,34 +33,82 @@ import (
 func TestTProxyService(t *testing.T) {
 	t.Parallel()
 
-	cluster, _, _ := topology.NewCluster(t, &topology.ClusterConfig{
-		NumServers:                1,
-		NumClients:                2,
-		ApplyDefaultProxySettings: true,
-		BuildOpts: &libcluster.BuildOptions{
-			Datacenter:             "dc1",
-			InjectAutoEncryption:   true,
-			InjectGossipEncryption: true,
-			// TODO(rb): fix the test to not need the service/envoy stack to use :8500
-			AllowHTTPAnyway: true,
-		},
-	})
+	cluster := createCluster(t, 2) // 2 client agent pods
 
 	clientService := createServices(t, cluster)
 	_, adminPort := clientService.GetAdminAddr()
 
 	libassert.AssertUpstreamEndpointStatus(t, adminPort, "static-server.default", "HEALTHY", 1)
 	libassert.AssertContainerState(t, clientService, "running")
-	assertHTTPRequestToVirtualAddress(t, clientService)
+	assertHTTPRequestToVirtualAddress(t, clientService, "static-server")
 }
 
-func assertHTTPRequestToVirtualAddress(t *testing.T, clientService libservice.Service) {
-	timer := &retry.Timer{Timeout: 120 * time.Second, Wait: 500 * time.Millisecond}
-
-	retry.RunWith(timer, t, func(r *retry.R) {
+// TestTProxyPermissiveMTLS makes sure that a service in permissive mTLS mode accepts
+// non-mesh traffic on the upstream service's port.
+//
+// Steps:
+// - Create a single server cluster
+// - Create the static-server and static-client services in the mesh
+// - In default/strict mTLS mode, check that requests to static-server's
+//   virtual address succeed, but requests from outside the mesh fail.
+// - In permissive mTLS mode, check that requests to static-server's
+//   virtual address succeed and that requests from outside the mesh to
+//   the static-server's regular address/port also succeed.
+func TestTProxyPermissiveMTLS(t *testing.T) {
+	t.Parallel()
+
+	// Create three client "pods" each running a client agent and (optionally) a service:
+	// cluster.Agents[0] - consul server
+	// cluster.Agents[1] - static-server
+	// cluster.Agents[2] - static-client
+	// cluster.Agents[3] - (no service)
+	// We run curl requests from cluster.Agents[3] to simulate requests from outside the mesh.
+	cluster := createCluster(t, 3)
+
+	staticServerPod := cluster.Agents[1]
+	nonMeshPod := cluster.Agents[3]
+
+	clientService := createServices(t, cluster)
+	_, adminPort := clientService.GetAdminAddr()
+
+	libassert.AssertUpstreamEndpointStatus(t, adminPort, "static-server.default", "HEALTHY", 1)
+	libassert.AssertContainerState(t, clientService, "running")
+
+	// Validate mesh traffic to the virtual address succeeds in strict/default mTLS mode.
+	assertHTTPRequestToVirtualAddress(t, clientService, "static-server")
+	// Validate non-mesh traffic is blocked in strict/default mTLS mode.
+	assertHTTPRequestToServiceAddress(t, nonMeshPod, staticServerPod, "static-server", false)
+
+	// Put the service in permissive mTLS mode.
+	require.NoError(t, cluster.ConfigEntryWrite(&api.MeshConfigEntry{
+		AllowEnablingPermissiveMutualTLS: true,
+	}))
+	require.NoError(t, cluster.ConfigEntryWrite(&api.ServiceConfigEntry{
+		Kind:          api.ServiceDefaults,
+		Name:          libservice.StaticServerServiceName,
+		MutualTLSMode: api.MutualTLSModePermissive,
+	}))
+
+	// Validate mesh traffic to the virtual address succeeds in permissive mTLS mode.
+	assertHTTPRequestToVirtualAddress(t, clientService, "static-server")
+	// Validate non-mesh traffic succeeds in permissive mode.
+	assertHTTPRequestToServiceAddress(t, nonMeshPod, staticServerPod, "static-server", true)
+}
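cluster.ConfigEntryWrite above is a convenience wrapper in the consul-container test library. Against a real agent, the same two writes can be issued with the plain consul/api client; a sketch assuming a default local agent (the mesh entry must allow permissive mode before any service-defaults entry may select it):

package main

import (
	"log"

	"github.com/hashicorp/consul/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}
	entries := client.ConfigEntries()

	// Opt the mesh in first; otherwise the service-defaults write is rejected.
	if _, _, err := entries.Set(&api.MeshConfigEntry{
		AllowEnablingPermissiveMutualTLS: true,
	}, nil); err != nil {
		log.Fatal(err)
	}

	// Then enable permissive mode for one service via service-defaults.
	if _, _, err := entries.Set(&api.ServiceConfigEntry{
		Kind:          api.ServiceDefaults,
		Name:          "static-server",
		MutualTLSMode: api.MutualTLSModePermissive,
	}, nil); err != nil {
		log.Fatal(err)
	}
}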

+// assertHTTPRequestToVirtualAddress checks that a request to the
+// static-server's virtual address succeeds by running curl in the given
+// `clientService` container.
+//
+// This assumes the destination service is running Fortio. The request is made
+// to `<serverName>.virtual.consul/debug?env=dump` and this checks that
+// `FORTIO_NAME=<serverName>` is contained in the response.
+func assertHTTPRequestToVirtualAddress(t *testing.T, clientService libservice.Service, serverName string) {
+	virtualHostname := fmt.Sprintf("%s.virtual.consul", serverName)
+
+	retry.RunWith(requestRetryTimer, t, func(r *retry.R) {
 		// Test that we can make a request to the virtual ip to reach the upstream.
 		//
-		// This uses a workaround for DNS because I had trouble modifying
+		// NOTE(pglass): This uses a workaround for DNS because I had trouble modifying
 		// /etc/resolv.conf. There is a --dns option to docker run, but it
 		// didn't seem to be exposed via testcontainers. I'm not sure if it would
 		// do what I want. In any case, Docker sets up /etc/resolv.conf for certain
@@ -77,21 +128,63 @@ func assertHTTPRequestToVirtualAddress(t *testing.T, clientService libservice.Se
 		// make the request as root.
 		out, err := clientService.Exec(
 			context.Background(),
-			[]string{"sudo", "sh", "-c", `
+			[]string{"sudo", "sh", "-c", fmt.Sprintf(`
 			set -e
-			VIRTUAL=$(dig @localhost +short static-server.virtual.consul)
+			VIRTUAL=$(dig @localhost +short %[1]s)
 			echo "Virtual IP: $VIRTUAL"
 			curl -s "$VIRTUAL/debug?env=dump"
-			`,
+			`, virtualHostname),
 			},
 		)
-		t.Logf("making call to upstream\nerr = %v\nout = %s", err, out)
+		t.Logf("curl request to upstream virtual address\nerr = %v\nout = %s", err, out)
 		require.NoError(r, err)
 		require.Regexp(r, `Virtual IP: 240.0.0.\d+`, out)
-		require.Contains(r, out, "FORTIO_NAME=static-server")
+		require.Contains(r, out, fmt.Sprintf("FORTIO_NAME=%s", serverName))
 	})
 }
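The dig-plus-curl workaround above can also be expressed without shelling out; a sketch (not in the PR) using a custom net.Resolver pointed at the agent's DNS endpoint, assuming the default DNS port 8600:

package main

import (
	"context"
	"fmt"
	"log"
	"net"
)

func main() {
	// Point a resolver at the Consul agent's DNS endpoint instead of
	// editing /etc/resolv.conf.
	resolver := &net.Resolver{
		PreferGo: true,
		Dial: func(ctx context.Context, network, _ string) (net.Conn, error) {
			var d net.Dialer
			return d.DialContext(ctx, network, "127.0.0.1:8600")
		},
	}
	// With transparent proxy, this returns a virtual IP from 240.0.0.0/4;
	// it is only reachable once traffic is redirected through the sidecar.
	ips, err := resolver.LookupHost(context.Background(), "static-server.virtual.consul")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(ips)
}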

+// assertHTTPRequestToServiceAddress checks the result of a request from the
+// given `client` container to the given `server` container. If expSuccess is
+// true, this checks for a successful request; otherwise it checks for the
+// error we expect when traffic is rejected by mTLS.
+//
+// This assumes the destination service is running Fortio. It makes the request
+// to `<serverIP>:8080/debug?env=dump` and checks for `FORTIO_NAME=<expServiceName>`
+// in the response.
+func assertHTTPRequestToServiceAddress(t *testing.T, client, server libcluster.Agent, expServiceName string, expSuccess bool) {
+	upstreamURL := fmt.Sprintf("http://%s:8080/debug?env=dump", server.GetIP())
+	retry.RunWith(requestRetryTimer, t, func(r *retry.R) {
+		out, err := client.Exec(context.Background(), []string{"curl", "-s", upstreamURL})
+		t.Logf("curl request to upstream service address: url=%s\nerr = %v\nout = %s", upstreamURL, err, out)
+
+		if expSuccess {
+			require.NoError(r, err)
+			require.Contains(r, out, fmt.Sprintf("FORTIO_NAME=%s", expServiceName))
+		} else {
+			require.Error(r, err)
+			require.Contains(r, err.Error(), "exit code 52")
+		}
+	})
+}
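The `exit code 52` assertion corresponds to curl's CURLE_GOT_NOTHING ("Empty reply from server"): in strict mTLS mode the sidecar accepts the TCP connection but sends no HTTP response to plaintext traffic. A plain Go client observes the same rejection as an EOF-style error; a hypothetical sketch:

package main

import (
	"fmt"
	"net/http"
)

func main() {
	// Hypothetical address of a strict-mTLS service's regular port.
	resp, err := http.Get("http://10.0.0.5:8080/debug?env=dump")
	if err != nil {
		// Outside the mesh this fails with something like "EOF":
		// the connection was closed before any response arrived.
		fmt.Println("rejected:", err)
		return
	}
	defer resp.Body.Close()
	fmt.Println("status:", resp.Status)
}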

+func createCluster(t *testing.T, numClients int) *libcluster.Cluster {
+	cluster, _, _ := topology.NewCluster(t, &topology.ClusterConfig{
+		NumServers:                1,
+		NumClients:                numClients,
+		ApplyDefaultProxySettings: true,
+		BuildOpts: &libcluster.BuildOptions{
+			Datacenter:             "dc1",
+			InjectAutoEncryption:   true,
+			InjectGossipEncryption: true,
+			// TODO(rb): fix the test to not need the service/envoy stack to use :8500
+			AllowHTTPAnyway: true,
+		},
+	})
+	return cluster
+}

 // createServices creates the static-client and static-server services with
 // transparent proxy enabled. It returns a Service for the static-client.
 func createServices(t *testing.T, cluster *libcluster.Cluster) libservice.Service {
 	{
 		node := cluster.Agents[1]