diff --git a/client/allocrunner/network_manager_linux.go b/client/allocrunner/network_manager_linux.go index 50f509b14166..26c2e0cd80b9 100644 --- a/client/allocrunner/network_manager_linux.go +++ b/client/allocrunner/network_manager_linux.go @@ -190,13 +190,14 @@ func newNetworkConfigurator(log hclog.Logger, alloc *structs.Allocation, config switch { case netMode == "bridge": - c, err := newBridgeNetworkConfigurator(log, config.BridgeNetworkName, config.BridgeNetworkAllocSubnet, config.BridgeNetworkHairpinMode, config.CNIPath, ignorePortMappingHostIP) + + c, err := newBridgeNetworkConfigurator(log, alloc, config.BridgeNetworkName, config.BridgeNetworkAllocSubnet, config.BridgeNetworkHairpinMode, config.CNIPath, ignorePortMappingHostIP, config.Node) if err != nil { return nil, err } return &synchronizedNetworkConfigurator{c}, nil case strings.HasPrefix(netMode, "cni/"): - c, err := newCNINetworkConfigurator(log, config.CNIPath, config.CNIInterfacePrefix, config.CNIConfigDir, netMode[4:], ignorePortMappingHostIP) + c, err := newCNINetworkConfigurator(log, config.CNIPath, config.CNIInterfacePrefix, config.CNIConfigDir, netMode[4:], ignorePortMappingHostIP, config.Node) if err != nil { return nil, err } diff --git a/client/allocrunner/networking_bridge_linux.go b/client/allocrunner/networking_bridge_linux.go index 89b7b2cb0b2d..acd8d4e6aaf6 100644 --- a/client/allocrunner/networking_bridge_linux.go +++ b/client/allocrunner/networking_bridge_linux.go @@ -43,7 +43,7 @@ type bridgeNetworkConfigurator struct { logger hclog.Logger } -func newBridgeNetworkConfigurator(log hclog.Logger, bridgeName, ipRange string, hairpinMode bool, cniPath string, ignorePortMappingHostIP bool) (*bridgeNetworkConfigurator, error) { +func newBridgeNetworkConfigurator(log hclog.Logger, alloc *structs.Allocation, bridgeName, ipRange string, hairpinMode bool, cniPath string, ignorePortMappingHostIP bool, node *structs.Node) (*bridgeNetworkConfigurator, error) { b := &bridgeNetworkConfigurator{ 
bridgeName: bridgeName, allocSubnet: ipRange, @@ -59,7 +59,20 @@ func newBridgeNetworkConfigurator(log hclog.Logger, bridgeName, ipRange string, b.allocSubnet = defaultNomadAllocSubnet } - c, err := newCNINetworkConfiguratorWithConf(log, cniPath, bridgeNetworkAllocIfPrefix, ignorePortMappingHostIP, buildNomadBridgeNetConfig(*b)) + var netCfg []byte + + tg := alloc.Job.LookupTaskGroup(alloc.TaskGroup) + for _, svc := range tg.Services { + if svc.Connect.HasTransparentProxy() { + netCfg = buildNomadBridgeNetConfigForTProxy(*b) + break + } + } + if netCfg == nil { + netCfg = buildNomadBridgeNetConfig(*b) + } + + c, err := newCNINetworkConfiguratorWithConf(log, cniPath, bridgeNetworkAllocIfPrefix, ignorePortMappingHostIP, netCfg, node) if err != nil { return nil, err } @@ -147,6 +160,14 @@ func buildNomadBridgeNetConfig(b bridgeNetworkConfigurator) []byte { cniAdminChainName)) } +func buildNomadBridgeNetConfigForTProxy(b bridgeNetworkConfigurator) []byte { + return []byte(fmt.Sprintf(nomadCNIConfigTemplateForTProxy, + b.bridgeName, + b.hairpinMode, + b.allocSubnet, + cniAdminChainName)) +} + // Update website/content/docs/networking/cni.mdx when the bridge configuration // is modified. 
const nomadCNIConfigTemplate = `{ @@ -190,3 +211,49 @@ const nomadCNIConfigTemplate = `{ ] } ` + +const nomadCNIConfigTemplateForTProxy = `{ + "cniVersion": "0.4.0", + "name": "nomad", + "plugins": [ + { + "type": "loopback" + }, + { + "type": "bridge", + "bridge": %q, + "ipMasq": true, + "isGateway": true, + "forceAddress": true, + "hairpinMode": %v, + "ipam": { + "type": "host-local", + "ranges": [ + [ + { + "subnet": %q + } + ] + ], + "routes": [ + { "dst": "0.0.0.0/0" } + ] + } + }, + { + "type": "firewall", + "backend": "iptables", + "iptablesAdminChainName": %q + }, + { + "type": "portmap", + "capabilities": {"portMappings": true}, + "snat": true + }, + { + "type": "consul-cni", + "log_level": "debug" + } + ] +} +` diff --git a/client/allocrunner/networking_cni.go b/client/allocrunner/networking_cni.go index 3641aebcb893..05c1faa9c59d 100644 --- a/client/allocrunner/networking_cni.go +++ b/client/allocrunner/networking_cni.go @@ -17,13 +17,16 @@ import ( "path/filepath" "regexp" "sort" + "strconv" "strings" "time" cni "github.com/containerd/go-cni" cnilibrary "github.com/containernetworking/cni/libcni" "github.com/coreos/go-iptables/iptables" + consulIPTables "github.com/hashicorp/consul/sdk/iptables" log "github.com/hashicorp/go-hclog" + "github.com/hashicorp/nomad/helper" "github.com/hashicorp/nomad/nomad/structs" "github.com/hashicorp/nomad/plugins/drivers" ) @@ -47,26 +50,30 @@ type cniNetworkConfigurator struct { cni cni.CNI cniConf []byte ignorePortMappingHostIP bool + nodeAttrs map[string]string + nodeMeta map[string]string rand *rand.Rand logger log.Logger } -func newCNINetworkConfigurator(logger log.Logger, cniPath, cniInterfacePrefix, cniConfDir, networkName string, ignorePortMappingHostIP bool) (*cniNetworkConfigurator, error) { +func newCNINetworkConfigurator(logger log.Logger, cniPath, cniInterfacePrefix, cniConfDir, networkName string, ignorePortMappingHostIP bool, node *structs.Node) (*cniNetworkConfigurator, error) { cniConf, err := 
loadCNIConf(cniConfDir, networkName) if err != nil { return nil, fmt.Errorf("failed to load CNI config: %v", err) } - return newCNINetworkConfiguratorWithConf(logger, cniPath, cniInterfacePrefix, ignorePortMappingHostIP, cniConf) + return newCNINetworkConfiguratorWithConf(logger, cniPath, cniInterfacePrefix, ignorePortMappingHostIP, cniConf, node) } -func newCNINetworkConfiguratorWithConf(logger log.Logger, cniPath, cniInterfacePrefix string, ignorePortMappingHostIP bool, cniConf []byte) (*cniNetworkConfigurator, error) { +func newCNINetworkConfiguratorWithConf(logger log.Logger, cniPath, cniInterfacePrefix string, ignorePortMappingHostIP bool, cniConf []byte, node *structs.Node) (*cniNetworkConfigurator, error) { conf := &cniNetworkConfigurator{ cniConf: cniConf, rand: rand.New(rand.NewSource(time.Now().Unix())), logger: logger, ignorePortMappingHostIP: ignorePortMappingHostIP, + nodeAttrs: node.Attributes, + nodeMeta: node.Meta, } if cniPath == "" { if cniPath = os.Getenv(envCNIPath); cniPath == "" { @@ -88,11 +95,34 @@ func newCNINetworkConfiguratorWithConf(logger log.Logger, cniPath, cniInterfaceP return conf, nil } +const ( + ConsulIPTablesConfigEnvVar = "IPTABLES_CONFIG" +) + // Setup calls the CNI plugins with the add action func (c *cniNetworkConfigurator) Setup(ctx context.Context, alloc *structs.Allocation, spec *drivers.NetworkIsolationSpec) (*structs.AllocNetworkStatus, error) { if err := c.ensureCNIInitialized(); err != nil { return nil, err } + argsMap := map[string]string{ + "IgnoreUnknown": "true", + } + + portMapping, portLabels := getPortMapping(alloc, c.ignorePortMappingHostIP) + + tproxyArgs, err := c.setupTransparentProxyArgs(alloc, spec, portMapping, portLabels) + if err != nil { + return nil, err + } + if tproxyArgs != nil { + iptablesCfg, err := json.Marshal(tproxyArgs) + if err != nil { + return nil, err + } + argsMap[ConsulIPTablesConfigEnvVar] = string(iptablesCfg) + } + + c.logger.Trace("CNI_ARGS", "args", argsMap) // Depending on the 
version of bridge cni plugin used, a known race could occure // where two alloc attempt to create the nomad bridge at the same time, resulting @@ -102,7 +132,10 @@ func (c *cniNetworkConfigurator) Setup(ctx context.Context, alloc *structs.Alloc var res *cni.Result for attempt := 1; ; attempt++ { var err error - if res, err = c.cni.Setup(ctx, alloc.ID, spec.Path, cni.WithCapabilityPortMap(getPortMapping(alloc, c.ignorePortMappingHostIP))); err != nil { + if res, err = c.cni.Setup(ctx, alloc.ID, spec.Path, + cni.WithCapabilityPortMap(portMapping), + cni.WithLabels(argsMap), + ); err != nil { c.logger.Warn("failed to configure network", "error", err, "attempt", attempt) switch attempt { case 1: @@ -123,8 +156,170 @@ func (c *cniNetworkConfigurator) Setup(ctx context.Context, alloc *structs.Alloc c.logger.Debug("received result from CNI", "result", string(resultJSON)) } - return c.cniToAllocNet(res) + allocNet, err := c.cniToAllocNet(res) + if err != nil { + return nil, err + } + + // prepend the Consul DNS to the nameservers, if we have it; we don't need + // the port because the iptables rule redirects port 53 traffic to it + if tproxyArgs != nil && tproxyArgs.ConsulDNSIP != "" { + allocNet.DNS.Servers = append([]string{tproxyArgs.ConsulDNSIP}, + allocNet.DNS.Servers...) 
+ } + + return allocNet, nil +} + +func (c *cniNetworkConfigurator) setupTransparentProxyArgs(alloc *structs.Allocation, spec *drivers.NetworkIsolationSpec, portMapping []cni.PortMapping, portLabels map[string]int) (*consulIPTables.Config, error) { + + var tproxy *structs.ConsulTransparentProxy + var cluster string + var proxyUID string + var proxyInboundPort int + var proxyOutboundPort int + + exposePorts := []string{} + outboundPorts := []string{} + + tg := alloc.Job.LookupTaskGroup(alloc.TaskGroup) + for _, svc := range tg.Services { + + if svc.Connect.HasTransparentProxy() { + + proxyUID = c.nodeMeta["connect.transparent_proxy.default_uid"] + + // note: the default value matches the default TransparentProxy + // service default for OutboundListenerPort. If the cluster admin + // sets this value to something non-default, they'll need to update + // the metadata on all the nodes to match. see also: + // https://developer.hashicorp.com/consul/docs/connect/config-entries/service-defaults#transparentproxy + outboundPortAttr := c.nodeMeta["connect.transparent_proxy.default_outbound_port"] + parsedOutboundPort, err := strconv.ParseInt(outboundPortAttr, 10, 32) + if err != nil { + return nil, fmt.Errorf("could not parse default_outbound_port %q as port number: %w", outboundPortAttr, err) + } + proxyOutboundPort = int(parsedOutboundPort) + + envoyPortLabel := "connect-proxy-" + svc.Name + if idx, ok := portLabels[envoyPortLabel]; ok { + proxyInboundPort = int(portMapping[idx].HostPort) + } + + tproxy = svc.Connect.SidecarService.Proxy.TransparentProxy + cluster = svc.Cluster + + if tproxy.UID != "" { + proxyUID = tproxy.UID + } + + outboundPorts = helper.ConvertSlice( + tproxy.ExcludeOutboundPorts, func(p uint16) string { return fmt.Sprint(p) }) + + if tproxy.OutboundPort != 0 { + proxyOutboundPort = int(tproxy.OutboundPort) + } + + // ExcludeInboundPorts can be either a numeric port number or a port + // label that we need to convert into a port number + for _, 
portLabel := range tproxy.ExcludeInboundPorts { + if _, err := strconv.ParseUint(portLabel, 10, 64); err == nil { + exposePorts = append(exposePorts, portLabel) + continue + } + if idx, ok := portLabels[portLabel]; ok { + // TODO: should this be the HostPort or the ContainerPort? + exposePorts = append(exposePorts, + strconv.FormatInt(int64(portMapping[idx].HostPort), 10)) + } + } + + // we also exclude Expose.Paths which will get used for health check + if svc.Connect.SidecarService.Proxy.Expose != nil { + for _, path := range svc.Connect.SidecarService.Proxy.Expose.Paths { + if idx, ok := portLabels[path.ListenerPort]; ok { + exposePorts = append(exposePorts, + strconv.FormatInt(int64(portMapping[idx].HostPort), 10)) + } + } + } + + // only one Connect block is allowed with tproxy and this will have + // been validated on job registration + + if len(exposePorts) == 0 { + exposePorts = nil + } + if len(outboundPorts) == 0 { + outboundPorts = nil + } + break + } + } + + if tproxy != nil { + var dnsAddr string + var dnsPort int + if !tproxy.NoDNS { + dnsAddr, dnsPort = c.dnsFromAttrs(cluster) + } + + consulIPTablesCfgMap := &consulIPTables.Config{ + // Traffic in the DNSChain is directed to the Consul DNS Service IP. + // For outbound TCP and UDP traffic going to port 53 (DNS), jump to + // the DNSChain. Only redirect traffic that's going to consul's DNS + // IP. + ConsulDNSIP: dnsAddr, + ConsulDNSPort: dnsPort, + + // Don't redirect proxy traffic back to itself, return it to the + // next chain for processing. + ProxyUserID: proxyUID, + + // Redirects inbound TCP traffic hitting the PROXY_IN_REDIRECT chain + // to Envoy's inbound listener port. + ProxyInboundPort: proxyInboundPort, + + // Redirects outbound TCP traffic hitting PROXY_REDIRECT chain to + // Envoy's outbound listener port. 
+ ProxyOutboundPort: proxyOutboundPort, + + ExcludeInboundPorts: exposePorts, + ExcludeOutboundPorts: outboundPorts, + ExcludeOutboundCIDRs: tproxy.ExcludeOutboundCIDRs, + ExcludeUIDs: tproxy.ExcludeUIDs, + NetNS: spec.Path, + } + + return consulIPTablesCfgMap, nil + + } + return nil, nil +} + +func (c *cniNetworkConfigurator) dnsFromAttrs(cluster string) (string, int) { + var dnsAddrAttr, dnsPortAttr string + if cluster == structs.ConsulDefaultCluster || cluster == "" { + dnsAddrAttr = "consul.dns.addr" + dnsPortAttr = "consul.dns.port" + } else { + dnsAddrAttr = "consul." + cluster + ".dns.addr" + dnsPortAttr = "consul." + cluster + ".dns.port" + } + dnsAddr, ok := c.nodeAttrs[dnsAddrAttr] + if !ok || dnsAddr == "" { + return "", 0 + } + dnsPort, ok := c.nodeAttrs[dnsPortAttr] + if !ok || dnsPort == "0" || dnsPort == "-1" { + return "", 0 + } + port, err := strconv.ParseInt(dnsPort, 10, 64) + if err != nil { + return "", 0 // note: this will have been checked in fingerprint + } + return dnsAddr, int(port) } // cniToAllocNet converts a cni.Result to an AllocNetworkStatus or returns an @@ -240,7 +435,9 @@ func (c *cniNetworkConfigurator) Teardown(ctx context.Context, alloc *structs.Al return err } - if err := c.cni.Remove(ctx, alloc.ID, spec.Path, cni.WithCapabilityPortMap(getPortMapping(alloc, c.ignorePortMappingHostIP))); err != nil { + portMap, _ := getPortMapping(alloc, c.ignorePortMappingHostIP) + + if err := c.cni.Remove(ctx, alloc.ID, spec.Path, cni.WithCapabilityPortMap(portMap)); err != nil { // create a real handle to iptables ipt, iptErr := iptables.New() if iptErr != nil { @@ -347,8 +544,9 @@ func (c *cniNetworkConfigurator) ensureCNIInitialized() error { // getPortMapping builds a list of portMapping structs that are used as the // portmapping capability arguments for the portmap CNI plugin -func getPortMapping(alloc *structs.Allocation, ignoreHostIP bool) []cni.PortMapping { +func getPortMapping(alloc *structs.Allocation, ignoreHostIP bool) 
([]cni.PortMapping, map[string]int) { var ports []cni.PortMapping + labels := map[string]int{} if len(alloc.AllocatedResources.Shared.Ports) == 0 && len(alloc.AllocatedResources.Shared.Networks) > 0 { for _, network := range alloc.AllocatedResources.Shared.Networks { @@ -362,6 +560,7 @@ func getPortMapping(alloc *structs.Allocation, ignoreHostIP bool) []cni.PortMapp ContainerPort: int32(port.To), Protocol: proto, }) + labels[port.Label] = len(ports) - 1 } } } @@ -380,8 +579,9 @@ func getPortMapping(alloc *structs.Allocation, ignoreHostIP bool) []cni.PortMapp portMapping.HostIP = port.HostIP } ports = append(ports, portMapping) + labels[port.Label] = len(ports) - 1 } } } - return ports + return ports, labels } diff --git a/client/allocrunner/networking_cni_test.go b/client/allocrunner/networking_cni_test.go index b773a9486f3e..4a7ec8d7904e 100644 --- a/client/allocrunner/networking_cni_test.go +++ b/client/allocrunner/networking_cni_test.go @@ -12,8 +12,12 @@ import ( "github.com/containerd/go-cni" "github.com/containernetworking/cni/pkg/types" + "github.com/hashicorp/consul/sdk/iptables" "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/helper/testlog" + "github.com/hashicorp/nomad/nomad/mock" + "github.com/hashicorp/nomad/nomad/structs" + "github.com/hashicorp/nomad/plugins/drivers" "github.com/shoenig/test" "github.com/shoenig/test/must" "github.com/stretchr/testify/require" @@ -200,3 +204,246 @@ func TestCNI_cniToAllocNet_Invalid(t *testing.T) { require.Error(t, err) require.Nil(t, allocNet) } + +func TestCNI_setupTproxyArgs(t *testing.T) { + ci.Parallel(t) + + nodeMeta := map[string]string{ + "connect.transparent_proxy.default_outbound_port": "15001", + "connect.transparent_proxy.default_uid": "101", + } + + nodeAttrs := map[string]string{ + "consul.dns.addr": "192.168.1.117", + "consul.dns.port": "8600", + } + + alloc := mock.ConnectAlloc() + tg := alloc.Job.LookupTaskGroup(alloc.TaskGroup) + tg.Networks = []*structs.NetworkResource{{ + Mode: 
"bridge", + DNS: &structs.DNSConfig{}, + ReservedPorts: []structs.Port{ // non-Connect port + { + Label: "http", + Value: 9002, + To: 9002, + HostNetwork: "default", + }, + { + Label: "metrics", + Value: 9001, + To: 9000, + HostNetwork: "default", + }, + }, + DynamicPorts: []structs.Port{ // Connect port + { + Label: "connect-proxy-count-dashboard", + Value: 0, + To: -1, + HostNetwork: "default", + }, + }, + }} + tg.Services[0].PortLabel = "9002" + tg.Services[0].Connect.SidecarService.Proxy = &structs.ConsulProxy{ + LocalServiceAddress: "", + LocalServicePort: 0, + Upstreams: []structs.ConsulUpstream{}, + Expose: &structs.ConsulExposeConfig{}, + Config: map[string]interface{}{}, + } + + spec := &drivers.NetworkIsolationSpec{ + Mode: "group", + Path: "/var/run/docker/netns/a2ece01ea7bc", + Labels: map[string]string{"docker_sandbox_container_id": "4a77cdaad5"}, + HostsConfig: &drivers.HostsConfig{}, + } + + portMapping := []cni.PortMapping{ + { + HostPort: 9002, + ContainerPort: 9002, + Protocol: "tcp", + HostIP: "", + }, + { + HostPort: 9002, + ContainerPort: 9002, + Protocol: "udp", + HostIP: "", + }, + { + HostPort: 9001, + ContainerPort: 9000, + Protocol: "tcp", + HostIP: "", + }, + { + HostPort: 9001, + ContainerPort: 9000, + Protocol: "udp", + HostIP: "", + }, + { + HostPort: 25018, + ContainerPort: 25018, + Protocol: "tcp", + HostIP: "", + }, + { + HostPort: 25018, + ContainerPort: 20000, + Protocol: "udp", + HostIP: "", + }, + } + portLabels := map[string]int{ + "connect-proxy-testconnect": 5, + "http": 1, + "metrics": 3, + } + + testCases := []struct { + name string + cluster string + tproxySpec *structs.ConsulTransparentProxy + exposeSpec *structs.ConsulExposeConfig + nodeAttrs map[string]string + expectIPConfig *iptables.Config + expectErr string + }{ + { + name: "nil tproxy spec returns no error or iptables config", + }, + { + name: "minimal empty tproxy spec returns defaults", + tproxySpec: &structs.ConsulTransparentProxy{}, + expectIPConfig: 
&iptables.Config{ + ConsulDNSIP: "192.168.1.117", + ConsulDNSPort: 8600, + ProxyUserID: "101", + ProxyInboundPort: 25018, + ProxyOutboundPort: 15001, + NetNS: "/var/run/docker/netns/a2ece01ea7bc", + }, + }, + { + name: "tproxy spec with overrides", + tproxySpec: &structs.ConsulTransparentProxy{ + UID: "1001", + OutboundPort: 16001, + ExcludeInboundPorts: []string{"http", "9000"}, + ExcludeOutboundPorts: []uint16{443, 80}, + ExcludeOutboundCIDRs: []string{"10.0.0.1/8"}, + ExcludeUIDs: []string{"10", "42"}, + NoDNS: true, + }, + expectIPConfig: &iptables.Config{ + ProxyUserID: "1001", + ProxyInboundPort: 25018, + ProxyOutboundPort: 16001, + ExcludeInboundPorts: []string{"9002", "9000"}, + ExcludeOutboundCIDRs: []string{"10.0.0.1/8"}, + ExcludeOutboundPorts: []string{"443", "80"}, + ExcludeUIDs: []string{"10", "42"}, + NetNS: "/var/run/docker/netns/a2ece01ea7bc", + }, + }, + { + name: "tproxy with exposed checks", + tproxySpec: &structs.ConsulTransparentProxy{}, + exposeSpec: &structs.ConsulExposeConfig{ + Paths: []structs.ConsulExposePath{{ + Path: "/v1/example", + Protocol: "http", + LocalPathPort: 9000, + ListenerPort: "metrics", + }}, + }, + expectIPConfig: &iptables.Config{ + ConsulDNSIP: "192.168.1.117", + ConsulDNSPort: 8600, + ProxyUserID: "101", + ProxyInboundPort: 25018, + ProxyOutboundPort: 15001, + ExcludeInboundPorts: []string{"9001"}, + NetNS: "/var/run/docker/netns/a2ece01ea7bc", + }, + }, + { + name: "tproxy with no consul dns fingerprint", + nodeAttrs: map[string]string{}, + tproxySpec: &structs.ConsulTransparentProxy{}, + expectIPConfig: &iptables.Config{ + ProxyUserID: "101", + ProxyInboundPort: 25018, + ProxyOutboundPort: 15001, + NetNS: "/var/run/docker/netns/a2ece01ea7bc", + }, + }, + { + name: "tproxy with consul dns disabled", + nodeAttrs: map[string]string{ + "consul.dns.port": "-1", + "consul.dns.addr": "192.168.1.117", + }, + tproxySpec: &structs.ConsulTransparentProxy{}, + expectIPConfig: &iptables.Config{ + ProxyUserID: "101", + 
ProxyInboundPort: 25018, + ProxyOutboundPort: 15001, + NetNS: "/var/run/docker/netns/a2ece01ea7bc", + }, + }, + { + name: "tproxy for other cluster with default consul dns disabled", + cluster: "infra", + nodeAttrs: map[string]string{ + "consul.dns.port": "-1", + "consul.dns.addr": "192.168.1.110", + "consul.infra.dns.port": "8600", + "consul.infra.dns.addr": "192.168.1.117", + }, + tproxySpec: &structs.ConsulTransparentProxy{}, + expectIPConfig: &iptables.Config{ + ConsulDNSIP: "192.168.1.117", + ConsulDNSPort: 8600, + ProxyUserID: "101", + ProxyInboundPort: 25018, + ProxyOutboundPort: 15001, + NetNS: "/var/run/docker/netns/a2ece01ea7bc", + }, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + tg.Services[0].Connect.SidecarService.Proxy.TransparentProxy = tc.tproxySpec + tg.Services[0].Connect.SidecarService.Proxy.Expose = tc.exposeSpec + tg.Services[0].Cluster = tc.cluster + + c := &cniNetworkConfigurator{ + nodeAttrs: nodeAttrs, + nodeMeta: nodeMeta, + logger: testlog.HCLogger(t), + } + if tc.nodeAttrs != nil { + c.nodeAttrs = tc.nodeAttrs + } + + iptablesCfg, err := c.setupTransparentProxyArgs(alloc, spec, portMapping, portLabels) + if tc.expectErr == "" { + must.NoError(t, err) + must.Eq(t, tc.expectIPConfig, iptablesCfg) + } else { + must.EqError(t, err, tc.expectErr) + must.Nil(t, iptablesCfg) + } + }) + + } + +} diff --git a/client/client.go b/client/client.go index a82189abeafd..efa179fc052f 100644 --- a/client/client.go +++ b/client/client.go @@ -124,6 +124,19 @@ const ( // // https://www.envoyproxy.io/docs/envoy/latest/operations/cli#cmdoption-concurrency defaultConnectProxyConcurrency = "1" + + // defaultTransparentProxyUID is the default UID of the Envoy proxy + // container user, for use with transparent proxy + defaultTransparentProxyUID = "101" + + // defaultTransparentProxyOutboundPort is the default outbound port for the + // Envoy proxy, for use with transparent proxy. 
 Note the default value + matches the default TransparentProxy service default for + OutboundListenerPort. If the cluster admin sets this value to something + non-default, they'll need to update the metadata on all the nodes to + match. See also: + https://developer.hashicorp.com/consul/docs/connect/config-entries/service-defaults#transparentproxy + defaultTransparentProxyOutboundPort = "15001" ) var ( @@ -1578,6 +1591,12 @@ func (c *Client) setupNode() error { if _, ok := node.Meta["connect.proxy_concurrency"]; !ok { node.Meta["connect.proxy_concurrency"] = defaultConnectProxyConcurrency } + if _, ok := node.Meta["connect.transparent_proxy.default_uid"]; !ok { + node.Meta["connect.transparent_proxy.default_uid"] = defaultTransparentProxyUID + } + if _, ok := node.Meta["connect.transparent_proxy.default_outbound_port"]; !ok { + node.Meta["connect.transparent_proxy.default_outbound_port"] = defaultTransparentProxyOutboundPort + } // Since node.Meta will get dynamic metadata merged in, save static metadata // here.