
Commit

Ensure full functionality of AntreaProxy with proxyAll enabled when kube-proxy is present

Signed-off-by: Hongliang Liu <lhongliang@vmware.com>
hongliangl committed May 30, 2024
1 parent 166db3b commit 2254f1d
Showing 22 changed files with 1,219 additions and 314 deletions.
1 change: 1 addition & 0 deletions .github/workflows/kind.yml
@@ -238,6 +238,7 @@ jobs:
--coverage \
--feature-gates AllAlpha=true,AllBeta=true \
--proxy-all \
--no-kube-proxy \
--node-ipam \
--extra-vlan \
--multicast \
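The flags above are forwarded to ci/kind/test-e2e-kind.sh (updated below). As a sketch, a local run mirroring this CI configuration might look like the following; only `--proxy-all` and `--no-kube-proxy` come from this commit, the other options are assumptions:

```bash
# Run the kind e2e suite with AntreaProxy handling all Service traffic and
# with kube-proxy left out of the test cluster entirely.
./ci/kind/test-e2e-kind.sh \
  --ip-family v4 \
  --proxy-all \
  --no-kube-proxy
```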
10 changes: 8 additions & 2 deletions ci/kind/test-e2e-kind.sh
@@ -28,6 +28,7 @@ _usage="Usage: $0 [--encap-mode <mode>] [--ip-family <v4|v6|dual>] [--coverage]
--feature-gates A comma-separated list of key=value pairs that describe feature gates, e.g. AntreaProxy=true,Egress=false.
--run Run only tests matching the regexp.
--proxy-all Enables Antrea proxy with all Service support.
--no-kube-proxy Don't deploy kube-proxy.
--load-balancer-mode LoadBalancer mode.
--node-ipam Enables Antrea NodeIPAM.
--multicast Enables Multicast.
@@ -73,6 +74,7 @@ mode=""
ipfamily="v4"
feature_gates=""
proxy_all=false
no_kube_proxy=false
load_balancer_mode=""
node_ipam=false
multicast=false
@@ -108,6 +110,10 @@ case $key in
proxy_all=true
shift
;;
--no-kube-proxy)
no_kube_proxy=true
shift
;;
--load-balancer-mode)
load_balancer_mode="$2"
shift 2
@@ -305,7 +311,7 @@ function setup_cluster {
echoerr "invalid value for --ip-family \"$ipfamily\", expected \"v4\" or \"v6\""
exit 1
fi
if $proxy_all; then
if $no_kube_proxy; then
args="$args --no-kube-proxy"
fi
if $node_ipam; then
@@ -359,7 +365,7 @@ function run_test {
cat $CH_OPERATOR_YML | docker exec -i kind-control-plane dd of=/root/clickhouse-operator-install-bundle.yml
fi

if $proxy_all; then
if $no_kube_proxy; then
apiserver=$(docker exec -i kind-control-plane kubectl get endpoints kubernetes --no-headers | awk '{print $2}')
if $coverage; then
docker exec -i kind-control-plane sed -i.bak -E "s/^[[:space:]]*[#]?kubeAPIServerOverride[[:space:]]*:[[:space:]]*[a-z\"]+[[:space:]]*$/ kubeAPIServerOverride: \"$apiserver\"/" /root/antrea-coverage.yml /root/antrea-ipsec-coverage.yml
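The hunk above only shows the coverage manifests; presumably the non-coverage manifests receive the same rewrite. A minimal sketch of the idea, with the manifest path `/root/antrea.yml` assumed:

```bash
# Without kube-proxy, antrea-agent cannot reach the kube-apiserver through the
# kubernetes ClusterIP until its own proxy rules are installed, so the script
# points kubeAPIServerOverride at the real apiserver endpoint beforehand.
apiserver=$(docker exec -i kind-control-plane kubectl get endpoints kubernetes --no-headers | awk '{print $2}')
docker exec -i kind-control-plane sed -i.bak -E \
  "s|^[[:space:]]*#?kubeAPIServerOverride:.*|  kubeAPIServerOverride: \"$apiserver\"|" /root/antrea.yml
```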
2 changes: 2 additions & 0 deletions cmd/antrea-agent/agent.go
@@ -229,6 +229,7 @@ func run(o *Options) error {
routeClient, err := route.NewClient(networkConfig,
o.config.NoSNAT,
o.config.AntreaProxy.ProxyAll,
o.config.KubeAPIServerOverride != "",
connectUplinkToBridge,
nodeNetworkPolicyEnabled,
multicastEnabled,
@@ -442,6 +443,7 @@ func run(o *Options) error {
o.defaultLoadBalancerMode,
v4GroupCounter,
v6GroupCounter,
o.config.KubeAPIServerOverride != "",
enableMulticlusterGW)
if err != nil {
return fmt.Errorf("error when creating proxier: %v", err)
18 changes: 12 additions & 6 deletions docs/antrea-proxy.md
@@ -42,14 +42,20 @@ the introduction of `proxyAll`, Antrea relied on userspace kube-proxy, which is
no longer actively maintained by the K8s community and is slower than other
kube-proxy backends.

Note that on Linux, even when `proxyAll` is enabled, kube-proxy will usually
take priority and will keep handling NodePort Service traffic (unless the source
is a Pod, which is pretty unusual as Pods typically access Services by
ClusterIP). This is because kube-proxy rules typically come before the rules
installed by AntreaProxy to redirect traffic to OVS. When kube-proxy is not
deployed or is removed from the cluster, AntreaProxy will then handle all
Note that on Linux, before Antrea v2.1, when `proxyAll` is enabled, kube-proxy
will usually take priority and will keep handling NodePort Service traffic
(unless the source is a Pod, which is pretty unusual as Pods typically access
Services by ClusterIP). This is because kube-proxy rules typically come before
the rules installed by AntreaProxy to redirect traffic to OVS. When kube-proxy
is not deployed or is removed from the cluster, AntreaProxy will then handle all
Service traffic.

It's worth noting that starting with Antrea v2.1, when `proxyAll` is enabled,
AntreaProxy is capable of handling all types of Service traffic even if kube-proxy
is present, except for traffic of the `kubernetes` Service. This is accomplished
by prioritizing the rules installed by AntreaProxy, which redirect Service traffic
to OVS, over those installed by kube-proxy.
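As an illustration of that ordering (the chain names and comments below follow common Antrea and kube-proxy conventions, but the exact output depends on the versions in use):

```bash
# With proxyAll enabled and kube-proxy still deployed, the jump to Antrea's
# NAT chain sits ahead of kube-proxy's KUBE-SERVICES jump, so Service traffic
# is redirected to OVS before kube-proxy's rules can match it.
iptables -t nat -L PREROUTING --line-numbers
# 1  ANTREA-PREROUTING  all  --  anywhere  anywhere  /* Antrea: jump to Antrea prerouting rules */
# 2  KUBE-SERVICES      all  --  anywhere  anywhere  /* kubernetes service portals */
```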

### Removing kube-proxy

In this section, we will provide steps to run a K8s cluster without kube-proxy,
13 changes: 7 additions & 6 deletions pkg/agent/controller/networkpolicy/node_reconciler_linux.go
@@ -31,6 +31,7 @@ import (
"antrea.io/antrea/pkg/agent/config"
"antrea.io/antrea/pkg/agent/route"
"antrea.io/antrea/pkg/agent/types"
ipsetutil "antrea.io/antrea/pkg/agent/util/ipset"
"antrea.io/antrea/pkg/agent/util/iptables"
"antrea.io/antrea/pkg/apis/controlplane/v1beta2"
secv1beta1 "antrea.io/antrea/pkg/apis/crd/v1beta1"
@@ -622,7 +623,7 @@ func buildCoreIPTRule(ipProtocol iptables.Protocol,
builder := iptables.NewRuleBuilder(iptChain)
if isIngress {
if ipset != "" {
builder = builder.MatchIPSetSrc(ipset)
builder = builder.MatchIPSetSrc(ipset, ipsetutil.HashIP)
} else if ipnet != "" {
builder = builder.MatchCIDRSrc(ipnet)
} else {
@@ -631,7 +632,7 @@
}
} else {
if ipset != "" {
builder = builder.MatchIPSetDst(ipset)
builder = builder.MatchIPSetDst(ipset, ipsetutil.HashIP)
} else if ipnet != "" {
builder = builder.MatchCIDRDst(ipnet)
} else {
@@ -648,8 +649,8 @@ func buildCoreIPTRule(ipProtocol iptables.Protocol,
fallthrough
case "sctp":
builder = builder.MatchTransProtocol(transProtocol).
MatchSrcPort(service.SrcPort, service.SrcEndPort).
MatchDstPort(service.Port, service.EndPort)
MatchPortSrc(service.SrcPort, service.SrcEndPort).
MatchPortDst(service.Port, service.EndPort)
case "icmp":
builder = builder.MatchICMP(service.ICMPType, service.ICMPCode, ipProtocol)
}
@@ -673,8 +674,8 @@ func buildServiceIPTRules(ipProtocol iptables.Protocol, services []v1beta2.Servi
fallthrough
case "sctp":
copiedBuilder = copiedBuilder.MatchTransProtocol(transProtocol).
MatchSrcPort(svc.SrcPort, svc.SrcEndPort).
MatchDstPort(svc.Port, svc.EndPort)
MatchPortSrc(svc.SrcPort, svc.SrcEndPort).
MatchPortDst(svc.Port, svc.EndPort)
case "icmp":
copiedBuilder = copiedBuilder.MatchICMP(svc.ICMPType, svc.ICMPCode, ipProtocol)
}
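For context, the flavor of iptables rule these builders generate; the chain and set names below are invented for the example, and only the `hash:ip` set type corresponds to the `ipsetutil.HashIP` argument added above:

```bash
# An ingress rule that matches packets whose source address is in an ipset of
# type hash:ip, then matches the transport protocol and destination port.
ipset create EXAMPLE-POL-SRC-IPS hash:ip
iptables -A EXAMPLE-INGRESS-RULES -m set --match-set EXAMPLE-POL-SRC-IPS src \
  -p tcp --dport 8080 -j ACCEPT
```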
65 changes: 58 additions & 7 deletions pkg/agent/proxy/proxier.go
@@ -554,7 +554,7 @@ func (p *proxier) installNodePortService(localGroupID, clusterGroupID binding.Gr
}); err != nil {
return fmt.Errorf("failed to install NodePort load balancing flows: %w", err)
}
if err := p.routeClient.AddNodePort(p.nodePortAddresses, svcPort, protocol); err != nil {
if err := p.routeClient.AddNodePortConf(p.nodePortAddresses, svcPort, protocol); err != nil {
return fmt.Errorf("failed to install NodePort traffic redirecting rules: %w", err)
}
return nil
@@ -571,7 +571,7 @@ func (p *proxier) uninstallNodePortService(svcPort uint16, protocol binding.Prot
if err := p.ofClient.UninstallServiceFlows(svcIP, svcPort, protocol); err != nil {
return fmt.Errorf("failed to remove NodePort load balancing flows: %w", err)
}
if err := p.routeClient.DeleteNodePort(p.nodePortAddresses, svcPort, protocol); err != nil {
if err := p.routeClient.DeleteNodePortConf(p.nodePortAddresses, svcPort, protocol); err != nil {
return fmt.Errorf("failed to remove NodePort traffic redirecting rules: %w", err)
}
return nil
@@ -586,6 +586,8 @@ func (p *proxier) installExternalIPService(svcInfoStr string,
trafficPolicyLocal bool,
affinityTimeout uint16,
loadBalancerMode agentconfig.LoadBalancerMode) error {
externalIPs := make([]net.IP, 0, len(externalIPStrings))
isDSR := features.DefaultFeatureGate.Enabled(features.LoadBalancerModeDSR) && loadBalancerMode == agentconfig.LoadBalancerModeDSR
for _, externalIP := range externalIPStrings {
ip := net.ParseIP(externalIP)
if err := p.ofClient.InstallServiceFlows(&agenttypes.ServiceConfig{
@@ -599,18 +601,28 @@ func (p *proxier) installExternalIPService(svcInfoStr string,
IsExternal: true,
IsNodePort: false,
IsNested: false, // Unsupported for ExternalIP
IsDSR: features.DefaultFeatureGate.Enabled(features.LoadBalancerModeDSR) && loadBalancerMode == agentconfig.LoadBalancerModeDSR,
IsDSR: isDSR,
}); err != nil {
return fmt.Errorf("failed to install ExternalIP load balancing flows: %w", err)
}
if err := p.addRouteForServiceIP(svcInfoStr, ip, p.routeClient.AddExternalIPRoute); err != nil {
return fmt.Errorf("failed to install ExternalIP traffic redirecting routes: %w", err)
}
externalIPs = append(externalIPs, ip)
}
if len(externalIPs) != 0 {
if err := p.routeClient.AddExternalIPConf(externalIPs, svcPort, protocol); err != nil {
return err
}
}
return nil
}

func (p *proxier) uninstallExternalIPService(svcInfoStr string, externalIPStrings []string, svcPort uint16, protocol binding.Protocol) error {
func (p *proxier) uninstallExternalIPService(svcInfoStr string,
externalIPStrings []string,
svcPort uint16,
protocol binding.Protocol) error {
externalIPs := make([]net.IP, 0, len(externalIPStrings))
for _, externalIP := range externalIPStrings {
ip := net.ParseIP(externalIP)
if err := p.ofClient.UninstallServiceFlows(ip, svcPort, protocol); err != nil {
@@ -619,6 +631,12 @@ func (p *proxier) uninstallExternalIPService(svcInfoStr string, externalIPString
if err := p.deleteRouteForServiceIP(svcInfoStr, ip, p.routeClient.DeleteExternalIPRoute); err != nil {
return fmt.Errorf("failed to remove ExternalIP traffic redirecting routes: %w", err)
}
externalIPs = append(externalIPs, ip)
}
if len(externalIPs) != 0 {
if err := p.routeClient.DeleteExternalIPConf(externalIPs, svcPort, protocol); err != nil {
return err
}
}
return nil
}
@@ -632,6 +650,8 @@ func (p *proxier) installLoadBalancerService(svcInfoStr string,
trafficPolicyLocal bool,
affinityTimeout uint16,
loadBalancerMode agentconfig.LoadBalancerMode) error {
loadBalancerIPs := make([]net.IP, 0, len(loadBalancerIPStrings))
isDSR := features.DefaultFeatureGate.Enabled(features.LoadBalancerModeDSR) && loadBalancerMode == agentconfig.LoadBalancerModeDSR
for _, ingress := range loadBalancerIPStrings {
if ingress != "" {
ip := net.ParseIP(ingress)
@@ -646,17 +666,24 @@ func (p *proxier) installLoadBalancerService(svcInfoStr string,
IsExternal: true,
IsNodePort: false,
IsNested: false, // Unsupported for LoadBalancerIP
IsDSR: features.DefaultFeatureGate.Enabled(features.LoadBalancerModeDSR) && loadBalancerMode == agentconfig.LoadBalancerModeDSR,
IsDSR: isDSR,
}); err != nil {
return fmt.Errorf("failed to install LoadBalancer load balancing flows: %w", err)
}
if p.proxyAll {
if err := p.addRouteForServiceIP(svcInfoStr, ip, p.routeClient.AddExternalIPRoute); err != nil {
return fmt.Errorf("failed to install LoadBalancer traffic redirecting routes: %w", err)
}
loadBalancerIPs = append(loadBalancerIPs, ip)
}
}
}
if p.proxyAll && len(loadBalancerIPs) != 0 {
if err := p.routeClient.AddLoadBalancerConf(loadBalancerIPs, svcPort, protocol); err != nil {
return err
}
}

return nil
}

@@ -677,7 +704,11 @@ func (p *proxier) addRouteForServiceIP(svcInfoStr string, ip net.IP, addRouteFn
return nil
}

func (p *proxier) uninstallLoadBalancerService(svcInfoStr string, loadBalancerIPStrings []string, svcPort uint16, protocol binding.Protocol) error {
func (p *proxier) uninstallLoadBalancerService(svcInfoStr string,
loadBalancerIPStrings []string,
svcPort uint16,
protocol binding.Protocol) error {
loadBalancerIPs := make([]net.IP, 0, len(loadBalancerIPStrings))
for _, ingress := range loadBalancerIPStrings {
if ingress != "" {
ip := net.ParseIP(ingress)
@@ -688,9 +719,15 @@ func (p *proxier) uninstallLoadBalancerService(svcInfoStr string, loadBalancerIP
if err := p.deleteRouteForServiceIP(svcInfoStr, ip, p.routeClient.DeleteExternalIPRoute); err != nil {
return fmt.Errorf("failed to remove LoadBalancer traffic redirecting routes: %w", err)
}
loadBalancerIPs = append(loadBalancerIPs, ip)
}
}
}
if p.proxyAll && len(loadBalancerIPs) != 0 {
if err := p.routeClient.DeleteLoadBalancerConf(loadBalancerIPs, svcPort, protocol); err != nil {
return err
}
}
return nil
}

@@ -1359,6 +1396,7 @@ func newProxier(
nodeIPChecker nodeip.Checker,
nodePortAddresses []net.IP,
proxyAllEnabled bool,
isKubeAPIServerOverridden bool,
skipServices []string,
proxyLoadBalancerIPs bool,
defaultLoadBalancerMode agentconfig.LoadBalancerMode,
@@ -1389,7 +1427,13 @@
}

var serviceHealthServer healthcheck.ServiceHealthServer
if proxyAllEnabled {
if proxyAllEnabled && isKubeAPIServerOverridden {
// The serviceHealthServer of AntreaProxy should be initialized only when kube-proxy is removed. If kube-proxy
// is present, it also provides the serviceHealthServer functionality, and both servers would attempt to start
// an HTTP service on the same port on a K8s Node, causing conflicts. The option `kubeAPIServerOverride`
// in antrea-agent is used to determine if the serviceHealthServer of AntreaProxy should be initialized. The
// option must be set if kube-proxy is removed, though it can also be set when kube-proxy is present (not
// recommended and unnecessary). We assume this option is set only when kube-proxy is removed.
nodePortAddressesString := make([]string, len(nodePortAddresses))
for i, address := range nodePortAddresses {
nodePortAddressesString[i] = address.String()
@@ -1500,6 +1544,7 @@ func newDualStackProxier(
nodePortAddressesIPv4 []net.IP,
nodePortAddressesIPv6 []net.IP,
proxyAllEnabled bool,
isKubeAPIServerOverridden bool,
skipServices []string,
proxyLoadBalancerIPs bool,
defaultLoadBalancerMode agentconfig.LoadBalancerMode,
@@ -1521,6 +1566,7 @@ func newDualStackProxier(
nodeIPChecker,
nodePortAddressesIPv4,
proxyAllEnabled,
isKubeAPIServerOverridden,
skipServices,
proxyLoadBalancerIPs,
defaultLoadBalancerMode,
@@ -1543,6 +1589,7 @@ func newDualStackProxier(
nodeIPChecker,
nodePortAddressesIPv6,
proxyAllEnabled,
isKubeAPIServerOverridden,
skipServices,
proxyLoadBalancerIPs,
defaultLoadBalancerMode,
@@ -1575,6 +1622,7 @@ func NewProxier(hostname string,
defaultLoadBalancerMode agentconfig.LoadBalancerMode,
v4GroupCounter types.GroupCounter,
v6GroupCounter types.GroupCounter,
isKubeAPIServerOverridden bool,
nestedServiceSupport bool) (Proxier, error) {
proxyAllEnabled := proxyConfig.ProxyAll
skipServices := proxyConfig.SkipServices
@@ -1598,6 +1646,7 @@ func NewProxier(hostname string,
nodePortAddressesIPv4,
nodePortAddressesIPv6,
proxyAllEnabled,
isKubeAPIServerOverridden,
skipServices,
proxyLoadBalancerIPs,
defaultLoadBalancerMode,
@@ -1621,6 +1670,7 @@ func NewProxier(hostname string,
nodeIPChecker,
nodePortAddressesIPv4,
proxyAllEnabled,
isKubeAPIServerOverridden,
skipServices,
proxyLoadBalancerIPs,
defaultLoadBalancerMode,
@@ -1643,6 +1693,7 @@ func NewProxier(hostname string,
nodeIPChecker,
nodePortAddressesIPv6,
proxyAllEnabled,
isKubeAPIServerOverridden,
skipServices,
proxyLoadBalancerIPs,
defaultLoadBalancerMode,
