diff --git a/.gitignore b/.gitignore index d15e3a0a303..234560efbbb 100644 --- a/.gitignore +++ b/.gitignore @@ -3,18 +3,23 @@ aws-k8s-agent aws-cni aws-vpc-cni aws-vpc-cni-init +cni-metrics-helper +egress-cni +grpc-health-probe +portmap verify-aws verify-network +# Ignore generated files *~ *.swp .idea/ *.iml .DS_Store -portmap -grpc-health-probe -cni-metrics-helper coverage.txt +cmd/egress-cni-plugin/egress-plugin.log +# Ignore build files core-plugins/ build/ -vendor -egress-cni +vendor/ +# Unignore charts directory as it conflicts with `aws-vpc-cni` binary ignore rule +!charts/** diff --git a/cmd/aws-vpc-cni-init/main.go b/cmd/aws-vpc-cni-init/main.go index 864827b2612..e89da824046 100644 --- a/cmd/aws-vpc-cni-init/main.go +++ b/cmd/aws-vpc-cni-init/main.go @@ -117,6 +117,10 @@ func configureIPv6Settings(procSys procsyswrapper.ProcSys, primaryIF string) err // Check if IPv6 egress support is enabled in IPv4 cluster. ipv6EgressEnabled := utils.GetBoolAsStringEnvVar(envEgressV6, defaultEnableIPv6Egress) if enableIPv6 || ipv6EgressEnabled { + // For IPv6, the following sysctls are set: + // 1. net/ipv6/conf/all/forwarding=1 + // 2. net/ipv6/conf/default/accept_ra=2 + // 3. net/ipv6/conf/default/accept_redirects=1 entry := "net/ipv6/conf/all/forwarding" err = procSys.Set(entry, "1") if err != nil { @@ -125,10 +129,33 @@ func configureIPv6Settings(procSys procsyswrapper.ProcSys, primaryIF string) err val, _ := procSys.Get(entry) log.Infof("Updated %s to %s", entry, val) + // accept_ra must be set to 2 so that RA routes are installed by the kernel on secondary ENIs + // For IPv6, this setting must be inherited by the trunk ENI. It must be set here as IPAMD does + // not have permission to set sysctl values. + entry = "net/ipv6/conf/default/accept_ra" + err = procSys.Set(entry, "2") + if err != nil { + return errors.Wrap(err, "Failed to set IPv6 accept Router Advertisements to 2") + } + val, _ = procSys.Get(entry) + log.Infof("Updated %s to %s", entry, val) + + entry = "net/ipv6/conf/default/accept_redirects" + err = procSys.Set(entry, "1") + if err != nil { + return errors.Wrap(err, "Failed to enable IPv6 accept redirects") + } + val, _ = procSys.Get(entry) + log.Infof("Updated %s to %s", entry, val) + + // For the primary ENI in IPv6, sysctls are set to: + // 1. forwarding=1 + // 2. accept_ra=2 + // 3. 
accept_redirects=1 entry = "net/ipv6/conf/" + primaryIF + "/accept_ra" err = procSys.Set(entry, "2") if err != nil { - return errors.Wrap(err, "Failed to enable IPv6 accept_ra") + return errors.Wrap(err, "Failed to enable IPv6 accept_ra on primary ENI") } val, _ = procSys.Get(entry) log.Infof("Updated %s to %s", entry, val) diff --git a/cmd/routed-eni-cni-plugin/driver/driver.go b/cmd/routed-eni-cni-plugin/driver/driver.go index 0e09c69f9d1..2c650894e69 100644 --- a/cmd/routed-eni-cni-plugin/driver/driver.go +++ b/cmd/routed-eni-cni-plugin/driver/driver.go @@ -290,6 +290,9 @@ func (n *linuxNetwork) SetupBranchENIPodNetwork(hostVethName string, contVethNam oldFromHostVethRule := n.netLink.NewRule() oldFromHostVethRule.IifName = hostVethName oldFromHostVethRule.Priority = networkutils.VlanRulePriority + if v6Addr != nil { + oldFromHostVethRule.Family = unix.AF_INET6 + } if err := networkutils.NetLinkRuleDelAll(n.netLink, oldFromHostVethRule); err != nil { return errors.Wrapf(err, "SetupBranchENIPodNetwork: failed to delete hostVeth rule for %s", hostVethName) } @@ -328,9 +331,13 @@ func (n *linuxNetwork) TeardownBranchENIPodNetwork(containerAddr *net.IPNet, vla return errors.Wrapf(err, "TeardownBranchENIPodNetwork: failed to teardown vlan") } + ipFamily := unix.AF_INET + if containerAddr.IP.To4() == nil { + ipFamily = unix.AF_INET6 + } // to handle the migration between different enforcingMode, we try to clean up rules under both mode since the pod might be setup with a different mode. rtTable := vlanID + 100 - if err := n.teardownIIFBasedContainerRouteRules(rtTable, log); err != nil { + if err := n.teardownIIFBasedContainerRouteRules(rtTable, ipFamily, log); err != nil { return errors.Wrapf(err, "TeardownBranchENIPodNetwork: unable to teardown IIF based container routes and rules") } if err := n.teardownIPBasedContainerRouteRules(containerAddr, rtTable, log); err != nil { @@ -360,20 +367,29 @@ func (n *linuxNetwork) setupVeth(hostVethName string, contVethName string, netns return nil, errors.Wrapf(err, "failed to find hostVeth %s", hostVethName) } + // For IPv6, host veth sysctls must be set to: + // 1. accept_ra=0 + // 2. accept_redirects=1 + // 3. forwarding=0 if err := n.procSys.Set(fmt.Sprintf("net/ipv6/conf/%s/accept_ra", hostVethName), "0"); err != nil { if !os.IsNotExist(err) { return nil, errors.Wrapf(err, "failed to disable IPv6 router advertisements") } log.Debugf("Ignoring '%v' writing to accept_ra: Assuming kernel lacks IPv6 support", err) } - - if err := n.procSys.Set(fmt.Sprintf("net/ipv6/conf/%s/accept_redirects", hostVethName), "0"); err != nil { + if err := n.procSys.Set(fmt.Sprintf("net/ipv6/conf/%s/accept_redirects", hostVethName), "1"); err != nil { if !os.IsNotExist(err) { return nil, errors.Wrapf(err, "failed to disable IPv6 ICMP redirects") } log.Debugf("Ignoring '%v' writing to accept_redirects: Assuming kernel lacks IPv6 support", err) } - log.Debugf("Successfully disabled IPv6 RA and ICMP redirects on hostVeth %s", hostVethName) + if err := n.procSys.Set(fmt.Sprintf("net/ipv6/conf/%s/forwarding", hostVethName), "0"); err != nil { + if !os.IsNotExist(err) { + return nil, errors.Wrapf(err, "failed to disable IPv6 forwarding") + } + log.Debugf("Ignoring '%v' writing to forwarding: Assuming kernel lacks IPv6 support", err) + } + log.Debugf("Successfully set IPv6 sysctls on hostVeth %s", hostVethName) // Explicitly set the veth to UP state, because netlink doesn't always do that on all the platforms with net.FlagUp. 
// veth won't get a link local address unless it's set to UP state. @@ -400,12 +416,37 @@ func (n *linuxNetwork) setupVlan(vlanID int, eniMAC string, subnetGW string, par return nil, errors.Wrapf(err, "failed to add vlan link %s", vlanLinkName) } - // 3. bring up the vlan + // 3. Set IPv6 sysctls + // accept_ra=0 + // accept_redirects=1 + // forwarding=0 + if err := n.procSys.Set(fmt.Sprintf("net/ipv6/conf/%s/accept_ra", vlanLinkName), "0"); err != nil { + if !os.IsNotExist(err) { + return nil, errors.Wrapf(err, "failed to disable IPv6 router advertisements") + } + log.Debugf("Ignoring '%v' writing to accept_ra: Assuming kernel lacks IPv6 support", err) + } + + if err := n.procSys.Set(fmt.Sprintf("net/ipv6/conf/%s/accept_redirects", vlanLinkName), "1"); err != nil { + if !os.IsNotExist(err) { + return nil, errors.Wrapf(err, "failed to enable IPv6 ICMP redirects") + } + log.Debugf("Ignoring '%v' writing to accept_redirects: Assuming kernel lacks IPv6 support", err) + } + + if err := n.procSys.Set(fmt.Sprintf("net/ipv6/conf/%s/forwarding", vlanLinkName), "0"); err != nil { + if !os.IsNotExist(err) { + return nil, errors.Wrapf(err, "failed to disable IPv6 forwarding") + } + log.Debugf("Ignoring '%v' writing to forwarding: Assuming kernel lacks IPv6 support", err) + } + + // 4. bring up the vlan if err := n.netLink.LinkSetUp(vlanLink); err != nil { return nil, errors.Wrapf(err, "failed to setUp vlan link %s", vlanLinkName) } - // 4. create default routes for vlan + // 5. create default routes for vlan routes := buildRoutesForVlan(rtTable, vlanLink.Index, net.ParseIP(subnetGW)) for _, r := range routes { if err := n.netLink.RouteReplace(&r); err != nil { @@ -511,6 +552,7 @@ func (n *linuxNetwork) teardownIPBasedContainerRouteRules(containerAddr *net.IPN // traffic to container(iif hostVlan) will be routed via the specified rtTable. // traffic from container(iif hostVeth) will be routed via the specified rtTable. 
func (n *linuxNetwork) setupIIFBasedContainerRouteRules(hostVeth netlink.Link, containerAddr *net.IPNet, hostVlan netlink.Link, rtTable int, log logger.Logger) error { + isV6 := containerAddr.IP.To4() == nil route := netlink.Route{ LinkIndex: hostVeth.Attrs().Index, Scope: netlink.SCOPE_LINK, @@ -528,6 +570,9 @@ func (n *linuxNetwork) setupIIFBasedContainerRouteRules(hostVeth netlink.Link, c fromHostVlanRule.IifName = hostVlan.Attrs().Name fromHostVlanRule.Priority = networkutils.VlanRulePriority fromHostVlanRule.Table = rtTable + if isV6 { + fromHostVlanRule.Family = unix.AF_INET6 + } if err := n.netLink.RuleAdd(fromHostVlanRule); err != nil && !networkutils.IsRuleExistsError(err) { return errors.Wrapf(err, "unable to setup fromHostVlan rule, hostVlan=%s, rtTable=%v", hostVlan.Attrs().Name, rtTable) } @@ -537,6 +582,9 @@ func (n *linuxNetwork) setupIIFBasedContainerRouteRules(hostVeth netlink.Link, c fromHostVethRule.IifName = hostVeth.Attrs().Name fromHostVethRule.Priority = networkutils.VlanRulePriority fromHostVethRule.Table = rtTable + if isV6 { + fromHostVethRule.Family = unix.AF_INET6 + } if err := n.netLink.RuleAdd(fromHostVethRule); err != nil && !networkutils.IsRuleExistsError(err) { return errors.Wrapf(err, "unable to setup fromHostVeth rule, hostVeth=%s, rtTable=%v", hostVeth.Attrs().Name, rtTable) } @@ -545,10 +593,11 @@ func (n *linuxNetwork) setupIIFBasedContainerRouteRules(hostVeth netlink.Link, c return nil } -func (n *linuxNetwork) teardownIIFBasedContainerRouteRules(rtTable int, log logger.Logger) error { +func (n *linuxNetwork) teardownIIFBasedContainerRouteRules(rtTable int, family int, log logger.Logger) error { rule := n.netLink.NewRule() rule.Priority = networkutils.VlanRulePriority rule.Table = rtTable + rule.Family = family if err := networkutils.NetLinkRuleDelAll(n.netLink, rule); err != nil { return errors.Wrapf(err, "failed to delete IIF based rules, rtTable=%v", rtTable) @@ -560,17 +609,23 @@ func (n *linuxNetwork) teardownIIFBasedContainerRouteRules(rtTable int, log logg // buildRoutesForVlan builds routes required for the vlan link. func buildRoutesForVlan(vlanTableID int, vlanIndex int, gw net.IP) []netlink.Route { + maskLen := 32 + zeroAddr := net.IPv4zero + if gw.To4() == nil { + maskLen = 128 + zeroAddr = net.IPv6zero + } return []netlink.Route{ // Add a direct link route for the pod vlan link only. 
{ LinkIndex: vlanIndex, - Dst: &net.IPNet{IP: gw, Mask: net.CIDRMask(32, 32)}, + Dst: &net.IPNet{IP: gw, Mask: net.CIDRMask(maskLen, maskLen)}, Scope: netlink.SCOPE_LINK, Table: vlanTableID, }, { LinkIndex: vlanIndex, - Dst: &net.IPNet{IP: net.IPv4zero, Mask: net.CIDRMask(0, 32)}, + Dst: &net.IPNet{IP: zeroAddr, Mask: net.CIDRMask(0, maskLen)}, Scope: netlink.SCOPE_UNIVERSE, Gw: gw, Table: vlanTableID, diff --git a/cmd/routed-eni-cni-plugin/driver/driver_test.go b/cmd/routed-eni-cni-plugin/driver/driver_test.go index 61fd4060315..7af04ebef50 100644 --- a/cmd/routed-eni-cni-plugin/driver/driver_test.go +++ b/cmd/routed-eni-cni-plugin/driver/driver_test.go @@ -162,6 +162,10 @@ func Test_linuxNetwork_SetupPodNetwork(t *testing.T) { }, { key: "net/ipv6/conf/eni8ea2c11fe35/accept_redirects", + value: "1", + }, + { + key: "net/ipv6/conf/eni8ea2c11fe35/forwarding", value: "0", }, }, @@ -224,6 +228,10 @@ func Test_linuxNetwork_SetupPodNetwork(t *testing.T) { }, { key: "net/ipv6/conf/eni8ea2c11fe35/accept_redirects", + value: "1", + }, + { + key: "net/ipv6/conf/eni8ea2c11fe35/forwarding", value: "0", }, }, @@ -306,6 +314,10 @@ func Test_linuxNetwork_SetupPodNetwork(t *testing.T) { }, { key: "net/ipv6/conf/eni8ea2c11fe35/accept_redirects", + value: "1", + }, + { + key: "net/ipv6/conf/eni8ea2c11fe35/forwarding", value: "0", }, }, @@ -508,6 +520,7 @@ func Test_linuxNetwork_SetupBranchENIPodNetwork(t *testing.T) { vlanID := 7 eniMac := "00:00:5e:00:53:af" subnetGW := "192.168.120.1" + subnetV6GW := "2600::" parentIfIndex := 3 hostVethWithIndex9 := &netlink.Veth{ @@ -522,31 +535,62 @@ func Test_linuxNetwork_SetupBranchENIPodNetwork(t *testing.T) { IP: net.ParseIP("192.168.100.42"), Mask: net.CIDRMask(32, 32), } + containerV6Addr := &net.IPNet{ + IP: net.ParseIP("2600::2"), + Mask: net.CIDRMask(128, 128), + } oldFromHostVethRule := netlink.NewRule() oldFromHostVethRule.IifName = "eni8ea2c11fe35" oldFromHostVethRule.Priority = networkutils.VlanRulePriority + oldFromHostVethV6Rule := netlink.NewRule() + oldFromHostVethV6Rule.IifName = "eni8ea2c11fe35" + oldFromHostVethV6Rule.Priority = networkutils.VlanRulePriority + oldFromHostVethV6Rule.Family = netlink.FAMILY_V6 + fromHostVlanRule := netlink.NewRule() fromHostVlanRule.IifName = vlanLinkPostAddWithIndex11.Name fromHostVlanRule.Priority = networkutils.VlanRulePriority fromHostVlanRule.Table = 107 + fromHostVlanV6Rule := netlink.NewRule() + fromHostVlanV6Rule.IifName = vlanLinkPostAddWithIndex11.Name + fromHostVlanV6Rule.Priority = networkutils.VlanRulePriority + fromHostVlanV6Rule.Table = 107 + fromHostVlanV6Rule.Family = netlink.FAMILY_V6 + fromHostVethRule := netlink.NewRule() fromHostVethRule.IifName = hostVethWithIndex9.Name fromHostVethRule.Priority = networkutils.VlanRulePriority fromHostVethRule.Table = 107 + fromHostVethV6Rule := netlink.NewRule() + fromHostVethV6Rule.IifName = hostVethWithIndex9.Name + fromHostVethV6Rule.Priority = networkutils.VlanRulePriority + fromHostVethV6Rule.Table = 107 + fromHostVethV6Rule.Family = netlink.FAMILY_V6 + toContainerRule := netlink.NewRule() toContainerRule.Dst = containerAddr toContainerRule.Priority = networkutils.ToContainerRulePriority toContainerRule.Table = unix.RT_TABLE_MAIN + toContainerV6Rule := netlink.NewRule() + toContainerV6Rule.Dst = containerV6Addr + toContainerV6Rule.Priority = networkutils.ToContainerRulePriority + toContainerV6Rule.Table = unix.RT_TABLE_MAIN + fromContainerRule := netlink.NewRule() fromContainerRule.Src = containerAddr fromContainerRule.Priority = 
networkutils.FromPodRulePriority fromContainerRule.Table = 107 + fromContainerV6Rule := netlink.NewRule() + fromContainerV6Rule.Src = containerV6Addr + fromContainerV6Rule.Priority = networkutils.FromPodRulePriority + fromContainerV6Rule.Table = 107 + type linkByNameCall struct { linkName string link netlink.Link @@ -702,6 +746,22 @@ func Test_linuxNetwork_SetupBranchENIPodNetwork(t *testing.T) { }, { key: "net/ipv6/conf/eni8ea2c11fe35/accept_redirects", + value: "1", + }, + { + key: "net/ipv6/conf/eni8ea2c11fe35/forwarding", + value: "0", + }, + { + key: "net/ipv6/conf/vlan.eth.7/accept_ra", + value: "0", + }, + { + key: "net/ipv6/conf/vlan.eth.7/accept_redirects", + value: "1", + }, + { + key: "net/ipv6/conf/vlan.eth.7/forwarding", value: "0", }, }, @@ -720,6 +780,124 @@ func Test_linuxNetwork_SetupBranchENIPodNetwork(t *testing.T) { podSGEnforcingMode: sgpp.EnforcingModeStrict, }, }, + { + name: "successfully setup IPv6 pod network - traffic enforced with strict mode", + fields: fields{ + linkByNameCalls: []linkByNameCall{ + { + linkName: "eni8ea2c11fe35", + err: errors.New("not exists"), + }, + { + linkName: "eni8ea2c11fe35", + link: hostVethWithIndex9, + }, + { + linkName: "vlan.eth.7", + err: errors.New("not exists"), + }, + }, + linkAddCalls: []linkAddCall{ + { + link: buildVlanLink("vlan.eth.7", vlanID, parentIfIndex, eniMac), + linkIndex: 11, + }, + }, + linkSetupCalls: []linkSetupCall{ + { + link: hostVethWithIndex9, + }, + { + link: vlanLinkPostAddWithIndex11, + }, + }, + routeReplaceCalls: []routeReplaceCall{ + { + route: &netlink.Route{ + LinkIndex: vlanLinkPostAddWithIndex11.Index, + Dst: &net.IPNet{IP: net.ParseIP(subnetV6GW), Mask: net.CIDRMask(128, 128)}, + Scope: netlink.SCOPE_LINK, + Table: 107, + }, + }, + { + route: &netlink.Route{ + LinkIndex: vlanLinkPostAddWithIndex11.Index, + Dst: &net.IPNet{IP: net.IPv6zero, Mask: net.CIDRMask(0, 128)}, + Scope: netlink.SCOPE_UNIVERSE, + Gw: net.ParseIP(subnetV6GW), + Table: 107, + }, + }, + { + route: &netlink.Route{ + LinkIndex: hostVethWithIndex9.Index, + Scope: netlink.SCOPE_LINK, + Dst: containerV6Addr, + Table: 107, + }, + }, + }, + ruleAddCalls: []ruleAddCall{ + { + rule: fromHostVlanV6Rule, + }, + { + rule: fromHostVethV6Rule, + }, + }, + ruleDelCalls: []ruleDelCall{ + { + rule: oldFromHostVethV6Rule, + err: syscall.ENOENT, + }, + }, + withNetNSPathCalls: []withNetNSPathCall{ + { + netNSPath: "/proc/42/ns/net", + }, + }, + procSysSetCalls: []procSysSetCall{ + { + key: "net/ipv6/conf/eni8ea2c11fe35/accept_ra", + value: "0", + }, + { + key: "net/ipv6/conf/eni8ea2c11fe35/accept_redirects", + value: "1", + }, + { + key: "net/ipv6/conf/eni8ea2c11fe35/forwarding", + value: "0", + }, + { + key: "net/ipv6/conf/vlan.eth.7/accept_ra", + value: "0", + }, + { + key: "net/ipv6/conf/vlan.eth.7/accept_redirects", + value: "1", + }, + { + key: "net/ipv6/conf/vlan.eth.7/forwarding", + value: "0", + }, + }, + }, + args: args{ + hostVethName: "eni8ea2c11fe35", + contVethName: "eth0", + netnsPath: "/proc/42/ns/net", + v4Addr: nil, + v6Addr: containerV6Addr, + vlanID: vlanID, + eniMAC: eniMac, + subnetGW: subnetV6GW, + parentIfIndex: parentIfIndex, + mtu: 9001, + podSGEnforcingMode: sgpp.EnforcingModeStrict, + }, + }, { name: "successfully setup pod network - traffic enforced with standard mode", fields: fields{ @@ -804,6 +982,22 @@ func Test_linuxNetwork_SetupBranchENIPodNetwork(t *testing.T) { }, { key: "net/ipv6/conf/eni8ea2c11fe35/accept_redirects", + value: "1", + }, + { + key: "net/ipv6/conf/eni8ea2c11fe35/forwarding", + value: "0", + }, 
+ { + key: "net/ipv6/conf/vlan.eth.7/accept_ra", + value: "0", + }, + { + key: "net/ipv6/conf/vlan.eth.7/accept_redirects", + value: "1", + }, + { + key: "net/ipv6/conf/vlan.eth.7/forwarding", value: "0", }, }, @@ -822,6 +1016,124 @@ func Test_linuxNetwork_SetupBranchENIPodNetwork(t *testing.T) { podSGEnforcingMode: sgpp.EnforcingModeStandard, }, }, + { + name: "successfully setup v6 pod network - traffic enforced with standard mode", + fields: fields{ + linkByNameCalls: []linkByNameCall{ + { + linkName: "eni8ea2c11fe35", + err: errors.New("not exists"), + }, + { + linkName: "eni8ea2c11fe35", + link: hostVethWithIndex9, + }, + { + linkName: "vlan.eth.7", + err: errors.New("not exists"), + }, + }, + linkAddCalls: []linkAddCall{ + { + link: buildVlanLink("vlan.eth.7", vlanID, parentIfIndex, eniMac), + linkIndex: 11, + }, + }, + linkSetupCalls: []linkSetupCall{ + { + link: hostVethWithIndex9, + }, + { + link: vlanLinkPostAddWithIndex11, + }, + }, + routeReplaceCalls: []routeReplaceCall{ + { + route: &netlink.Route{ + LinkIndex: vlanLinkPostAddWithIndex11.Index, + Dst: &net.IPNet{IP: net.ParseIP(subnetV6GW), Mask: net.CIDRMask(128, 128)}, + Scope: netlink.SCOPE_LINK, + Table: 107, + }, + }, + { + route: &netlink.Route{ + LinkIndex: vlanLinkPostAddWithIndex11.Index, + Dst: &net.IPNet{IP: net.IPv6zero, Mask: net.CIDRMask(0, 128)}, + Scope: netlink.SCOPE_UNIVERSE, + Gw: net.ParseIP(subnetV6GW), + Table: 107, + }, + }, + { + route: &netlink.Route{ + LinkIndex: hostVethWithIndex9.Index, + Scope: netlink.SCOPE_LINK, + Dst: containerV6Addr, + Table: unix.RT_TABLE_MAIN, + }, + }, + }, + ruleAddCalls: []ruleAddCall{ + { + rule: toContainerV6Rule, + }, + { + rule: fromContainerV6Rule, + }, + }, + ruleDelCalls: []ruleDelCall{ + { + rule: oldFromHostVethV6Rule, + err: syscall.ENOENT, + }, + }, + withNetNSPathCalls: []withNetNSPathCall{ + { + netNSPath: "/proc/42/ns/net", + }, + }, + procSysSetCalls: []procSysSetCall{ + { + key: "net/ipv6/conf/eni8ea2c11fe35/accept_ra", + value: "0", + }, + { + key: "net/ipv6/conf/eni8ea2c11fe35/accept_redirects", + value: "1", + }, + { + key: "net/ipv6/conf/eni8ea2c11fe35/forwarding", + value: "0", + }, + { + key: "net/ipv6/conf/vlan.eth.7/accept_ra", + value: "0", + }, + { + key: "net/ipv6/conf/vlan.eth.7/accept_redirects", + value: "1", + }, + { + key: "net/ipv6/conf/vlan.eth.7/forwarding", + value: "0", + }, + }, + }, + args: args{ + hostVethName: "eni8ea2c11fe35", + contVethName: "eth0", + netnsPath: "/proc/42/ns/net", + v4Addr: nil, + v6Addr: containerV6Addr, + vlanID: vlanID, + eniMAC: eniMac, + subnetGW: subnetV6GW, + parentIfIndex: parentIfIndex, + mtu: 9001, + podSGEnforcingMode: sgpp.EnforcingModeStandard, + }, + }, { name: "failed to setup vethPair", fields: fields{ @@ -889,6 +1201,10 @@ func Test_linuxNetwork_SetupBranchENIPodNetwork(t *testing.T) { }, { key: "net/ipv6/conf/eni8ea2c11fe35/accept_redirects", + value: "1", + }, + { + key: "net/ipv6/conf/eni8ea2c11fe35/forwarding", value: "0", }, }, @@ -954,6 +1270,10 @@ func Test_linuxNetwork_SetupBranchENIPodNetwork(t *testing.T) { }, { key: "net/ipv6/conf/eni8ea2c11fe35/accept_redirects", + value: "1", + }, + { + key: "net/ipv6/conf/eni8ea2c11fe35/forwarding", value: "0", }, }, @@ -1050,6 +1370,22 @@ func Test_linuxNetwork_SetupBranchENIPodNetwork(t *testing.T) { }, { key: "net/ipv6/conf/eni8ea2c11fe35/accept_redirects", + value: "1", + }, + { + key: "net/ipv6/conf/eni8ea2c11fe35/forwarding", + value: "0", + }, + { + key: "net/ipv6/conf/vlan.eth.7/accept_ra", + value: "0", + }, + { + key: 
"net/ipv6/conf/vlan.eth.7/accept_redirects", + value: "1", + }, + { + key: "net/ipv6/conf/vlan.eth.7/forwarding", value: "0", }, }, @@ -1146,6 +1482,22 @@ func Test_linuxNetwork_SetupBranchENIPodNetwork(t *testing.T) { }, { key: "net/ipv6/conf/eni8ea2c11fe35/accept_redirects", + value: "1", + }, + { + key: "net/ipv6/conf/eni8ea2c11fe35/forwarding", + value: "0", + }, + { + key: "net/ipv6/conf/vlan.eth.7/accept_ra", + value: "0", + }, + { + key: "net/ipv6/conf/vlan.eth.7/accept_redirects", + value: "1", + }, + { + key: "net/ipv6/conf/vlan.eth.7/forwarding", value: "0", }, }, @@ -1235,27 +1587,52 @@ func Test_linuxNetwork_TeardownBranchENIPodNetwork(t *testing.T) { IP: net.ParseIP("192.168.100.42"), Mask: net.CIDRMask(32, 32), } + containerV6Addr := &net.IPNet{ + IP: net.ParseIP("2600::2"), + Mask: net.CIDRMask(128, 128), + } vlanRuleForRTTable107 := netlink.NewRule() vlanRuleForRTTable107.Priority = networkutils.VlanRulePriority vlanRuleForRTTable107.Table = 107 + vlanRuleForRTTable107.Family = netlink.FAMILY_V4 + + vlanV6RuleForRTTable107 := netlink.NewRule() + vlanV6RuleForRTTable107.Priority = networkutils.VlanRulePriority + vlanV6RuleForRTTable107.Table = 107 + vlanV6RuleForRTTable107.Family = netlink.FAMILY_V6 toContainerRoute := &netlink.Route{ Scope: netlink.SCOPE_LINK, Dst: containerAddr, Table: unix.RT_TABLE_MAIN, } + toContainerV6Route := &netlink.Route{ + Scope: netlink.SCOPE_LINK, + Dst: containerV6Addr, + Table: unix.RT_TABLE_MAIN, + } toContainerRule := netlink.NewRule() toContainerRule.Dst = containerAddr toContainerRule.Priority = networkutils.ToContainerRulePriority toContainerRule.Table = unix.RT_TABLE_MAIN + toContainerV6Rule := netlink.NewRule() + toContainerV6Rule.Dst = containerV6Addr + toContainerV6Rule.Priority = networkutils.ToContainerRulePriority + toContainerV6Rule.Table = unix.RT_TABLE_MAIN + fromContainerRule := netlink.NewRule() fromContainerRule.Src = containerAddr fromContainerRule.Priority = networkutils.FromPodRulePriority fromContainerRule.Table = 107 + fromContainerV6Rule := netlink.NewRule() + fromContainerV6Rule.Src = containerV6Addr + fromContainerV6Rule.Priority = networkutils.FromPodRulePriority + fromContainerV6Rule.Table = 107 + type linkByNameCall struct { linkName string link netlink.Link @@ -1292,7 +1669,54 @@ func Test_linuxNetwork_TeardownBranchENIPodNetwork(t *testing.T) { wantErr error }{ { - name: "successfully teardown pod network - pod was setup under strict mode", + name: "successfully teardown pod network - pod was setup under strict mode", + fields: fields{ + linkByNameCalls: []linkByNameCall{ + { + linkName: "vlan.eth.7", + link: &netlink.Vlan{VlanId: vlanID}, + }, + }, + linkDelCalls: []linkDelCall{ + { + link: &netlink.Vlan{VlanId: vlanID}, + }, + }, + routeDelCalls: []routeDelCall{ + { + route: toContainerRoute, + err: syscall.ESRCH, + }, + }, + ruleDelCalls: []ruleDelCall{ + { + rule: vlanRuleForRTTable107, + }, + { + rule: vlanRuleForRTTable107, + }, + { + rule: vlanRuleForRTTable107, + err: syscall.ENOENT, + }, + { + rule: toContainerRule, + err: syscall.ENOENT, + }, + { + rule: fromContainerRule, + err: syscall.ENOENT, + }, + }, + }, + args: args{ + containerAddr: containerAddr, + vlanID: vlanID, + podSGEnforcingMode: sgpp.EnforcingModeStrict, + }, + }, + { + name: "successfully teardown v6 pod network - pod was setup under strict mode", fields: fields{ linkByNameCalls: []linkByNameCall{ { @@ -1307,33 +1731,33 @@ func Test_linuxNetwork_TeardownBranchENIPodNetwork(t *testing.T) { }, routeDelCalls: []routeDelCall{ { - route: 
toContainerRoute, + route: toContainerV6Route, err: syscall.ESRCH, }, }, ruleDelCalls: []ruleDelCall{ { - rule: vlanRuleForRTTable107, + rule: vlanV6RuleForRTTable107, }, { - rule: vlanRuleForRTTable107, + rule: vlanV6RuleForRTTable107, }, { - rule: vlanRuleForRTTable107, + rule: vlanV6RuleForRTTable107, err: syscall.ENOENT, }, { - rule: toContainerRule, + rule: toContainerV6Rule, err: syscall.ENOENT, }, { - rule: fromContainerRule, + rule: fromContainerV6Rule, err: syscall.ENOENT, }, }, }, args: args{ - containerAddr: containerAddr, + containerAddr: containerV6Addr, vlanID: vlanID, podSGEnforcingMode: sgpp.EnforcingModeStrict, }, @@ -1380,6 +1804,48 @@ func Test_linuxNetwork_TeardownBranchENIPodNetwork(t *testing.T) { podSGEnforcingMode: sgpp.EnforcingModeStandard, }, }, + { + name: "successfully teardown v6 pod network - pod was setup under standard mode", + fields: fields{ + linkByNameCalls: []linkByNameCall{ + { + linkName: "vlan.eth.7", + link: &netlink.Vlan{VlanId: vlanID}, + }, + }, + linkDelCalls: []linkDelCall{ + { + link: &netlink.Vlan{VlanId: vlanID}, + }, + }, + routeDelCalls: []routeDelCall{ + { + route: toContainerV6Route, + }, + }, + ruleDelCalls: []ruleDelCall{ + { + rule: vlanV6RuleForRTTable107, + err: syscall.ENOENT, + }, + { + rule: toContainerV6Rule, + }, + { + rule: fromContainerV6Rule, + }, + { + rule: fromContainerV6Rule, + err: syscall.ENOENT, + }, + }, + }, + args: args{ + containerAddr: containerV6Addr, + vlanID: vlanID, + podSGEnforcingMode: sgpp.EnforcingModeStandard, + }, + }, { name: "failed to teardown vlan", fields: fields{ @@ -2792,6 +3258,10 @@ func Test_linuxNetwork_setupVeth(t *testing.T) { }, { key: "net/ipv6/conf/eni8ea2c11fe35/accept_redirects", + value: "1", + }, + { + key: "net/ipv6/conf/eni8ea2c11fe35/forwarding", value: "0", }, }, @@ -2838,6 +3308,10 @@ func Test_linuxNetwork_setupVeth(t *testing.T) { }, { key: "net/ipv6/conf/eni8ea2c11fe35/accept_redirects", + value: "1", + }, + { + key: "net/ipv6/conf/eni8ea2c11fe35/forwarding", value: "0", }, }, @@ -2979,7 +3453,7 @@ func Test_linuxNetwork_setupVeth(t *testing.T) { }, { key: "net/ipv6/conf/eni8ea2c11fe35/accept_redirects", - value: "0", + value: "1", err: errors.New("some error"), }, }, @@ -3022,6 +3496,11 @@ func Test_linuxNetwork_setupVeth(t *testing.T) { }, { key: "net/ipv6/conf/eni8ea2c11fe35/accept_redirects", + value: "1", + err: syscall.ENOENT, + }, + { + key: "net/ipv6/conf/eni8ea2c11fe35/forwarding", value: "0", err: syscall.ENOENT, }, @@ -3065,6 +3544,10 @@ func Test_linuxNetwork_setupVeth(t *testing.T) { }, { key: "net/ipv6/conf/eni8ea2c11fe35/accept_redirects", + value: "1", + }, + { + key: "net/ipv6/conf/eni8ea2c11fe35/forwarding", value: "0", }, }, @@ -3149,12 +3632,19 @@ func Test_linuxNetwork_setupVlan(t *testing.T) { route *netlink.Route err error } + type procSysSetCall struct { + key string + value string + err error + } + type fields struct { linkByNameCalls []linkByNameCall linkAddCalls []linkAddCall linkDelCalls []linkDelCall linkSetupCalls []linkSetupCall routeReplaceCalls []routeReplaceCall + procSysSetCalls []procSysSetCall } type args struct { @@ -3210,6 +3700,20 @@ func Test_linuxNetwork_setupVlan(t *testing.T) { }, }, }, + procSysSetCalls: []procSysSetCall{ + { + key: "net/ipv6/conf/vlan.eth.7/accept_ra", + value: "0", + }, + { + key: "net/ipv6/conf/vlan.eth.7/accept_redirects", + value: "1", + }, + { + key: "net/ipv6/conf/vlan.eth.7/forwarding", + value: "0", + }, + }, }, args: args{ vlanID: vlanID, @@ -3264,6 +3768,20 @@ func Test_linuxNetwork_setupVlan(t 
*testing.T) { }, }, }, + procSysSetCalls: []procSysSetCall{ + { + key: "net/ipv6/conf/vlan.eth.7/accept_ra", + value: "0", + }, + { + key: "net/ipv6/conf/vlan.eth.7/accept_redirects", + value: "1", + }, + { + key: "net/ipv6/conf/vlan.eth.7/forwarding", + value: "0", + }, + }, }, args: args{ vlanID: vlanID, @@ -3345,6 +3863,20 @@ func Test_linuxNetwork_setupVlan(t *testing.T) { err: errors.New("some error"), }, }, + procSysSetCalls: []procSysSetCall{ + { + key: "net/ipv6/conf/vlan.eth.7/accept_ra", + value: "0", + }, + { + key: "net/ipv6/conf/vlan.eth.7/accept_redirects", + value: "1", + }, + { + key: "net/ipv6/conf/vlan.eth.7/forwarding", + value: "0", + }, + }, }, args: args{ vlanID: vlanID, @@ -3386,6 +3918,20 @@ func Test_linuxNetwork_setupVlan(t *testing.T) { err: errors.New("some error"), }, }, + procSysSetCalls: []procSysSetCall{ + { + key: "net/ipv6/conf/vlan.eth.7/accept_ra", + value: "0", + }, + { + key: "net/ipv6/conf/vlan.eth.7/accept_redirects", + value: "1", + }, + { + key: "net/ipv6/conf/vlan.eth.7/forwarding", + value: "0", + }, + }, }, args: args{ vlanID: vlanID, @@ -3425,9 +3971,14 @@ func Test_linuxNetwork_setupVlan(t *testing.T) { for _, call := range tt.fields.routeReplaceCalls { netLink.EXPECT().RouteReplace(call.route).Return(call.err) } + procSys := mock_procsyswrapper.NewMockProcSys(ctrl) + for _, call := range tt.fields.procSysSetCalls { + procSys.EXPECT().Set(call.key, call.value).Return(call.err) + } n := &linuxNetwork{ netLink: netLink, + procSys: procSys, } got, err := n.setupVlan(tt.args.vlanID, tt.args.eniMAC, tt.args.subnetGW, tt.args.parentIfIndex, tt.args.rtTable, testLogger) if tt.wantErr != nil { @@ -3554,16 +4105,31 @@ func Test_linuxNetwork_setupIPBasedContainerRouteRules(t *testing.T) { IP: net.ParseIP("192.168.100.42"), Mask: net.CIDRMask(32, 32), } + containerV6Addr := &net.IPNet{ + IP: net.ParseIP("2600::2"), + Mask: net.CIDRMask(128, 128), + } toContainerRule := netlink.NewRule() toContainerRule.Dst = containerAddr toContainerRule.Priority = networkutils.ToContainerRulePriority toContainerRule.Table = unix.RT_TABLE_MAIN + toContainerV6Rule := netlink.NewRule() + toContainerV6Rule.Dst = containerV6Addr + toContainerV6Rule.Priority = networkutils.ToContainerRulePriority + toContainerV6Rule.Table = unix.RT_TABLE_MAIN + fromContainerRule := netlink.NewRule() fromContainerRule.Src = containerAddr fromContainerRule.Priority = networkutils.FromPodRulePriority fromContainerRule.Table = 101 + + fromContainerV6Rule := netlink.NewRule() + fromContainerV6Rule.Src = containerV6Addr + fromContainerV6Rule.Priority = networkutils.FromPodRulePriority + fromContainerV6Rule.Table = 101 + type routeReplaceCall struct { route *netlink.Route err error @@ -3588,7 +4154,7 @@ func Test_linuxNetwork_setupIPBasedContainerRouteRules(t *testing.T) { wantErr error }{ { - name: "successfully setup routes and rules - without dedicated route table", + name: "successfully setup routes and rules - without dedicated route table - IPv4", fields: fields{ routeReplaceCalls: []routeReplaceCall{ { @@ -3613,7 +4179,32 @@ func Test_linuxNetwork_setupIPBasedContainerRouteRules(t *testing.T) { }, }, { - name: "successfully setup routes and rules - with dedicated route table", + name: "successfully setup routes and rules - without dedicated route table - IPv6", + fields: fields{ + routeReplaceCalls: []routeReplaceCall{ + { + route: &netlink.Route{ + LinkIndex: hostVethAttrs.Index, + Scope: netlink.SCOPE_LINK, + Dst: containerV6Addr, + Table: unix.RT_TABLE_MAIN, + }, + }, + }, + ruleAddCalls: 
[]ruleAddCall{ + { + rule: toContainerV6Rule, + }, + }, + }, + args: args{ + hostVethAttrs: hostVethAttrs, + containerAddr: containerV6Addr, + rtTable: unix.RT_TABLE_MAIN, + }, + }, + { + name: "successfully setup routes and rules - with dedicated route table - IPv4", fields: fields{ routeReplaceCalls: []routeReplaceCall{ { @@ -3640,6 +4231,34 @@ func Test_linuxNetwork_setupIPBasedContainerRouteRules(t *testing.T) { rtTable: 101, }, }, + { + name: "successfully setup routes and rules - with dedicated route table - IPv6", + fields: fields{ + routeReplaceCalls: []routeReplaceCall{ + { + route: &netlink.Route{ + LinkIndex: hostVethAttrs.Index, + Scope: netlink.SCOPE_LINK, + Dst: containerV6Addr, + Table: unix.RT_TABLE_MAIN, + }, + }, + }, + ruleAddCalls: []ruleAddCall{ + { + rule: toContainerV6Rule, + }, + { + rule: fromContainerV6Rule, + }, + }, + }, + args: args{ + hostVethAttrs: hostVethAttrs, + containerAddr: containerV6Addr, + rtTable: 101, + }, + }, { name: "successfully setup routes and rules - toContainerRule already exists", fields: fields{ @@ -3813,21 +4432,42 @@ func Test_linuxNetwork_teardownIPBasedContainerRouteRules(t *testing.T) { IP: net.ParseIP("192.168.100.42"), Mask: net.CIDRMask(32, 32), } + containerV6Addr := &net.IPNet{ + IP: net.ParseIP("2600::2"), + Mask: net.CIDRMask(128, 128), + } toContainerRoute := &netlink.Route{ Scope: netlink.SCOPE_LINK, Dst: containerAddr, Table: unix.RT_TABLE_MAIN, } + toContainerV6Route := &netlink.Route{ + Scope: netlink.SCOPE_LINK, + Dst: containerV6Addr, + Table: unix.RT_TABLE_MAIN, + } + toContainerRule := netlink.NewRule() toContainerRule.Dst = containerAddr toContainerRule.Priority = networkutils.ToContainerRulePriority toContainerRule.Table = unix.RT_TABLE_MAIN + toContainerV6Rule := netlink.NewRule() + toContainerV6Rule.Dst = containerV6Addr + toContainerV6Rule.Priority = networkutils.ToContainerRulePriority + toContainerV6Rule.Table = unix.RT_TABLE_MAIN + fromContainerRule := netlink.NewRule() fromContainerRule.Src = containerAddr fromContainerRule.Priority = networkutils.FromPodRulePriority fromContainerRule.Table = 101 + + fromContainerV6Rule := netlink.NewRule() + fromContainerV6Rule.Src = containerV6Addr + fromContainerV6Rule.Priority = networkutils.FromPodRulePriority + fromContainerV6Rule.Table = 101 + type routeDelCall struct { route *netlink.Route err error @@ -3852,7 +4492,7 @@ func Test_linuxNetwork_teardownIPBasedContainerRouteRules(t *testing.T) { wantErr error }{ { - name: "successfully teardown routes and rules - without dedicated route table", + name: "successfully teardown routes and rules - without dedicated route table - IPv4", fields: fields{ routeDelCalls: []routeDelCall{ { @@ -3871,7 +4511,26 @@ func Test_linuxNetwork_teardownIPBasedContainerRouteRules(t *testing.T) { }, }, { - name: "successfully teardown routes and rules - with dedicated route table", + name: "successfully teardown routes and rules - without dedicated route table - IPv6", + fields: fields{ + routeDelCalls: []routeDelCall{ + { + route: toContainerV6Route, + }, + }, + ruleDelCalls: []ruleDelCall{ + { + rule: toContainerV6Rule, + }, + }, + }, + args: args{ + containerAddr: containerV6Addr, + rtTable: unix.RT_TABLE_MAIN, + }, + }, + { + name: "successfully teardown routes and rules - with dedicated route table - IPv4", fields: fields{ routeDelCalls: []routeDelCall{ { @@ -3896,6 +4555,32 @@ func Test_linuxNetwork_teardownIPBasedContainerRouteRules(t *testing.T) { rtTable: 101, }, }, + { + name: "successfully teardown routes and rules - with 
dedicated route table - IPv6", + fields: fields{ + routeDelCalls: []routeDelCall{ + { + route: toContainerV6Route, + }, + }, + ruleDelCalls: []ruleDelCall{ + { + rule: toContainerV6Rule, + }, + { + rule: fromContainerV6Rule, + }, + { + rule: fromContainerV6Rule, + err: syscall.ENOENT, + }, + }, + }, + args: args{ + containerAddr: containerV6Addr, + rtTable: 101, + }, + }, { name: "successfully teardown routes and rules - succeed when route already deleted", fields: fields{ @@ -4004,6 +4689,10 @@ func Test_linuxNetwork_setupIIFBasedContainerRouteRules(t *testing.T) { IP: net.ParseIP("192.168.100.42"), Mask: net.CIDRMask(32, 32), } + containerV6Addr := &net.IPNet{ + IP: net.ParseIP("2600::2"), + Mask: net.CIDRMask(128, 128), + } rtTable := 101 fromHostVlanRule := netlink.NewRule() @@ -4011,10 +4700,23 @@ func Test_linuxNetwork_setupIIFBasedContainerRouteRules(t *testing.T) { fromHostVlanRule.Priority = networkutils.VlanRulePriority fromHostVlanRule.Table = rtTable + fromHostV6VlanRule := netlink.NewRule() + fromHostV6VlanRule.IifName = hostVlanAttrs.Name + fromHostV6VlanRule.Priority = networkutils.VlanRulePriority + fromHostV6VlanRule.Table = rtTable + fromHostV6VlanRule.Family = unix.AF_INET6 + fromHostVethRule := netlink.NewRule() fromHostVethRule.IifName = hostVethAttrs.Name fromHostVethRule.Priority = networkutils.VlanRulePriority fromHostVethRule.Table = rtTable + + fromHostV6VethRule := netlink.NewRule() + fromHostV6VethRule.IifName = hostVethAttrs.Name + fromHostV6VethRule.Priority = networkutils.VlanRulePriority + fromHostV6VethRule.Table = rtTable + fromHostV6VethRule.Family = unix.AF_INET6 + type routeReplaceCall struct { route *netlink.Route err error @@ -4040,7 +4742,7 @@ func Test_linuxNetwork_setupIIFBasedContainerRouteRules(t *testing.T) { wantErr error }{ { - name: "successfully setup routes and rules", + name: "successfully setup routes and rules - IPv4", fields: fields{ routeReplaceCalls: []routeReplaceCall{ { @@ -4068,6 +4770,35 @@ func Test_linuxNetwork_setupIIFBasedContainerRouteRules(t *testing.T) { rtTable: rtTable, }, }, + { + name: "successfully setup routes and rules - IPv6", + fields: fields{ + routeReplaceCalls: []routeReplaceCall{ + { + route: &netlink.Route{ + LinkIndex: hostVethAttrs.Index, + Scope: netlink.SCOPE_LINK, + Dst: containerV6Addr, + Table: rtTable, + }, + }, + }, + ruleAddCalls: []ruleAddCall{ + { + rule: fromHostV6VlanRule, + }, + { + rule: fromHostV6VethRule, + }, + }, + }, + args: args{ + hostVethAttrs: hostVethAttrs, + containerAddr: containerV6Addr, + hostVlanAttrs: hostVlanAttrs, + rtTable: rtTable, + }, + }, { name: "successfully setup routes and rules - fromHostVlanRule already exists", fields: fields{ @@ -4247,6 +4978,13 @@ func Test_linuxNetwork_teardownIIFBasedContainerRouteRules(t *testing.T) { vlanRuleForTableID101 := netlink.NewRule() vlanRuleForTableID101.Priority = networkutils.VlanRulePriority vlanRuleForTableID101.Table = 101 + vlanRuleForTableID101.Family = netlink.FAMILY_V4 + + vlanIPv6RuleForTableID101 := netlink.NewRule() + vlanIPv6RuleForTableID101.Priority = networkutils.VlanRulePriority + vlanIPv6RuleForTableID101.Table = 101 + vlanIPv6RuleForTableID101.Family = netlink.FAMILY_V6 + type ruleDelCall struct { rule *netlink.Rule err error @@ -4257,6 +4995,7 @@ func Test_linuxNetwork_teardownIIFBasedContainerRouteRules(t *testing.T) { type args struct { rtTable int + family int } tests := []struct { name string @@ -4265,7 +5004,7 @@ func Test_linuxNetwork_teardownIIFBasedContainerRouteRules(t *testing.T) { wantErr error }{ { - 
name: "teardown both rules successfully", + name: "teardown both rules successfully - IPv4", fields: fields{ ruleDelCalls: []ruleDelCall{ { @@ -4282,6 +5021,28 @@ func Test_linuxNetwork_teardownIIFBasedContainerRouteRules(t *testing.T) { }, args: args{ rtTable: 101, + family: netlink.FAMILY_V4, + }, + }, + { + name: "teardown both rules successfully - IPv6", + fields: fields{ + ruleDelCalls: []ruleDelCall{ + { + rule: vlanIPv6RuleForTableID101, + }, + { + rule: vlanIPv6RuleForTableID101, + }, + { + rule: vlanIPv6RuleForTableID101, + err: syscall.ENOENT, + }, + }, + }, + args: args{ + rtTable: 101, + family: netlink.FAMILY_V6, }, }, { @@ -4296,6 +5057,7 @@ func Test_linuxNetwork_teardownIIFBasedContainerRouteRules(t *testing.T) { }, args: args{ rtTable: 101, + family: netlink.FAMILY_V4, }, wantErr: errors.New("failed to delete IIF based rules, rtTable=101: some error"), }, @@ -4313,7 +5075,7 @@ func Test_linuxNetwork_teardownIIFBasedContainerRouteRules(t *testing.T) { n := &linuxNetwork{ netLink: netLink, } - err := n.teardownIIFBasedContainerRouteRules(tt.args.rtTable, testLogger) + err := n.teardownIIFBasedContainerRouteRules(tt.args.rtTable, tt.args.family, testLogger) if tt.wantErr != nil { assert.EqualError(t, err, tt.wantErr.Error()) } else { @@ -4324,6 +5086,11 @@ func Test_linuxNetwork_teardownIIFBasedContainerRouteRules(t *testing.T) { } func Test_buildRoutesForVlan(t *testing.T) { + v4Gateway := net.ParseIP("192.168.128.1") + v6Gateway := net.ParseIP("fe80::beef") + vlanTableID := 101 + vlanIndex := 7 + type args struct { vlanTableID int vlanIndex int @@ -4337,23 +5104,46 @@ func Test_buildRoutesForVlan(t *testing.T) { { name: "IPv4", args: args{ - vlanTableID: 101, - vlanIndex: 7, - gw: net.ParseIP("192.168.128.1"), + vlanTableID: vlanTableID, + vlanIndex: vlanIndex, + gw: v4Gateway, }, want: []netlink.Route{ { - LinkIndex: 7, - Dst: &net.IPNet{IP: net.ParseIP("192.168.128.1"), Mask: net.CIDRMask(32, 32)}, + LinkIndex: vlanIndex, + Dst: &net.IPNet{IP: v4Gateway, Mask: net.CIDRMask(32, 32)}, Scope: netlink.SCOPE_LINK, - Table: 101, + Table: vlanTableID, }, { - LinkIndex: 7, + LinkIndex: vlanIndex, Dst: &net.IPNet{IP: net.IPv4zero, Mask: net.CIDRMask(0, 32)}, Scope: netlink.SCOPE_UNIVERSE, - Gw: net.ParseIP("192.168.128.1"), - Table: 101, + Gw: v4Gateway, + Table: vlanTableID, + }, + }, + }, + { + name: "IPv6", + args: args{ + vlanTableID: vlanTableID, + vlanIndex: vlanIndex, + gw: v6Gateway, + }, + want: []netlink.Route{ + { + LinkIndex: vlanIndex, + Dst: &net.IPNet{IP: v6Gateway, Mask: net.CIDRMask(128, 128)}, + Scope: netlink.SCOPE_LINK, + Table: vlanTableID, + }, + { + LinkIndex: vlanIndex, + Dst: &net.IPNet{IP: net.IPv6zero, Mask: net.CIDRMask(0, 128)}, + Scope: netlink.SCOPE_UNIVERSE, + Gw: v6Gateway, + Table: vlanTableID, }, }, }, diff --git a/go.mod b/go.mod index e2523497dd7..d5fb25d87bb 100644 --- a/go.mod +++ b/go.mod @@ -35,7 +35,7 @@ require ( helm.sh/helm/v3 v3.13.2 k8s.io/api v0.28.3 k8s.io/apimachinery v0.28.3 - k8s.io/cli-runtime v0.28.2 + k8s.io/cli-runtime v0.28.3 k8s.io/client-go v0.28.3 sigs.k8s.io/controller-runtime v0.16.3 ) diff --git a/go.sum b/go.sum index d689663ef44..0e6c6df6f9f 100644 --- a/go.sum +++ b/go.sum @@ -617,8 +617,8 @@ k8s.io/apimachinery v0.28.3 h1:B1wYx8txOaCQG0HmYF6nbpU8dg6HvA06x5tEffvOe7A= k8s.io/apimachinery v0.28.3/go.mod h1:uQTKmIqs+rAYaq+DFaoD2X7pcjLOqbQX2AOiO0nIpb8= k8s.io/apiserver v0.28.3 h1:8Ov47O1cMyeDzTXz0rwcfIIGAP/dP7L8rWbEljRcg5w= k8s.io/apiserver v0.28.3/go.mod h1:YIpM+9wngNAv8Ctt0rHG4vQuX/I5rvkEMtZtsxW2rNM= 
-k8s.io/cli-runtime v0.28.2 h1:64meB2fDj10/ThIMEJLO29a1oujSm0GQmKzh1RtA/uk= -k8s.io/cli-runtime v0.28.2/go.mod h1:bTpGOvpdsPtDKoyfG4EG041WIyFZLV9qq4rPlkyYfDA= +k8s.io/cli-runtime v0.28.3 h1:lvuJYVkwCqHEvpS6KuTZsUVwPePFjBfSGvuaLl2SxzA= +k8s.io/cli-runtime v0.28.3/go.mod h1:jeX37ZPjIcENVuXDDTskG3+FnVuZms5D9omDXS/2Jjc= k8s.io/client-go v0.28.3 h1:2OqNb72ZuTZPKCl+4gTKvqao0AMOl9f3o2ijbAj3LI4= k8s.io/client-go v0.28.3/go.mod h1:LTykbBp9gsA7SwqirlCXBWtK0guzfhpoW4qSm7i9dxo= k8s.io/component-base v0.28.3 h1:rDy68eHKxq/80RiMb2Ld/tbH8uAE75JdCqJyi6lXMzI= diff --git a/pkg/awsutils/awsutils.go b/pkg/awsutils/awsutils.go index d2c2776c4cc..dc2c9e3a40b 100644 --- a/pkg/awsutils/awsutils.go +++ b/pkg/awsutils/awsutils.go @@ -250,6 +250,16 @@ func (eni ENIMetadata) PrimaryIPv4Address() string { return "" } +// PrimaryIPv6Address returns the primary IPv6 address of this node +func (eni ENIMetadata) PrimaryIPv6Address() string { + for _, addr := range eni.IPv6Addresses { + if addr.Ipv6Address != nil { + return aws.StringValue(addr.Ipv6Address) + } + } + return "" +} + // TagMap keeps track of the EC2 tags on each ENI type TagMap map[string]string @@ -577,6 +587,7 @@ func (cache *EC2InstanceMetadataCache) getENIMetadata(eniMAC string) (ENIMetadat log.Debugf("Found ENI: %s, MAC %s, device %d", eniID, eniMAC, deviceNum) + // Get IPv4 and IPv6 addresses assigned to interface cidr, err := cache.imds.GetSubnetIPv4CIDRBlock(ctx, eniMAC) if err != nil { awsAPIErrInc("GetSubnetIPv4CIDRBlock", err) @@ -589,7 +600,6 @@ func (cache *EC2InstanceMetadataCache) getENIMetadata(eniMAC string) (ENIMetadat return ENIMetadata{}, err } - // TODO: return a simpler data structure. ec2ip4s := make([]*ec2.NetworkInterfacePrivateIpAddress, len(imdsIPv4s)) for i, ip4 := range imdsIPv4s { ec2ip4s[i] = &ec2.NetworkInterfacePrivateIpAddress{ @@ -598,6 +608,30 @@ func (cache *EC2InstanceMetadataCache) getENIMetadata(eniMAC string) (ENIMetadat } } + var ec2ip6s []*ec2.NetworkInterfaceIpv6Address + var subnetV6Cidr string + if cache.v6Enabled { + // For IPv6 ENIs, do not error on missing IPv6 information + v6cidr, err := cache.imds.GetSubnetIPv6CIDRBlocks(ctx, eniMAC) + if err != nil { + awsAPIErrInc("GetSubnetIPv6CIDRBlocks", err) + } else { + subnetV6Cidr = v6cidr.String() + } + + imdsIPv6s, err := cache.imds.GetIPv6s(ctx, eniMAC) + if err != nil { + awsAPIErrInc("GetIPv6s", err) + } else { + ec2ip6s = make([]*ec2.NetworkInterfaceIpv6Address, len(imdsIPv6s)) + for i, ip6 := range imdsIPv6s { + ec2ip6s[i] = &ec2.NetworkInterfaceIpv6Address{ + Ipv6Address: aws.String(ip6.String()), + } + } + } + } + var ec2ipv4Prefixes []*ec2.Ipv4PrefixSpecification var ec2ipv6Prefixes []*ec2.Ipv6PrefixSpecification @@ -637,6 +671,8 @@ func (cache *EC2InstanceMetadataCache) getENIMetadata(eniMAC string) (ENIMetadat SubnetIPv4CIDR: cidr.String(), IPv4Addresses: ec2ip4s, IPv4Prefixes: ec2ipv4Prefixes, + SubnetIPv6CIDR: subnetV6Cidr, + IPv6Addresses: ec2ip6s, IPv6Prefixes: ec2ipv6Prefixes, }, nil } diff --git a/pkg/awsutils/awsutils_test.go b/pkg/awsutils/awsutils_test.go index 02bdfad5ecc..a1f3719cbd6 100644 --- a/pkg/awsutils/awsutils_test.go +++ b/pkg/awsutils/awsutils_test.go @@ -957,6 +957,7 @@ func TestEC2InstanceMetadataCache_waitForENIAndPrefixesAttached(t *testing.T) { Ipv6Prefix: &v6PrefixIP, }, }, + IPv6Addresses: []*ec2.NetworkInterfaceIpv6Address{}, } tests := []struct { name string diff --git a/pkg/awsutils/imds.go b/pkg/awsutils/imds.go index 4c0141b2de3..901654f2de8 100644 --- a/pkg/awsutils/imds.go +++ b/pkg/awsutils/imds.go @@ -377,7 +377,7 @@ 
func (imds TypedIMDS) GetVPCIPv6CIDRBlocks(ctx context.Context, mac string) ([]n return ipnets, err } -// GetSubnetIPv6CIDRBlocks returns the IPv4 CIDR block for the subnet in which the interface resides. +// GetSubnetIPv6CIDRBlocks returns the IPv6 CIDR block for the subnet in which the interface resides. func (imds TypedIMDS) GetSubnetIPv6CIDRBlocks(ctx context.Context, mac string) (net.IPNet, error) { key := fmt.Sprintf("network/interfaces/macs/%s/subnet-ipv6-cidr-blocks", mac) return imds.getCIDR(ctx, key) diff --git a/pkg/ipamd/ipamd.go b/pkg/ipamd/ipamd.go index b2607ed998d..dcc6fcb296c 100644 --- a/pkg/ipamd/ipamd.go +++ b/pkg/ipamd/ipamd.go @@ -216,6 +216,8 @@ type IPAMContext struct { lastInsufficientCidrError time.Time enableManageUntaggedMode bool enablePodIPAnnotation bool + // For IPv6 Security Groups for Pods, the gateway address is cached in order to speedup CNI ADD + v6Gateway net.IP } // setUnmanagedENIs will rebuild the set of ENI IDs for ENIs tagged as "no_manage" @@ -446,11 +448,16 @@ func (c *IPAMContext) nodeInit() error { return err } + if c.enablePodENI { + // Patch CNINode with Security Groups for Pods feature regardless of CRD content. + c.enableSecurityGroupsForPods(ctx) + } + if c.enableIPv6 { // We will not support upgrading/converting an existing IPv4 cluster to operate in IPv6 mode. So, we will always - // start with a clean slate in IPv6 mode. We also don't have to deal with dynamic update of Prefix Delegation - // feature in IPv6 mode as we don't support (yet) a non-PD v6 option. In addition, we don't support custom - // networking & SGPP in IPv6 mode yet. So, we will skip the corresponding setup. Will save us from checking + // start with a clean slate in IPv6 mode. We also do not have to deal with dynamic update of Prefix Delegation + // feature in IPv6 mode as we do not support (yet) a non-PD v6 option. In addition, we do not support custom + // networking in IPv6 mode yet, so we will skip the corresponding setup. This will save us from checking // if IPv6 is enabled at multiple places. Once we start supporting these features in IPv6 mode, we can do away // with this check and not change anything else in the below setup. return nil @@ -517,12 +524,6 @@ func (c *IPAMContext) nodeInit() error { } } - if c.enablePodENI { - // Check if we want to add feature into CNINode for trunk interface during node init - // we don't check if the node already has trunk added during initialization - c.askForTrunkENIIfNeeded(ctx) - } - // On node init, check if datastore pool needs to be increased. If so, attach CIDRs from existing ENIs and attach new ENIs. if !c.disableENIProvisioning && c.isDatastorePoolTooLow() { if err := c.increaseDatastorePool(ctx); err != nil { @@ -600,11 +601,18 @@ func (c *IPAMContext) updateIPStats(unmanaged int) { // StartNodeIPPoolManager monitors the IP pool, add or del them when it is required. func (c *IPAMContext) StartNodeIPPoolManager() { + // For IPv6, if Security Groups for Pods is enabled, wait until trunk ENI is attached and add it to the datastore. if c.enableIPv6 { - //Nothing to do in IPv6 Mode. IPv6 is only supported in Prefix delegation mode - //and VPC CNI will only attach one V6 Prefix. + if c.enablePodENI && c.dataStore.GetTrunkENI() == "" { + for !c.checkForTrunkENI() { + time.Sleep(ipPoolMonitorInterval) + } + } + // Outside of Security Groups for Pods, no additional ENIs are attached in IPv6 mode. + // The prefix used for the primary ENI is more than enough for all pods. 
return } + sleepDuration := ipPoolMonitorInterval / 2 ctx := context.Background() for { @@ -618,10 +626,6 @@ func (c *IPAMContext) StartNodeIPPoolManager() { } func (c *IPAMContext) updateIPPoolIfRequired(ctx context.Context) { - if c.enablePodENI && c.dataStore.GetTrunkENI() == "" { - c.askForTrunkENIIfNeeded(ctx) - } - if c.isDatastorePoolTooLow() { c.increaseDatastorePool(ctx) } else if c.isDatastorePoolTooHigh() { @@ -1035,21 +1039,30 @@ func (c *IPAMContext) setupENI(eni string, eniMetadata awsutils.ENIMetadata, isT if err != nil && err.Error() != datastore.DuplicatedENIError { return errors.Wrapf(err, "failed to add ENI %s to data store", eni) } - // Store the primary IP of the ENI - c.primaryIP[eni] = eniMetadata.PrimaryIPv4Address() + v6TrunkEni := c.enableIPv6 && isTrunkENI + // Store the addressable IP for the ENI + if v6TrunkEni { + c.primaryIP[eni] = eniMetadata.PrimaryIPv6Address() + } else { + c.primaryIP[eni] = eniMetadata.PrimaryIPv4Address() + } if c.enableIPv6 && eni == primaryENI { - // In v6 PD Mode, VPC CNI will only manage primary ENI. Once we start supporting secondary IP and custom - // networking modes for v6, we will relax this restriction. We filter out all the ENIs except Primary ENI - // in v6 mode (prior to landing here), but included the primary ENI check as a safety net. + // In v6 PD mode, VPC CNI will only manage the primary ENI and trunk ENI. Once we start supporting secondary + // IP and custom networking modes for IPv6, this restriction can be relaxed. err := c.assignIPv6Prefix(eni) if err != nil { return errors.Wrapf(err, "Failed to allocate IPv6 Prefixes to Primary ENI") } } else { - // For secondary ENIs, set up the network + var gw net.IP + // For other ENIs, set up the network if eni != primaryENI { - err = c.networkClient.SetupENINetwork(c.primaryIP[eni], eniMetadata.MAC, eniMetadata.DeviceNumber, eniMetadata.SubnetIPv4CIDR) + subnetCidr := eniMetadata.SubnetIPv4CIDR + if v6TrunkEni { + subnetCidr = eniMetadata.SubnetIPv6CIDR + } + gw, err = c.networkClient.SetupENINetwork(c.primaryIP[eni], eniMetadata.MAC, eniMetadata.DeviceNumber, subnetCidr) if err != nil { // Failed to set up the ENI errRemove := c.dataStore.RemoveENIFromDataStore(eni, true) @@ -1060,12 +1073,23 @@ func (c *IPAMContext) setupENI(eni string, eniMetadata awsutils.ENIMetadata, isT return errors.Wrapf(err, "failed to set up ENI %s network", eni) } } - log.Infof("Found ENIs having %d secondary IPs and %d Prefixes", len(eniMetadata.IPv4Addresses), len(eniMetadata.IPv4Prefixes)) - // Either case add the IPs and prefixes to datastore. - c.addENIsecondaryIPsToDataStore(eniMetadata.IPv4Addresses, eni) - c.addENIv4prefixesToDataStore(eniMetadata.IPv4Prefixes, eni) + if !v6TrunkEni { + log.Infof("Found ENIs having %d secondary IPs and %d Prefixes", len(eniMetadata.IPv4Addresses), len(eniMetadata.IPv4Prefixes)) + // Either case add the IPs and prefixes to datastore. + c.addENIsecondaryIPsToDataStore(eniMetadata.IPv4Addresses, eni) + c.addENIv4prefixesToDataStore(eniMetadata.IPv4Prefixes, eni) + } else { + // Cache IPv6 gateway address to speed up CNI ADD + c.v6Gateway = gw + // In strict mode, install gateway rule for ICMPv6 traffic. Note that this is a global rule, but installing/removing here is acceptable as + // the trunk ENI is only attached once. 
+ if err := c.networkClient.UpdateIPv6GatewayRule(&gw); err != nil { + return errors.Wrapf(err, "failed to install IPv6 gateway rule") + } + // This is a trunk ENI in IPv6 PD mode, so do not add IPs or prefixes to datastore + log.Infof("Found IPv6 trunk ENI having %d secondary IPs and %d Prefixes", len(eniMetadata.IPv6Addresses), len(eniMetadata.IPv6Prefixes)) + } } - return nil } @@ -1193,17 +1217,11 @@ func (c *IPAMContext) logPoolStats(dataStoreStats *datastore.DataStoreStats) { log.Debugf("%s: %s, c.maxIPsPerENI = %d", prefix, dataStoreStats, c.maxIPsPerENI) } -func (c *IPAMContext) askForTrunkENIIfNeeded(ctx context.Context) { - // Check that there is room for a trunk ENI to be attached. - if c.dataStore.GetENIs() >= (c.maxENI - c.unmanagedENI) { - log.Error("No slot available for a trunk ENI to be attached.") - return - } - - // We need to signal that VPC Resource Controller needs to attach a trunk ENI. +func (c *IPAMContext) enableSecurityGroupsForPods(ctx context.Context) { + // Signal to the VPC Resource Controller that Security Groups for Pods is enabled err := c.AddFeatureToCNINode(ctx, rcv1alpha1.SecurityGroupsForPods, "") if err != nil { - podENIErrInc("askForTrunkENIIfNeeded") + podENIErrInc("enableSecurityGroupsForPods") log.Errorf("Failed to add SGP feature to CNINode resource", err) } else { log.Infof("Successfully added feature %s to CNINode if not existing", rcv1alpha1.SecurityGroupsForPods) @@ -1268,6 +1286,29 @@ func podENIErrInc(fn string) { prometheusmetrics.PodENIErr.With(prometheus.Labels{"fn": fn}).Inc() } +// Used in IPv6 mode to check if trunk ENI has been successfully attached +func (c *IPAMContext) checkForTrunkENI() bool { + metadataResult, err := c.awsClient.DescribeAllENIs() + if err != nil { + log.Debug("failed to describe attached ENIs") + return false + } + if metadataResult.TrunkENI != "" { + for _, eni := range metadataResult.ENIMetadata { + if eni.ENIID == metadataResult.TrunkENI { + if err := c.setupENI(eni.ENIID, eni, true, false); err == nil { + log.Infof("ENI %s set up", eni.ENIID) + return true + } else { + log.Debugf("failed to setup ENI %s: %v", eni.ENIID, err) + return false + } + } + } + } + return false +} + // nodeIPPoolReconcile reconcile ENI and IP info from metadata service and IP addresses in datastore func (c *IPAMContext) nodeIPPoolReconcile(ctx context.Context, interval time.Duration) { // To reduce the number of EC2 API calls, skip reconciliation if IPs were recently added to the datastore. @@ -2157,17 +2198,15 @@ func (c *IPAMContext) DeallocCidrs(eniID string, deletableCidrs []datastore.Cidr // getPrefixesNeeded returns the number of prefixes need to be allocated to the ENI func (c *IPAMContext) getPrefixesNeeded() int { - - //By default allocate 1 prefix at a time + // By default allocate 1 prefix at a time toAllocate := 1 - //TODO - post GA we can evaluate to see if these two calls can be merged. - //datastoreTargetState already has complex math so adding Prefix target will make it - //even more complex. + // TODO - post GA we can evaluate to see if these two calls can be merged. + // datastoreTargetState already has complex math so adding Prefix target will make it even more complex. 
short, _, warmIPTargetDefined := c.datastoreTargetState() shortPrefixes, warmPrefixTargetDefined := c.datastorePrefixTargetState() -//WARM_IP_TARGET takes precendence over WARM_PREFIX_TARGET + // WARM_IP_TARGET takes precedence over WARM_PREFIX_TARGET if warmIPTargetDefined { toAllocate = max(toAllocate, short) } else if warmPrefixTargetDefined { toAllocate = max(toAllocate, shortPrefixes) } @@ -2200,23 +2239,22 @@ func (c *IPAMContext) initENIAndIPLimits() (err error) { } func (c *IPAMContext) isConfigValid() bool { - //Validate that only one among v4 and v6 is enabled. + // Validate that only one among v4 and v6 is enabled. if c.enableIPv4 && c.enableIPv6 { - log.Errorf("IPv4 and IPv6 are both enabled. VPC CNI currently doesn't support dual stack mode") + log.Errorf("IPv4 and IPv6 are both enabled. VPC CNI currently does not support dual stack mode") return false } else if !c.enableIPv4 && !c.enableIPv6 { log.Errorf("IPv4 and IPv6 are both disabled. One of them have to be enabled") return false } - //Validate PD mode is enabled if VPC CNI is operating in IPv6 mode. SGPP and Custom networking are not supported in IPv6 mode. - if c.enableIPv6 && (c.enablePodENI || c.useCustomNetworking || !c.enablePrefixDelegation) { - log.Errorf("IPv6 is supported only in Prefix Delegation mode. Security Group Per Pod and " + - "Custom Networking are not supported in IPv6 mode. Please set the env variables accordingly.") + // Validate PD mode is enabled if VPC CNI is operating in IPv6 mode. Custom networking is not supported in IPv6 mode. + if c.enableIPv6 && (c.useCustomNetworking || !c.enablePrefixDelegation) { + log.Errorf("IPv6 is supported only in Prefix Delegation mode. Custom Networking is not supported in IPv6 mode. Please set the env variables accordingly.") return false } - //Validate Prefix Delegation against v4 and v6 modes. + // Validate Prefix Delegation against v4 and v6 modes. if c.enablePrefixDelegation && !c.awsClient.IsPrefixDelegationSupported() { if c.enableIPv6 { log.Errorf("Prefix Delegation is not supported on non-nitro instance %s. IPv6 is only supported in Prefix delegation Mode. 
", c.awsClient.GetInstanceType()) diff --git a/pkg/ipamd/ipamd_test.go b/pkg/ipamd/ipamd_test.go index 3bbffb1d78a..ea0c103c817 100644 --- a/pkg/ipamd/ipamd_test.go +++ b/pkg/ipamd/ipamd_test.go @@ -1877,7 +1877,7 @@ func TestIPAMContext_setupENI(t *testing.T) { newENIMetadata := getSecondaryENIMetadata() m.awsutils.EXPECT().GetPrimaryENI().Return(primaryENIid) - m.network.EXPECT().SetupENINetwork(gomock.Any(), secMAC, secDevice, primarySubnet).Return(errors.New("not able to set route 0.0.0.0/0 via 10.10.10.1 table 2")) + m.network.EXPECT().SetupENINetwork(gomock.Any(), secMAC, secDevice, primarySubnet).Return(nil, errors.New("not able to set route 0.0.0.0/0 via 10.10.10.1 table 2")) err = mockContext.setupENI(newENIMetadata.ENIID, newENIMetadata, false, false) assert.Error(t, err) @@ -1923,14 +1923,14 @@ func TestIPAMContext_setupENIwithPDenabled(t *testing.T) { newENIMetadata := getSecondaryENIMetadata() m.awsutils.EXPECT().GetPrimaryENI().Return(primaryENIid) - m.network.EXPECT().SetupENINetwork(gomock.Any(), secMAC, secDevice, primarySubnet).Return(errors.New("not able to set route 0.0.0.0/0 via 10.10.10.1 table 2")) + m.network.EXPECT().SetupENINetwork(gomock.Any(), secMAC, secDevice, primarySubnet).Return(nil, errors.New("not able to set route 0.0.0.0/0 via 10.10.10.1 table 2")) err = mockContext.setupENI(newENIMetadata.ENIID, newENIMetadata, false, false) assert.Error(t, err) assert.Equal(t, 1, len(mockContext.primaryIP)) } -func TestIPAMContext_askForTrunkENIIfNeeded(t *testing.T) { +func TestIPAMContext_enableSecurityGroupsForPods(t *testing.T) { m := setup(t) defer m.ctrl.Finish() ctx := context.Background() @@ -1963,10 +1963,10 @@ func TestIPAMContext_askForTrunkENIIfNeeded(t *testing.T) { _ = mockContext.dataStore.AddENI("eni-1", 1, true, false, false) // If ENABLE_POD_ENI is not set, nothing happens - mockContext.askForTrunkENIIfNeeded(ctx) + mockContext.enableSecurityGroupsForPods(ctx) mockContext.enablePodENI = true - mockContext.askForTrunkENIIfNeeded(ctx) + mockContext.enableSecurityGroupsForPods(ctx) var notUpdatedNode corev1.Node NodeKey := types.NamespacedName{ Namespace: "", @@ -1984,12 +1984,12 @@ func TestIPAMContext_askForTrunkENIIfNeeded(t *testing.T) { contained := lo.ContainsBy(cniNode.Spec.Features, func(addedFeature rcscheme.Feature) bool { return rcscheme.SecurityGroupsForPods == addedFeature.Name && addedFeature.Value == "" }) - assert.False(t, contained, "the node's CNINode shouldn't be updated for trunk when there is no room") - assert.Equal(t, 0, len(cniNode.Spec.Features)) + assert.True(t, contained, "CNINode should be updated regardless of whether there is room for trunk ENI") + assert.Equal(t, 1, len(cniNode.Spec.Features)) + // Make room for trunk ENI mockContext.maxENI = 4 - // Now there is room! 
- mockContext.askForTrunkENIIfNeeded(ctx) + mockContext.enableSecurityGroupsForPods(ctx) err = mockContext.k8sClient.Get(ctx, types.NamespacedName{ Name: fakeNode.Name, @@ -1999,7 +1999,7 @@ func TestIPAMContext_askForTrunkENIIfNeeded(t *testing.T) { contained = lo.ContainsBy(cniNode.Spec.Features, func(addedFeature rcscheme.Feature) bool { return rcscheme.SecurityGroupsForPods == addedFeature.Name && addedFeature.Value == "" }) - assert.True(t, contained, "the node's CNINode should be updated for trunk when there is some room") + assert.True(t, contained, "CNINode should be updated regardless of whether there is room for trunk ENI") assert.Equal(t, 1, len(cniNode.Spec.Features)) } @@ -2089,7 +2089,7 @@ func TestIsConfigValid(t *testing.T) { podENIEnabled: true, isNitroInstance: true, }, - want: false, + want: true, }, { name: "ppsg enabled in v4 mode", @@ -2109,7 +2109,7 @@ func TestIsConfigValid(t *testing.T) { m := setup(t) defer m.ctrl.Finish() - if tt.fields.prefixDelegationEnabled && !(tt.fields.podENIEnabled && tt.fields.ipV6Enabled) { + if tt.fields.prefixDelegationEnabled { if tt.fields.isNitroInstance { m.awsutils.EXPECT().IsPrefixDelegationSupported().Return(true) } else { diff --git a/pkg/ipamd/rpc_handler.go b/pkg/ipamd/rpc_handler.go index 869754e5c5c..be148f5a6b2 100644 --- a/pkg/ipamd/rpc_handler.go +++ b/pkg/ipamd/rpc_handler.go @@ -53,11 +53,13 @@ type server struct { // PodENIData is used to parse the list of ENIs in the branch ENI pod annotation type PodENIData struct { - ENIID string `json:"eniId"` - IfAddress string `json:"ifAddress"` - PrivateIP string `json:"privateIp"` - VlanID int `json:"vlanID"` - SubnetCIDR string `json:"subnetCidr"` + ENIID string `json:"eniId"` + IfAddress string `json:"ifAddress"` + PrivateIP string `json:"privateIp"` + IPV6Addr string `json:"ipv6Addr"` + VlanID int `json:"vlanID"` + SubnetCIDR string `json:"subnetCidr"` + SubnetV6CIDR string `json:"subnetV6Cidr"` } // AddNetwork processes CNI add network request and return an IP address for container @@ -77,7 +79,7 @@ func (s *server) AddNetwork(ctx context.Context, in *rpc.AddNetworkRequest) (*rp var deviceNumber, vlanID, trunkENILinkIndex int var ipv4Addr, ipv6Addr, branchENIMAC, podENISubnetGW string var err error - if !s.ipamContext.enableIPv6 && s.ipamContext.enablePodENI { + if s.ipamContext.enablePodENI { // Check pod spec for Branch ENI pod, err := s.ipamContext.GetPod(in.K8S_POD_NAME, in.K8S_POD_NAMESPACE) if err != nil { @@ -108,23 +110,43 @@ func (s *server) AddNetwork(ctx context.Context, in *rpc.AddNetworkRequest) (*rp return &failureResponse, nil } firstENI := podENIData[0] - ipv4Addr = firstENI.PrivateIP + // Get pod IPv4 or IPv6 address based on mode + if s.ipamContext.enableIPv6 { + ipv6Addr = firstENI.IPV6Addr + } else { + ipv4Addr = firstENI.PrivateIP + } branchENIMAC = firstENI.IfAddress vlanID = firstENI.VlanID log.Debugf("Pod vlandId: %d", vlanID) - if ipv4Addr == "" || branchENIMAC == "" || vlanID == 0 { + if (ipv4Addr == "" && ipv6Addr == "") || branchENIMAC == "" || vlanID == 0 { log.Errorf("Failed to parse pod-ENI annotation: %s", val) return &failureResponse, nil } - currentGW := strings.Split(firstENI.SubnetCIDR, "/")[0] - // Increment value CIDR value - nextGWIP, err := networkutils.IncrementIPv4Addr(net.ParseIP(currentGW)) - if err != nil { - log.Errorf("Unable to get next Gateway IP for branch ENI from %s: %v", currentGW, err) - return &failureResponse, nil + var subnetCIDR *net.IPNet + if s.ipamContext.enableIPv6 { + _, subnetCIDR, err = 
net.ParseCIDR(firstENI.SubnetV6CIDR) + if err != nil { + log.Errorf("Failed to parse V6 subnet CIDR: %s", firstENI.SubnetV6CIDR) + return &failureResponse, nil + } + } else { + _, subnetCIDR, err = net.ParseCIDR(firstENI.SubnetCIDR) + if err != nil { + log.Errorf("Failed to parse V4 subnet CIDR: %s", firstENI.SubnetCIDR) + return &failureResponse, nil + } + } + var gw net.IP + // For IPv6, the gateway is derived from the RA route on the primary ENI. The primary ENI is always in the same subnet as the trunk and branch ENI. + // For IPv4, the gateway is always the .1 address for the subnet CIDR. + if s.ipamContext.enableIPv6 { + gw = s.ipamContext.v6Gateway + } else { + gw = networkutils.GetIPv4Gateway(subnetCIDR) } - podENISubnetGW = nextGWIP.String() + podENISubnetGW = gw.String() deviceNumber = -1 // Not needed for branch ENI, they depend on trunkENIDeviceIndex } else { log.Infof("Send AddNetworkReply: failed to get Branch ENI resource") diff --git a/pkg/networkutils/mocks/network_mocks.go b/pkg/networkutils/mocks/network_mocks.go index a4c9016777f..fb1c64ce2e2 100644 --- a/pkg/networkutils/mocks/network_mocks.go +++ b/pkg/networkutils/mocks/network_mocks.go @@ -124,11 +124,12 @@ func (mr *MockNetworkAPIsMockRecorder) GetRuleListBySrc(arg0, arg1 interface{}) } // SetupENINetwork mocks base method. -func (m *MockNetworkAPIs) SetupENINetwork(arg0, arg1 string, arg2 int, arg3 string) error { +func (m *MockNetworkAPIs) SetupENINetwork(arg0, arg1 string, arg2 int, arg3 string) (net.IP, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "SetupENINetwork", arg0, arg1, arg2, arg3) - ret0, _ := ret[0].(error) - return ret0 + ret0, _ := ret[0].(net.IP) + ret1, _ := ret[1].(error) + return ret0, ret1 } // SetupENINetwork indicates an expected call of SetupENINetwork. @@ -179,6 +180,20 @@ func (mr *MockNetworkAPIsMockRecorder) UpdateHostIptablesRules(arg0, arg1, arg2, return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateHostIptablesRules", reflect.TypeOf((*MockNetworkAPIs)(nil).UpdateHostIptablesRules), arg0, arg1, arg2, arg3, arg4) } +// UpdateIPv6GatewayRule mocks base method. +func (m *MockNetworkAPIs) UpdateIPv6GatewayRule(arg0 *net.IP) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "UpdateIPv6GatewayRule", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// UpdateIPv6GatewayRule indicates an expected call of UpdateIPv6GatewayRule. +func (mr *MockNetworkAPIsMockRecorder) UpdateIPv6GatewayRule(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateIPv6GatewayRule", reflect.TypeOf((*MockNetworkAPIs)(nil).UpdateIPv6GatewayRule), arg0) +} + // UpdateRuleListBySrc mocks base method. func (m *MockNetworkAPIs) UpdateRuleListBySrc(arg0 []netlink.Rule, arg1 net.IPNet) error { m.ctrl.T.Helper() diff --git a/pkg/networkutils/network.go b/pkg/networkutils/network.go index 1ab1b0e06e1..9888776dca7 100644 --- a/pkg/networkutils/network.go +++ b/pkg/networkutils/network.go @@ -15,7 +15,6 @@ package networkutils import ( - "encoding/binary" "encoding/csv" "fmt" "math" @@ -150,7 +149,7 @@ type NetworkAPIs interface { SetupHostNetwork(vpcCIDRs []string, primaryMAC string, primaryAddr *net.IP, enablePodENI bool, v4Enabled bool, v6Enabled bool) error // SetupENINetwork performs ENI level network configuration. 
Not needed on the primary ENI - SetupENINetwork(eniIP string, mac string, deviceNumber int, subnetCIDR string) error + SetupENINetwork(eniIP string, mac string, deviceNumber int, subnetCIDR string) (net.IP, error) // UpdateHostIptablesRules updates the nat table iptables rules on the host UpdateHostIptablesRules(vpcCIDRs []string, primaryMAC string, primaryAddr *net.IP, v4Enabled bool, v6Enabled bool) error UseExternalSNAT() bool @@ -161,6 +160,7 @@ type NetworkAPIs interface { UpdateRuleListBySrc(ruleList []netlink.Rule, src net.IPNet) error UpdateExternalServiceIpRules(ruleList []netlink.Rule, externalIPs []string) error GetLinkByMac(mac string, retryInterval time.Duration) (netlink.Link, error) + UpdateIPv6GatewayRule(gw *net.IP) error } type linuxNetwork struct { @@ -341,14 +341,13 @@ func (n *linuxNetwork) SetupHostNetwork(vpcv4CIDRs []string, primaryMAC string, } } - // If we want per pod ENIs, we need to give pod ENIs veth bridges a lower priority that the local table, - // or the rp_filter check will fail. - // Note: Per Pod Security Group is not supported for V6 yet. So, cordoning off the PPSG rule (for now) - // with v4 specific check. - if v4Enabled && enablePodENI && n.podSGEnforcingMode == sgpp.EnforcingModeStrict { + // In strict mode, packets egressing pod veth interfaces must route via the trunk ENI in order for security group + // rules to be applied. Therefore, the rule to lookup the local routing table is moved to a lower priority than VLAN rules. + if enablePodENI && n.podSGEnforcingMode == sgpp.EnforcingModeStrict { localRule := n.netLink.NewRule() localRule.Table = localRouteTable localRule.Priority = localRulePriority + localRule.Family = ipFamily // Add new rule with higher priority err := n.netLink.RuleAdd(localRule) if err != nil && !isRuleExistsError(err) { @@ -955,82 +954,126 @@ func linkByMac(mac string, netLink netlinkwrapper.NetLink, retryInterval time.Du } } +// For IPv6, derive gateway address from primary ENI's RA route. Note that in IPv6, all ENIs must be in the same subnet. 
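+// An RA-installed default route on eth0 looks roughly like this (the gateway value is illustrative):
+//   default via fe80::c9d:5dff:fec4:f389 dev eth0 proto ra
+// i.e. a main-table route whose protocol is "ra" and whose gateway is non-empty; that link-local gateway is what this function returns.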
+func GetIPv6Gateway() (net.IP, error) { + primaryEni, err := netlink.LinkByName("eth0") + if err != nil { + return nil, errors.Wrapf(err, "GetIPv6Gateway failed to get primary ENI") + } + primaryEniRoutes, err := netlink.RouteList(primaryEni, unix.AF_INET6) + if err != nil { + return nil, errors.Wrapf(err, "GetIPv6Gateway failed to list primary ENI routes") + } + for _, route := range primaryEniRoutes { + if route.Table == mainRoutingTable && len(route.Gw) != 0 && route.Protocol.String() == "ra" { + log.Infof("Found IPv6 gateway: %s", route.Gw.String()) + return route.Gw, nil + } + } + return nil, errors.New("GetIPv6Gateway failed to find eth0 ra route") +} + +// GetIPv4Gateway derives the subnet gateway (the first address after the subnet's network address) from the ENI subnet CIDR. Note that it increments eniSubnetCIDR.IP in place. +func GetIPv4Gateway(eniSubnetCIDR *net.IPNet) net.IP { + gw := eniSubnetCIDR.IP + incrementIPAddr(gw) + return gw +} + // SetupENINetwork adds default route to route table (eni-), so it does not need to be called on the primary ENI -func (n *linuxNetwork) SetupENINetwork(eniIP string, eniMAC string, deviceNumber int, eniSubnetCIDR string) error { +func (n *linuxNetwork) SetupENINetwork(eniIP string, eniMAC string, deviceNumber int, eniSubnetCIDR string) (net.IP, error) { return setupENINetwork(eniIP, eniMAC, deviceNumber, eniSubnetCIDR, n.netLink, retryLinkByMacInterval, retryRouteAddInterval, n.mtu) } func setupENINetwork(eniIP string, eniMAC string, deviceNumber int, eniSubnetCIDR string, netLink netlinkwrapper.NetLink, - retryLinkByMacInterval time.Duration, retryRouteAddInterval time.Duration, mtu int) error { + retryLinkByMacInterval time.Duration, retryRouteAddInterval time.Duration, mtu int) (net.IP, error) { if deviceNumber == 0 { - return errors.New("setupENINetwork should never be called on the primary ENI") + return nil, errors.New("setupENINetwork should never be called on the primary ENI") } tableNumber := deviceNumber + 1 log.Infof("Setting up network for an ENI with IP address %s, MAC address %s, CIDR %s and route table %d", eniIP, eniMAC, eniSubnetCIDR, tableNumber) link, err := linkByMac(eniMAC, netLink, retryLinkByMacInterval) if err != nil { - return errors.Wrapf(err, "setupENINetwork: failed to find the link which uses MAC address %s", eniMAC) + return nil, errors.Wrapf(err, "setupENINetwork: failed to find the link which uses MAC address %s", eniMAC) } if err = netLink.LinkSetMTU(link, mtu); err != nil { - return errors.Wrapf(err, "setupENINetwork: failed to set MTU to %d for %s", mtu, eniIP) + return nil, errors.Wrapf(err, "setupENINetwork: failed to set MTU to %d for %s", mtu, eniIP) } if err = netLink.LinkSetUp(link); err != nil { - return errors.Wrapf(err, "setupENINetwork: failed to bring up ENI %s", eniIP) + return nil, errors.Wrapf(err, "setupENINetwork: failed to bring up ENI %s", eniIP) } - _, ipnet, err := net.ParseCIDR(eniSubnetCIDR) + isV6 := strings.Contains(eniSubnetCIDR, ":") + _, eniSubnetIPNet, err := net.ParseCIDR(eniSubnetCIDR) if err != nil { - return errors.Wrapf(err, "setupENINetwork: invalid IPv4 CIDR block %s", eniSubnetCIDR) + return nil, errors.Wrapf(err, "setupENINetwork: invalid IP CIDR block %s", eniSubnetCIDR) } - - gw, err := IncrementIPv4Addr(ipnet.IP) - if err != nil { - return errors.Wrapf(err, "setupENINetwork: failed to define gateway address from %v", ipnet.IP) + // Get gateway IP address for ENI + var gw net.IP + if isV6 { + if gw, err = GetIPv6Gateway(); err != nil { + return nil, errors.Wrapf(err, "setupENINetwork: unable to get IPv6 gateway") + } + } else { + gw = GetIPv4Gateway(eniSubnetIPNet) } - // Explicitly set the IP on the device if not already set.
- // Required for older kernels. - // ip addr show - // ip add del dev (if necessary) - // ip add add dev + // Explicitly delete IP addresses assigned to the device before assign ENI IP. + // For IPv6, do not delete the link-local address. log.Debugf("Setting up ENI's primary IP %s", eniIP) - addrs, err := netLink.AddrList(link, unix.AF_INET) + var family int + if isV6 { + family = unix.AF_INET6 + } else { + family = unix.AF_INET + } + var addrs []netlink.Addr + addrs, err = netLink.AddrList(link, family) if err != nil { - return errors.Wrap(err, "setupENINetwork: failed to list IP address for ENI") + return nil, errors.Wrap(err, "setupENINetwork: failed to list IP address for ENI") } for _, addr := range addrs { - log.Debugf("Deleting existing IP address %s", addr.String()) - if err = netLink.AddrDel(link, &addr); err != nil { - return errors.Wrap(err, "setupENINetwork: failed to delete IP addr from ENI") + if addr.IP.IsGlobalUnicast() { + log.Debugf("Deleting existing IP address %s", addr.String()) + if err = netLink.AddrDel(link, &addr); err != nil { + return nil, errors.Wrap(err, "setupENINetwork: failed to delete IP addr from ENI") + } } } + + eniIPNet := net.ParseIP(eniIP) eniAddr := &net.IPNet{ - IP: net.ParseIP(eniIP), - Mask: ipnet.Mask, + IP: eniIPNet, + Mask: eniSubnetIPNet.Mask, } log.Debugf("Adding IP address %s", eniAddr.String()) if err = netLink.AddrAdd(link, &netlink.Addr{IPNet: eniAddr}); err != nil { - return errors.Wrap(err, "setupENINetwork: failed to add IP addr to ENI") + return nil, errors.Wrap(err, "setupENINetwork: failed to add IP addr to ENI") } linkIndex := link.Attrs().Index log.Debugf("Setting up ENI's default gateway %v, table %d, linkIndex %d", gw, tableNumber, linkIndex) + mask := 32 + zeroAddr := net.IPv4zero + if isV6 { + mask = 128 + zeroAddr = net.IPv6zero + } routes := []netlink.Route{ // Add a direct link route for the host's ENI IP only { LinkIndex: linkIndex, - Dst: &net.IPNet{IP: gw, Mask: net.CIDRMask(32, 32)}, + Dst: &net.IPNet{IP: gw, Mask: net.CIDRMask(mask, mask)}, Scope: netlink.SCOPE_LINK, Table: tableNumber, }, // Route all other traffic via the host's ENI IP { LinkIndex: linkIndex, - Dst: &net.IPNet{IP: net.IPv4zero, Mask: net.CIDRMask(0, 32)}, + Dst: &net.IPNet{IP: zeroAddr, Mask: net.CIDRMask(0, mask)}, Scope: netlink.SCOPE_UNIVERSE, Gw: gw, Table: tableNumber, @@ -1039,7 +1082,7 @@ func setupENINetwork(eniIP string, eniMAC string, deviceNumber int, eniSubnetCID for _, r := range routes { err := netLink.RouteDel(&r) if err != nil && !netlinkwrapper.IsNotExistsError(err) { - return errors.Wrap(err, "setupENINetwork: failed to clean up old routes") + return nil, errors.Wrap(err, "setupENINetwork: failed to clean up old routes") } err = retry.NWithBackoff(retry.NewSimpleBackoff(500*time.Millisecond, retryRouteAddInterval, 0.15, 2.0), maxRetryRouteAdd, func() error { @@ -1051,44 +1094,71 @@ func setupENINetwork(eniIP string, eniMAC string, deviceNumber int, eniSubnetCID return nil }) if err != nil { - return err + return gw, err } } // Remove the route that default out to ENI-x out of main route table - _, cidr, err := net.ParseCIDR(eniSubnetCIDR) - if err != nil { - return errors.Wrapf(err, "setupENINetwork: invalid IPv4 CIDR block %s", eniSubnetCIDR) + var defaultRoute netlink.Route + if isV6 { + defaultRoute = netlink.Route{ + LinkIndex: linkIndex, + Dst: eniSubnetIPNet, + Table: mainRoutingTable, + } + } else { + // eniSubnetIPNet was modified by GetIPv4Gateway, so the string must be parsed again + _, eniSubnetCIDRNet, err := 
net.ParseCIDR(eniSubnetCIDR) + if err != nil { + return gw, errors.Wrapf(err, "setupENINetwork: invalid IPv4 CIDR block: %s", eniSubnetCIDR) + } + defaultRoute = netlink.Route{ + Dst: eniSubnetCIDRNet, + Src: eniIPNet, + Table: mainRoutingTable, + Scope: netlink.SCOPE_LINK, + } } - defaultRoute := netlink.Route{ - Dst: cidr, - Src: net.ParseIP(eniIP), - Table: mainRoutingTable, - Scope: netlink.SCOPE_LINK, + if err := netLink.RouteDel(&defaultRoute); err != nil { + if !netlinkwrapper.IsNotExistsError(err) { + return gw, errors.Wrapf(err, "setupENINetwork: unable to delete default route %s for source IP %s", eniSubnetIPNet.String(), eniIP) + } } + return gw, nil +} - if err := netLink.RouteDel(&defaultRoute); err != nil { +// For IPv6 strict mode, ICMPv6 packets from the gateway must be looked up in the local routing table so that branch interfaces can resolve their gateway. +func (n *linuxNetwork) UpdateIPv6GatewayRule(gw *net.IP) error { + gatewayRule := n.netLink.NewRule() + gatewayRule.Src = &net.IPNet{IP: *gw, Mask: net.CIDRMask(128, 128)} + gatewayRule.IPProto = unix.IPPROTO_ICMPV6 + gatewayRule.Table = localRouteTable + gatewayRule.Priority = 0 + gatewayRule.Family = unix.AF_INET6 + if n.podSGEnforcingMode == sgpp.EnforcingModeStrict { + err := n.netLink.RuleAdd(gatewayRule) + if err != nil && !isRuleExistsError(err) { + return errors.Wrap(err, "UpdateIPv6GatewayRule: unable to create rule for IPv6 gateway") + } + } else { + // Rule must be deleted when not in strict mode to support transitions. + err := n.netLink.RuleDel(gatewayRule) if !netlinkwrapper.IsNotExistsError(err) { - return errors.Wrapf(err, "setupENINetwork: unable to delete default route %s for source IP %s", cidr.String(), eniIP) + return errors.Wrap(err, "UpdateIPv6GatewayRule: unable to delete rule for IPv6 gateway") } } return nil } -// IncrementIPv4Addr returns incremented IPv4 address -func IncrementIPv4Addr(ip net.IP) (net.IP, error) { - ip4 := ip.To4() - if ip4 == nil { - return nil, fmt.Errorf("%q is not a valid IPv4 Address", ip) - } - intIP := binary.BigEndian.Uint32(ip4) - if intIP == (1<<32 - 1) { - return nil, fmt.Errorf("%q will be overflowed", ip) +// Increment the given net.IP by one. Incrementing the last IP in an IP space (IPv4, IPv6) is undefined.
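+// Illustrative examples (mirroring the cases exercised in network_test.go):
+//   10.0.0.255 -> 10.0.1.0   (the carry propagates into the next octet)
+//   2600::     -> 2600::1
+// The slice passed in is modified in place; GetIPv4Gateway relies on this to turn a subnet's network address into its gateway address.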
+func incrementIPAddr(ip net.IP) { + for i := len(ip) - 1; i >= 0; i-- { + ip[i]++ + // only add to the next byte if we overflowed + if ip[i] != 0 { + break + } } - intIP++ - nextIPv4 := make(net.IP, 4) - binary.BigEndian.PutUint32(nextIPv4, intIP) - return nextIPv4, nil } // GetRuleList returns IP rules diff --git a/pkg/networkutils/network_test.go b/pkg/networkutils/network_test.go index d98862ae9bc..a1c280fa868 100644 --- a/pkg/networkutils/network_test.go +++ b/pkg/networkutils/network_test.go @@ -40,12 +40,15 @@ import ( ) const ( - loopback = "" - testMAC1 = "01:23:45:67:89:a0" - testMAC2 = "01:23:45:67:89:a1" - testTable = 10 - testeniIP = "10.10.10.20" - testeniSubnet = "10.10.0.0/16" + loopback = "" + testMAC1 = "01:23:45:67:89:a0" + testMAC2 = "01:23:45:67:89:a1" + testTable = 10 + testEniIP = "10.10.10.20" + testEniIP6 = "2600::2" + testEniSubnet = "10.10.0.0/16" + testEniV6Subnet = "2600::/64" + testEniV6Gateway = "fe80::c9d:5dff:fec4:f389" // Default MTU of ENI and veth // defined in plugins/routed-eni/driver/driver.go, pkg/networkutils/network.go testMTU = 9001 @@ -53,8 +56,11 @@ const ( ) var ( - _, testENINetIPNet, _ = net.ParseCIDR(testeniSubnet) - testENINetIP = net.ParseIP(testeniIP) + _, testEniSubnetIPNet, _ = net.ParseCIDR(testEniSubnet) + _, testEniV6SubnetIPNet, _ = net.ParseCIDR(testEniV6Subnet) + testEniIPNet = net.ParseIP(testEniIP) + testEniIP6Net = net.ParseIP(testEniIP6) + testEniV6GatewayNet = net.ParseIP(testEniV6Gateway) ) func setup(t *testing.T) (*gomock.Controller, @@ -101,12 +107,12 @@ func TestSetupENINetwork(t *testing.T) { eth1.EXPECT().Attrs().Return(mockLinkAttrs2) eth1.EXPECT().Attrs().Return(mockLinkAttrs2) // eth1's IP address - testeniAddr := &net.IPNet{ - IP: net.ParseIP(testeniIP), - Mask: testENINetIPNet.Mask, + testEniAddr := &net.IPNet{ + IP: net.ParseIP(testEniIP), + Mask: testEniSubnetIPNet.Mask, } mockNetLink.EXPECT().AddrList(gomock.Any(), unix.AF_INET).Return([]netlink.Addr{}, nil) - mockNetLink.EXPECT().AddrAdd(gomock.Any(), &netlink.Addr{IPNet: testeniAddr}).Return(nil) + mockNetLink.EXPECT().AddrAdd(gomock.Any(), &netlink.Addr{IPNet: testEniAddr}).Return(nil) mockNetLink.EXPECT().RouteDel(gomock.Any()) mockNetLink.EXPECT().RouteReplace(gomock.Any()).Return(nil) @@ -116,7 +122,56 @@ func TestSetupENINetwork(t *testing.T) { mockNetLink.EXPECT().RouteDel(gomock.Any()).Return(nil) - err = setupENINetwork(testeniIP, testMAC2, testTable, testeniSubnet, mockNetLink, 0*time.Second, 0*time.Second, testMTU) + _, err = setupENINetwork(testEniIP, testMAC2, testTable, testEniSubnet, mockNetLink, 0*time.Second, 0*time.Second, testMTU) + assert.NoError(t, err) +} + +func TestSetupENIV6Network(t *testing.T) { + ctrl, mockNetLink, _, _, _ := setup(t) + defer ctrl.Finish() + + hwAddr, err := net.ParseMAC(testMAC1) + assert.NoError(t, err) + mockLinkAttrs1 := &netlink.LinkAttrs{ + HardwareAddr: hwAddr, + } + hwAddr, err = net.ParseMAC(testMAC2) + assert.NoError(t, err) + mockLinkAttrs2 := &netlink.LinkAttrs{ + HardwareAddr: hwAddr, + } + lo := mock_netlink.NewMockLink(ctrl) + eth1 := mock_netlink.NewMockLink(ctrl) + // Emulate a delay attaching the ENI so a retry is necessary + // First attempt gets one links + firstlistSet := mockNetLink.EXPECT().LinkList().Return([]netlink.Link{lo}, nil) + lo.EXPECT().Attrs().Return(mockLinkAttrs1) + // Second attempt gets both links + secondlistSet := mockNetLink.EXPECT().LinkList().Return([]netlink.Link{lo, eth1}, nil) + lo.EXPECT().Attrs().Return(mockLinkAttrs1) + eth1.EXPECT().Attrs().Return(mockLinkAttrs2) + 
gomock.InOrder(firstlistSet, secondlistSet) + mockNetLink.EXPECT().LinkSetMTU(gomock.Any(), testMTU).Return(nil) + mockNetLink.EXPECT().LinkSetUp(gomock.Any()).Return(nil) + // eth1's device + eth1.EXPECT().Attrs().Return(mockLinkAttrs2) + eth1.EXPECT().Attrs().Return(mockLinkAttrs2) + // eth1's IP address + testEniAddr := &net.IPNet{ + IP: net.ParseIP(testEniIP6), + Mask: testEniV6SubnetIPNet.Mask, + } + mockNetLink.EXPECT().AddrList(gomock.Any(), unix.AF_INET6).Return([]netlink.Addr{}, nil) + mockNetLink.EXPECT().AddrAdd(gomock.Any(), &netlink.Addr{IPNet: testEniAddr}).Return(nil) + + mockNetLink.EXPECT().RouteDel(gomock.Any()) + mockNetLink.EXPECT().RouteReplace(gomock.Any()).Return(nil) + + mockNetLink.EXPECT().RouteDel(gomock.Any()) + mockNetLink.EXPECT().RouteReplace(gomock.Any()).Return(nil) + mockNetLink.EXPECT().RouteDel(gomock.Any()).Return(nil) + + _, err = setupENINetwork(testEniIP6, testMAC2, testTable, testEniV6Subnet, mockNetLink, 0*time.Second, 0*time.Second, testMTU) assert.NoError(t, err) } @@ -130,7 +185,7 @@ func TestSetupENINetworkMACFail(t *testing.T) { mockNetLink.EXPECT().LinkList().Return(nil, fmt.Errorf("simulated failure")) } - err := setupENINetwork(testeniIP, testMAC2, testTable, testeniSubnet, mockNetLink, 0*time.Second, 0*time.Second, testMTU) + _, err := setupENINetwork(testEniIP, testMAC2, testTable, testEniSubnet, mockNetLink, 0*time.Second, 0*time.Second, testMTU) assert.Errorf(t, err, "simulated failure") } @@ -138,10 +193,43 @@ func TestSetupENINetworkErrorOnPrimaryENI(t *testing.T) { ctrl, mockNetLink, _, _, _ := setup(t) defer ctrl.Finish() deviceNumber := 0 - err := setupENINetwork(testeniIP, testMAC2, deviceNumber, testeniSubnet, mockNetLink, 0*time.Second, 0*time.Second, testMTU) + _, err := setupENINetwork(testEniIP, testMAC2, deviceNumber, testEniSubnet, mockNetLink, 0*time.Second, 0*time.Second, testMTU) assert.Error(t, err) } +func TestUpdateIPv6GatewayRule(t *testing.T) { + ctrl, mockNetLink, _, _, _ := setup(t) + defer ctrl.Finish() + + ln := &linuxNetwork{ + netLink: mockNetLink, + podSGEnforcingMode: sgpp.EnforcingModeStrict, + } + + icmpRule := netlink.Rule{ + Src: &net.IPNet{IP: testEniV6GatewayNet, Mask: net.CIDRMask(128, 128)}, + IPProto: unix.IPPROTO_ICMPV6, + Table: localRouteTable, + Priority: 0, + Family: unix.AF_INET6, + } + + // Validate rule add in strict mode + mockNetLink.EXPECT().NewRule().Return(&icmpRule) + mockNetLink.EXPECT().RuleAdd(&icmpRule) + + err := ln.UpdateIPv6GatewayRule(&testEniV6GatewayNet) + assert.NoError(t, err) + + // Validate rule del in non-strict mode + ln.podSGEnforcingMode = sgpp.EnforcingModeStandard + mockNetLink.EXPECT().NewRule().Return(&icmpRule) + mockNetLink.EXPECT().RuleDel(&icmpRule) + + err = ln.UpdateIPv6GatewayRule(&testEniV6GatewayNet) + assert.NoError(t, err) +} + func TestSetupHostNetworkNodePortDisabledAndSNATDisabled(t *testing.T) { ctrl, mockNetLink, _, mockNS, mockIptables := setup(t) defer ctrl.Finish() @@ -165,7 +253,7 @@ func TestSetupHostNetworkNodePortDisabledAndSNATDisabled(t *testing.T) { mockNetLink.EXPECT().RuleDel(&mainENIRule) var vpcCIDRs []string - err := ln.SetupHostNetwork(vpcCIDRs, loopback, &testENINetIP, false, true, false) + err := ln.SetupHostNetwork(vpcCIDRs, loopback, &testEniIPNet, false, true, false) assert.NoError(t, err) } @@ -185,7 +273,7 @@ func TestUpdateRuleListBySrc(t *testing.T) { ln := &linuxNetwork{netLink: mockNetLink} origRule := netlink.Rule{ - Src: testENINetIPNet, + Src: testEniSubnetIPNet, Table: testTable, } testCases := []struct { @@ -216,7 
+304,7 @@ func TestUpdateRuleListBySrc(t *testing.T) { mockNetLink.EXPECT().NewRule().Return(&tc.newRule) mockNetLink.EXPECT().RuleAdd(&tc.newRule) - err := ln.UpdateRuleListBySrc(tc.ruleList, *testENINetIPNet) + err := ln.UpdateRuleListBySrc(tc.ruleList, *testEniSubnetIPNet) assert.NoError(t, err) } } @@ -245,7 +333,7 @@ func TestSetupHostNetworkNodePortEnabledAndSNATDisabled(t *testing.T) { log.Debugf("After: mockIPtables.Dp state: ", mockIptables.(*mock_iptables.MockIptables).DataplaneState) var vpcCIDRs []string - err := ln.SetupHostNetwork(vpcCIDRs, loopback, &testENINetIP, false, true, false) + err := ln.SetupHostNetwork(vpcCIDRs, loopback, &testEniIPNet, false, true, false) assert.NoError(t, err) assert.Equal(t, map[string]map[string][][]string{ @@ -303,7 +391,7 @@ func TestSetupHostNetworkNodePortDisabledAndSNATEnabled(t *testing.T) { var vpcCIDRs []string - err := ln.SetupHostNetwork(vpcCIDRs, loopback, &testENINetIP, false, true, false) + err := ln.SetupHostNetwork(vpcCIDRs, loopback, &testEniIPNet, false, true, false) assert.NoError(t, err) assert.Equal(t, map[string]map[string][][]string{ @@ -372,7 +460,7 @@ func TestSetupHostNetworkWithExcludeSNATCIDRs(t *testing.T) { setupNetLinkMocks(ctrl, mockNetLink) vpcCIDRs := []string{"10.10.0.0/16", "10.11.0.0/16"} - err := ln.SetupHostNetwork(vpcCIDRs, loopback, &testENINetIP, false, true, false) + err := ln.SetupHostNetwork(vpcCIDRs, loopback, &testEniIPNet, false, true, false) assert.NoError(t, err) assert.Equal(t, map[string]map[string][][]string{ @@ -440,7 +528,7 @@ func TestSetupHostNetworkCleansUpStaleSNATRules(t *testing.T) { _ = mockIptables.Append("nat", "PREROUTING", "-m", "comment", "--comment", "AWS, CONNMARK", "-j", "CONNMARK", "--restore-mark", "--mask", "0x80") vpcCIDRs := []string{"10.10.0.0/16", "10.11.0.0/16"} - err := ln.SetupHostNetwork(vpcCIDRs, loopback, &testENINetIP, false, true, false) + err := ln.SetupHostNetwork(vpcCIDRs, loopback, &testEniIPNet, false, true, false) assert.NoError(t, err) assert.Equal(t, @@ -509,7 +597,7 @@ func TestSetupHostNetworkWithDifferentVethPrefix(t *testing.T) { _ = mockIptables.Append("nat", "PREROUTING", "-m", "comment", "--comment", "AWS, CONNMARK", "-j", "CONNMARK", "--restore-mark", "--mask", "0x80") vpcCIDRs := []string{"10.10.0.0/16", "10.11.0.0/16"} - err := ln.SetupHostNetwork(vpcCIDRs, loopback, &testENINetIP, false, true, false) + err := ln.SetupHostNetwork(vpcCIDRs, loopback, &testEniIPNet, false, true, false) assert.NoError(t, err) assert.Equal(t, map[string]map[string][][]string{ @@ -577,7 +665,7 @@ func TestSetupHostNetworkExternalNATCleanupConnmark(t *testing.T) { // remove exclusions vpcCIDRs := []string{"10.10.0.0/16", "10.11.0.0/16"} - err := ln.SetupHostNetwork(vpcCIDRs, loopback, &testENINetIP, false, true, false) + err := ln.SetupHostNetwork(vpcCIDRs, loopback, &testEniIPNet, false, true, false) assert.NoError(t, err) assert.Equal(t, @@ -642,7 +730,7 @@ func TestSetupHostNetworkExcludedSNATCIDRsIdempotent(t *testing.T) { // remove exclusions vpcCIDRs := []string{"10.10.0.0/16", "10.11.0.0/16"} - err := ln.SetupHostNetwork(vpcCIDRs, loopback, &testENINetIP, false, true, false) + err := ln.SetupHostNetwork(vpcCIDRs, loopback, &testEniIPNet, false, true, false) assert.NoError(t, err) assert.Equal(t, @@ -706,7 +794,7 @@ func TestUpdateHostIptablesRules(t *testing.T) { _ = mockIptables.Append("mangle", "PREROUTING", "-m", "comment", "--comment", "AWS, primary ENI", "-i", "vlan+", "-j", "CONNMARK", "--restore-mark", "--mask", "0x80") vpcCIDRs := 
[]string{"10.10.0.0/16", "10.11.0.0/16"} - err := ln.SetupHostNetwork(vpcCIDRs, loopback, &testENINetIP, false, true, false) + err := ln.SetupHostNetwork(vpcCIDRs, loopback, &testEniIPNet, false, true, false) assert.NoError(t, err) assert.Equal(t, map[string]map[string][][]string{ @@ -762,7 +850,7 @@ func TestSetupHostNetworkMultipleCIDRs(t *testing.T) { setupNetLinkMocks(ctrl, mockNetLink) vpcCIDRs := []string{"10.10.0.0/16", "10.11.0.0/16"} - err := ln.SetupHostNetwork(vpcCIDRs, loopback, &testENINetIP, false, true, false) + err := ln.SetupHostNetwork(vpcCIDRs, loopback, &testEniIPNet, false, true, false) assert.NoError(t, err) } @@ -787,7 +875,7 @@ func TestSetupHostNetworkWithIPv6Enabled(t *testing.T) { setupNetLinkMocks(ctrl, mockNetLink) var vpcCIDRs []string - err := ln.SetupHostNetwork(vpcCIDRs, loopback, &testENINetIP, false, false, true) + err := ln.SetupHostNetwork(vpcCIDRs, loopback, &testEniIPNet, false, false, true) assert.NoError(t, err) assert.Equal(t, map[string]map[string][][]string{ @@ -804,28 +892,24 @@ func TestSetupHostNetworkWithIPv6Enabled(t *testing.T) { }, mockIptables.(*mock_iptables.MockIptables).DataplaneState) } -func TestIncrementIPv4Addr(t *testing.T) { +func TestIncrementIPAddr(t *testing.T) { testCases := []struct { name string - ip net.IP - expected net.IP - err bool + ip string + expected string }{ - {"increment", net.IPv4(10, 0, 0, 1), net.IPv4(10, 0, 0, 2).To4(), false}, - {"carry up 1", net.IPv4(10, 0, 0, 255), net.IPv4(10, 0, 1, 0).To4(), false}, - {"carry up 2", net.IPv4(10, 0, 255, 255), net.IPv4(10, 1, 0, 0).To4(), false}, - {"overflow", net.IPv4(255, 255, 255, 255), nil, true}, + {"increment v4", "10.0.0.1", "10.0.0.2"}, + {"increment v6", "2600::", "2600::1"}, + {"carry up 1 v4", "10.0.0.255", "10.0.1.0"}, + {"carry up 2 v4", "10.0.255.255", "10.1.0.0"}, + {"v6 case 2", "2600::5", "2600::6"}, } for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { - result, err := IncrementIPv4Addr(tc.ip) - if tc.err { - assert.Error(t, err) - } else { - assert.NoError(t, err) - } - assert.Equal(t, tc.expected, result, tc.name) + origIP := net.ParseIP(tc.ip) + incrementIPAddr(origIP) + assert.Equal(t, tc.expected, origIP.String()) }) } } @@ -850,7 +934,7 @@ func TestSetupHostNetworkIgnoringRpFilterUpdate(t *testing.T) { setupNetLinkMocks(ctrl, mockNetLink) var vpcCIDRs []string - err := ln.SetupHostNetwork(vpcCIDRs, loopback, &testENINetIP, false, true, false) + err := ln.SetupHostNetwork(vpcCIDRs, loopback, &testEniIPNet, false, true, false) assert.NoError(t, err) } @@ -878,7 +962,7 @@ func TestSetupHostNetworkUpdateLocalRule(t *testing.T) { mockNetLink.EXPECT() var vpcCIDRs []string - err := ln.SetupHostNetwork(vpcCIDRs, loopback, &testENINetIP, true, true, false) + err := ln.SetupHostNetwork(vpcCIDRs, loopback, &testEniIPNet, true, true, false) assert.NoError(t, err) } @@ -906,7 +990,7 @@ func TestSetupHostNetworkDeleteOldConnmarkRuleForNonVpcOutboundTraffic(t *testin _ = mockIptables.Append("nat", "PREROUTING", "-i", "eni+", "-m", "comment", "--comment", "AWS, outbound connections", "-m", "state", "--state", "NEW", "-j", "AWS-CONNMARK-CHAIN-0") var vpcCIDRs []string - err := ln.SetupHostNetwork(vpcCIDRs, loopback, &testENINetIP, false, true, false) + err := ln.SetupHostNetwork(vpcCIDRs, loopback, &testEniIPNet, false, true, false) assert.NoError(t, err) var exists bool diff --git a/test/agent/cmd/traffic-client/main.go b/test/agent/cmd/traffic-client/main.go index 66674464898..b25b1f41c68 100644 --- a/test/agent/cmd/traffic-client/main.go +++ 
b/test/agent/cmd/traffic-client/main.go @@ -169,7 +169,7 @@ func sendAndReceiveResponse(conn net.Conn, serverAddr string) *input.Failure { return failure } - log.Printf("successfully recieved response from server %s: %s", serverAddr, string(buffer)) + log.Printf("successfully received response from server %s: %s", serverAddr, string(buffer)) return nil } diff --git a/test/framework/resources/aws/services/ec2.go b/test/framework/resources/aws/services/ec2.go index 7eceaed67ac..3a5d3ff1f53 100644 --- a/test/framework/resources/aws/services/ec2.go +++ b/test/framework/resources/aws/services/ec2.go @@ -15,6 +15,7 @@ package services import ( "fmt" + "strings" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/session" @@ -88,17 +89,29 @@ func (d *defaultEC2) DescribeInstance(instanceID string) (*ec2.Instance, error) return describeInstanceOutput.Reservations[0].Instances[0], nil } -func (d *defaultEC2) AuthorizeSecurityGroupIngress(groupID string, protocol string, - fromPort int, toPort int, cidrIP string) error { +func (d *defaultEC2) AuthorizeSecurityGroupIngress(groupID string, protocol string, fromPort int, toPort int, cidrIP string) error { + var ipv4Ranges []*ec2.IpRange + var ipv6Ranges []*ec2.Ipv6Range + if strings.Contains(cidrIP, ":") { + ipv6Ranges = []*ec2.Ipv6Range{ + { + CidrIpv6: aws.String(cidrIP), + }, + } + } else { + ipv4Ranges = []*ec2.IpRange{ + { + CidrIp: aws.String(cidrIP), + }, + } + } + ipPermissions := &ec2.IpPermission{ FromPort: aws.Int64(int64(fromPort)), ToPort: aws.Int64(int64(toPort)), IpProtocol: aws.String(protocol), - IpRanges: []*ec2.IpRange{ - { - CidrIp: aws.String(cidrIP), - }, - }, + IpRanges: ipv4Ranges, + Ipv6Ranges: ipv6Ranges, } authorizeSecurityGroupIngressInput := &ec2.AuthorizeSecurityGroupIngressInput{ GroupId: aws.String(groupID), @@ -109,15 +122,28 @@ func (d *defaultEC2) AuthorizeSecurityGroupIngress(groupID string, protocol stri } func (d *defaultEC2) RevokeSecurityGroupIngress(groupID string, protocol string, fromPort int, toPort int, cidrIP string) error { + var ipv4Ranges []*ec2.IpRange + var ipv6Ranges []*ec2.Ipv6Range + if strings.Contains(cidrIP, ":") { + ipv6Ranges = []*ec2.Ipv6Range{ + { + CidrIpv6: aws.String(cidrIP), + }, + } + } else { + ipv4Ranges = []*ec2.IpRange{ + { + CidrIp: aws.String(cidrIP), + }, + } + } + ipPermissions := &ec2.IpPermission{ FromPort: aws.Int64(int64(fromPort)), ToPort: aws.Int64(int64(toPort)), IpProtocol: aws.String(protocol), - IpRanges: []*ec2.IpRange{ - { - CidrIp: aws.String(cidrIP), - }, - }, + IpRanges: ipv4Ranges, + Ipv6Ranges: ipv6Ranges, } revokeSecurityGroupIngressInput := &ec2.RevokeSecurityGroupIngressInput{ GroupId: aws.String(groupID), @@ -128,15 +154,28 @@ func (d *defaultEC2) RevokeSecurityGroupIngress(groupID string, protocol string, } func (d *defaultEC2) AuthorizeSecurityGroupEgress(groupID string, protocol string, fromPort int, toPort int, cidrIP string) error { + var ipv4Ranges []*ec2.IpRange + var ipv6Ranges []*ec2.Ipv6Range + if strings.Contains(cidrIP, ":") { + ipv6Ranges = []*ec2.Ipv6Range{ + { + CidrIpv6: aws.String(cidrIP), + }, + } + } else { + ipv4Ranges = []*ec2.IpRange{ + { + CidrIp: aws.String(cidrIP), + }, + } + } + ipPermissions := &ec2.IpPermission{ FromPort: aws.Int64(int64(fromPort)), ToPort: aws.Int64(int64(toPort)), IpProtocol: aws.String(protocol), - IpRanges: []*ec2.IpRange{ - { - CidrIp: aws.String(cidrIP), - }, - }, + IpRanges: ipv4Ranges, + Ipv6Ranges: ipv6Ranges, } authorizeSecurityGroupEgressInput := &ec2.AuthorizeSecurityGroupEgressInput{ 
GroupId: aws.String(groupID), @@ -147,15 +186,28 @@ func (d *defaultEC2) AuthorizeSecurityGroupEgress(groupID string, protocol strin } func (d *defaultEC2) RevokeSecurityGroupEgress(groupID string, protocol string, fromPort int, toPort int, cidrIP string) error { + var ipv4Ranges []*ec2.IpRange + var ipv6Ranges []*ec2.Ipv6Range + if strings.Contains(cidrIP, ":") { + ipv6Ranges = []*ec2.Ipv6Range{ + { + CidrIpv6: aws.String(cidrIP), + }, + } + } else { + ipv4Ranges = []*ec2.IpRange{ + { + CidrIp: aws.String(cidrIP), + }, + } + } + ipPermissions := &ec2.IpPermission{ FromPort: aws.Int64(int64(fromPort)), ToPort: aws.Int64(int64(toPort)), IpProtocol: aws.String(protocol), - IpRanges: []*ec2.IpRange{ - { - CidrIp: aws.String(cidrIP), - }, - }, + IpRanges: ipv4Ranges, + Ipv6Ranges: ipv6Ranges, } revokeSecurityGroupEgressInput := &ec2.RevokeSecurityGroupEgressInput{ GroupId: aws.String(groupID), diff --git a/test/integration/pod-eni/security_group_per_pod_suite_test.go b/test/integration/pod-eni/security_group_per_pod_suite_test.go index 82a396aa247..6e248c0cf7f 100644 --- a/test/integration/pod-eni/security_group_per_pod_suite_test.go +++ b/test/integration/pod-eni/security_group_per_pod_suite_test.go @@ -15,7 +15,6 @@ package pod_eni import ( "fmt" - "strings" "testing" "github.com/aws/amazon-vpc-cni-k8s/test/framework" @@ -34,6 +33,8 @@ const AmazonEKSVPCResourceControllerARN = "arn:aws:iam::aws:policy/AmazonEKSVPCR var ( f *framework.Framework err error + // Cluster IP Address Family + isIPv4Cluster = false // Security Group that will be used to to create Security Group Policy securityGroupId string // Ports that will be opened on the Security Group used for testing @@ -42,10 +43,10 @@ var ( metricsPort = 8080 // Maximum number of Branch Interface created across all the self managed nodes totalBranchInterface int - // Cluster Role name derived from cluster Role ARN, used to attach VPC Controller Policy - clusterRoleName string // Cluster security group ID for node to node communication clusterSGID string + v4Zero = "0.0.0.0/0" + v6Zero = "::/0" targetNode corev1.Node // Number of nodes in cluster @@ -60,31 +61,43 @@ func TestSecurityGroupForPods(t *testing.T) { var _ = BeforeSuite(func() { f = framework.New(framework.GlobalOptions) + By("checking if cluster address family is IPv4 or IPv6") + clusterOutput, err := f.CloudServices.EKS().DescribeCluster(f.Options.ClusterName) + Expect(err).NotTo(HaveOccurred()) + if *clusterOutput.Cluster.KubernetesNetworkConfig.IpFamily == "ipv4" { + isIPv4Cluster = true + fmt.Fprint(GinkgoWriter, "cluster is IPv4\n") + } else { + fmt.Fprint(GinkgoWriter, "cluster is IPv6\n") + } + By("creating a new security group used in Security Group Policy") - securityGroupOutput, err := f.CloudServices.EC2().CreateSecurityGroup("pod-eni-automation", + var sgName string + if isIPv4Cluster { + sgName = "pod-eni-automation-v4" + } else { + sgName = "pod-eni-automation-v6" + } + securityGroupOutput, err := f.CloudServices.EC2().CreateSecurityGroup(sgName, "test created by vpc cni automation test suite", f.Options.AWSVPCID) Expect(err).ToNot(HaveOccurred()) securityGroupId = *securityGroupOutput.GroupId By("authorizing egress and ingress on security group for client-server communication") - f.CloudServices.EC2().AuthorizeSecurityGroupEgress(securityGroupId, "TCP", openPort, openPort, "0.0.0.0/0") - f.CloudServices.EC2().AuthorizeSecurityGroupIngress(securityGroupId, "TCP", openPort, openPort, "0.0.0.0/0") - - By("getting the cluster role name") - describeClusterOutput, err := 
f.CloudServices.EKS().DescribeCluster(f.Options.ClusterName) - Expect(err).ToNot(HaveOccurred()) - clusterRoleName = strings.Split(*describeClusterOutput.Cluster.RoleArn, "/")[1] - - By("attaching the AmazonEKSVPCResourceController policy from the cluster role") - err = f.CloudServices.IAM(). - AttachRolePolicy(AmazonEKSVPCResourceControllerARN, clusterRoleName) - Expect(err).ToNot(HaveOccurred()) + if isIPv4Cluster { + f.CloudServices.EC2().AuthorizeSecurityGroupEgress(securityGroupId, "tcp", openPort, openPort, v4Zero) + f.CloudServices.EC2().AuthorizeSecurityGroupIngress(securityGroupId, "tcp", openPort, openPort, v4Zero) + } else { + f.CloudServices.EC2().AuthorizeSecurityGroupEgress(securityGroupId, "tcp", openPort, openPort, v6Zero) + f.CloudServices.EC2().AuthorizeSecurityGroupIngress(securityGroupId, "tcp", openPort, openPort, v6Zero) + f.CloudServices.EC2().AuthorizeSecurityGroupIngress(securityGroupId, "icmpv6", -1, -1, v6Zero) + } By("getting branch ENI limits") nodeList, err := f.K8sResourceManagers.NodeManager().GetNodes(f.Options.NgNameLabelKey, f.Options.NgNameLabelVal) Expect(err).ToNot(HaveOccurred()) numNodes = len(nodeList.Items) - Expect(numNodes).Should(BeNumerically(">", 1)) + Expect(numNodes).Should(BeNumerically(">=", 1)) node := nodeList.Items[0] instanceID := k8sUtils.GetInstanceIDFromNode(node) @@ -128,8 +141,4 @@ var _ = AfterSuite(func() { By("deleting the security group") err = f.CloudServices.EC2().DeleteSecurityGroup(securityGroupId) Expect(err).ToNot(HaveOccurred()) - - By("detaching the AmazonEKSVPCResourceController policy from the cluster role") - err = f.CloudServices.IAM().DetachRolePolicy(AmazonEKSVPCResourceControllerARN, clusterRoleName) - Expect(err).ToNot(HaveOccurred()) }) diff --git a/test/integration/pod-eni/security_group_per_pod_test.go b/test/integration/pod-eni/security_group_per_pod_test.go index 5d79944c65f..00f0eabe332 100644 --- a/test/integration/pod-eni/security_group_per_pod_test.go +++ b/test/integration/pod-eni/security_group_per_pod_test.go @@ -107,8 +107,10 @@ var _ = Describe("Security Group for Pods Test", func() { ClientPodLabelKey: labelKey, ClientPodLabelVal: clientPodLabelVal, ValidateServerPods: ValidatePodsHaveBranchENI, + IsV6Enabled: !isIPv4Cluster, } + By("performing traffic test") successRate, err := trafficTester.TestTraffic() Expect(err).ToNot(HaveOccurred()) Expect(successRate).Should(BeNumerically(">=", float64(99))) @@ -123,8 +125,13 @@ var _ = Describe("Security Group for Pods Test", func() { // Allow Ingress on cluster security group so client pods can communicate with metric pod // 8080: metric-pod listener port By("Adding an additional Ingress Rule on NodeSecurityGroupID to allow client-to-metric traffic") - err := f.CloudServices.EC2().AuthorizeSecurityGroupIngress(clusterSGID, "TCP", metricsPort, metricsPort, "0.0.0.0/0") - Expect(err).ToNot(HaveOccurred()) + if isIPv4Cluster { + err := f.CloudServices.EC2().AuthorizeSecurityGroupIngress(clusterSGID, "TCP", metricsPort, metricsPort, v4Zero) + Expect(err).ToNot(HaveOccurred()) + } else { + err := f.CloudServices.EC2().AuthorizeSecurityGroupIngress(clusterSGID, "TCP", metricsPort, metricsPort, v6Zero) + Expect(err).ToNot(HaveOccurred()) + } }) It("should have 99%+ success rate", func() { @@ -141,6 +148,7 @@ var _ = Describe("Security Group for Pods Test", func() { ClientPodLabelVal: clientPodLabelVal, ValidateServerPods: ValidatePodsHaveBranchENI, ValidateClientPods: ValidatePodsHaveBranchENI, + IsV6Enabled: !isIPv4Cluster, } successRate, err := 
t.TestTraffic() @@ -151,8 +159,13 @@ var _ = Describe("Security Group for Pods Test", func() { AfterEach(func() { // Revoke the Ingress rule for traffic from client pods added to Node Security Group By("Revoking the additional Ingress rule added to allow client-to-metric traffic") - err := f.CloudServices.EC2().RevokeSecurityGroupIngress(clusterSGID, "TCP", metricsPort, metricsPort, "0.0.0.0/0") - Expect(err).ToNot(HaveOccurred()) + if isIPv4Cluster { + err := f.CloudServices.EC2().RevokeSecurityGroupIngress(clusterSGID, "TCP", metricsPort, metricsPort, v4Zero) + Expect(err).ToNot(HaveOccurred()) + } else { + err := f.CloudServices.EC2().RevokeSecurityGroupIngress(clusterSGID, "TCP", metricsPort, metricsPort, v6Zero) + Expect(err).ToNot(HaveOccurred()) + } }) }) @@ -175,6 +188,7 @@ var _ = Describe("Security Group for Pods Test", func() { ClientPodLabelKey: labelKey, ClientPodLabelVal: clientPodLabelVal, ValidateServerPods: ValidatePodsHaveBranchENI, + IsV6Enabled: !isIPv4Cluster, } successRate, err := t.TestTraffic() @@ -318,7 +332,14 @@ var _ = Describe("Security Group for Pods Test", func() { }) func GetPodNetworkingValidationInput(podList v1.PodList) input.PodNetworkingValidationInput { + var ipFamily string + if isIPv4Cluster { + ipFamily = "IPv4" + } else { + ipFamily = "IPv6" + } ip := input.PodNetworkingValidationInput{ + IPFamily: ipFamily, VethPrefix: "vlan", PodList: []input.Pod{}, ValidateMTU: true, } for _, pod := range podList.Items { - ip.PodList = append(ip.PodList, input.Pod{ - PodName: pod.Name, - PodNamespace: pod.Namespace, - PodIPv4Address: pod.Status.PodIP, - }) + if isIPv4Cluster { + ip.PodList = append(ip.PodList, input.Pod{ + PodName: pod.Name, + PodNamespace: pod.Namespace, + PodIPv4Address: pod.Status.PodIP, + }) + } else { + ip.PodList = append(ip.PodList, input.Pod{ + PodName: pod.Name, + PodNamespace: pod.Namespace, + PodIPv6Address: pod.Status.PodIP, + }) + + } } return ip } @@ -379,6 +409,7 @@ func ValidatePodsHaveBranchENI(podList v1.PodList) error { if val, ok := pod.Annotations["vpc.amazonaws.com/pod-eni"]; ok { type ENIDetails struct { IPV4Addr string `json:"privateIp"` + IPV6Addr string `json:"ipv6addr"` ID string `json:"eniId"` } var eniList []ENIDetails @@ -387,11 +418,18 @@ func ValidatePodsHaveBranchENI(podList v1.PodList) error { return fmt.Errorf("failed to unmarshall the branch ENI annotation %v", err) } - if eniList[0].IPV4Addr != pod.Status.PodIP { - return fmt.Errorf("expected the pod to have IP %s but recieved %s", - eniList[0].IPV4Addr, pod.Status.PodIP) - } + if isIPv4Cluster { + if eniList[0].IPV4Addr != pod.Status.PodIP { + return fmt.Errorf("expected the pod to have IP %s but received %s", + eniList[0].IPV4Addr, pod.Status.PodIP) + } + } else { + if eniList[0].IPV6Addr != pod.Status.PodIP { + return fmt.Errorf("expected the pod to have IP %s but received %s", + eniList[0].IPV6Addr, pod.Status.PodIP) + } + } By(fmt.Sprintf("validating pod %s has branch ENI %s", pod.Name, eniList[0].ID)) } else {