diff --git a/ansible/roles/test/files/ptftests/fg_ecmp_test.py b/ansible/roles/test/files/ptftests/fg_ecmp_test.py index 2151e1fb92..c121d48fea 100644 --- a/ansible/roles/test/files/ptftests/fg_ecmp_test.py +++ b/ansible/roles/test/files/ptftests/fg_ecmp_test.py @@ -1,10 +1,14 @@ # PTF test contains the test cases for fine grained ecmp, the test scenarios are as follows: # create_flows: Sends NUM_FLOWS flows with varying src_ip and creates a tuple to port map # initial_hash_check: Checks that the flows from create_flows still end up at the same port +# hash_check_warm_boot: Similar to initial hash check, but run during warm boot; accounts for possible flooding during warm boot +# bank_check: Check that the flows end up on the same bank as before # withdraw_nh: Withdraw next-hop in one fg nhg bank, and make sure flows redistribute to the remaining ports in the same fg nhg bank # add_nh: Add next-hop in one fg nhg bank, and make sure flows redistribute from ports in the same fg nhg bank to the added port # withdraw_bank: Withdraw all next-hops which constitute a bank, and make sure that flows migrate to using the other bank # add_first_nh: Add 1st next-hop from previously withdrawn bank, and make sure that some flows migrate back to using the next-hop in the old bank +# net_port_hashing: Verify hashing of packets originating from the server towards the T1 (network) ports + import ipaddress @@ -39,8 +43,8 @@ def log(self, message): logging.info(message) - def trigger_mac_learning(self, ip_to_port): - for src_ip, src_port in ip_to_port.items(): + def trigger_mac_learning(self, serv_ports): + for src_port in serv_ports: pkt = simple_eth_packet( eth_dst=self.router_mac, eth_src=self.dataplane.get_mac(0, src_port), @@ -78,6 +82,10 @@ def setUp(self): raise Exception("required parameter 'exp_flow_count' is not present") self.exp_flow_count = self.test_params['exp_flow_count'] + if 'dst_ip' not in self.test_params: + raise Exception("required parameter 'dst_ip' is not present") + self.dst_ip = self.test_params['dst_ip'] + if not os.path.isfile(config): raise Exception("the config file %s doesn't exist" % config) @@ -85,29 +93,30 @@ def setUp(self): graph = json.load(fp) self.net_ports = graph['net_ports'] - self.exp_ports = graph['port_list'] + self.serv_ports = graph['serv_ports'] self.exp_port_set_one = graph['bank_0_port'] self.exp_port_set_two = graph['bank_1_port'] - self.dst_ip = graph['dst_ip'] self.router_mac = graph['dut_mac'] - self.ip_to_port = graph['ip_to_port'] self.num_flows = graph['num_flows'] self.inner_hashing = graph['inner_hashing'] self.log(self.net_ports) - self.log(self.exp_ports) + self.log(self.serv_ports) self.log(self.exp_port_set_one) self.log(self.exp_port_set_two) self.log(self.dst_ip) self.log(self.router_mac) self.log(self.test_case) - self.log(self.ip_to_port) self.log(self.num_flows) self.log(self.inner_hashing) self.log(self.exp_flow_count) - self.trigger_mac_learning(self.ip_to_port) - time.sleep(3) + if self.test_case != 'hash_check_warm_boot': + # We send bi-directional traffic during warm boot due to + # fdb clear, so no need to trigger mac learning + # during warm boot.
+ self.trigger_mac_learning(self.serv_ports) + time.sleep(3) #--------------------------------------------------------------------- @@ -145,6 +154,15 @@ def fg_ecmp(self): tuple_to_port_map ={} hit_count_map = {} + if not os.path.exists(PERSIST_MAP): + with open(PERSIST_MAP, 'w'): pass + else: + with open(PERSIST_MAP) as fp: + tuple_to_port_map = json.load(fp) + + if tuple_to_port_map is None or self.dst_ip not in tuple_to_port_map: + tuple_to_port_map[self.dst_ip] = {} + if self.test_case == 'create_flows': # Send packets with varying src_ips to create NUM_FLOWS unique flows # and generate a flow to port map @@ -156,136 +174,131 @@ def fg_ecmp(self): else: in_port = self.net_ports[0] (port_idx, _) = self.send_rcv_ip_pkt( - in_port, src_port, dst_port, src_ip, dst_ip, self.exp_ports, ipv4) + in_port, src_port, dst_port, src_ip, dst_ip, self.serv_ports, ipv4) hit_count_map[port_idx] = hit_count_map.get(port_idx, 0) + 1 - tuple_to_port_map[src_ip] = port_idx - self.test_balancing(hit_count_map) - - json.dump(tuple_to_port_map, open(PERSIST_MAP,"w")) - return + tuple_to_port_map[self.dst_ip][src_ip] = port_idx elif self.test_case == 'initial_hash_check': - with open(PERSIST_MAP) as fp: - tuple_to_port_map = json.load(fp) - assert tuple_to_port_map - # step 2: Send the same flows once again and verify that they end up on the same port self.log("Ensure that flow to port map is maintained when the same flow is re-sent...") - for src_ip, port in tuple_to_port_map.iteritems(): + for src_ip, port in tuple_to_port_map[self.dst_ip].iteritems(): if self.inner_hashing: in_port = random.choice(self.net_ports) else: in_port = self.net_ports[0] (port_idx, _) = self.send_rcv_ip_pkt( - in_port, src_port, dst_port, src_ip, dst_ip, self.exp_ports, ipv4) + in_port, src_port, dst_port, src_ip, dst_ip, self.serv_ports, ipv4) assert port_idx == port return + elif self.test_case == 'hash_check_warm_boot': + self.log("Ensure that flow to port map is maintained when the same flow is re-sent...") + total_flood_pkts = 0 + for src_ip, port in tuple_to_port_map[self.dst_ip].iteritems(): + if self.inner_hashing: + in_port = random.choice(self.net_ports) + else: + in_port = self.net_ports[0] + (port_idx, _) = self.send_rcv_ip_pkt_warm( + in_port, src_port, dst_port, src_ip, dst_ip, self.serv_ports, ipv4, port) + if port_idx == -1: + total_flood_pkts = total_flood_pkts + 1 + # Ensure that flooding duration in warm reboot is less than 10% of total packet count + self.log("Number of flood packets were: " + str(total_flood_pkts)) + assert (total_flood_pkts < (0.1 * len(tuple_to_port_map[self.dst_ip]))) + return + + elif self.test_case == 'bank_check': + self.log("Send the same flows once again and verify that they end up on the same bank...") + for src_ip, port in tuple_to_port_map[self.dst_ip].iteritems(): + if self.inner_hashing: + in_port = random.choice(self.net_ports) + else: + in_port = self.net_ports[0] + (port_idx, _) = self.send_rcv_ip_pkt( + in_port, src_port, dst_port, src_ip, dst_ip, self.serv_ports, ipv4) + if port in self.exp_port_set_one: + assert port_idx in self.exp_port_set_one + if port in self.exp_port_set_two: + assert port_idx in self.exp_port_set_two + hit_count_map[port_idx] = hit_count_map.get(port_idx, 0) + 1 + tuple_to_port_map[self.dst_ip][src_ip] = port_idx + elif self.test_case == 'withdraw_nh': self.log("Withdraw next-hop " + str(self.withdraw_nh_port) + " and ensure hash redistribution within correct bank") - with open(PERSIST_MAP) as fp: - tuple_to_port_map = json.load(fp) - assert 
tuple_to_port_map if self.withdraw_nh_port in self.exp_port_set_one: withdraw_port_grp = self.exp_port_set_one else: withdraw_port_grp = self.exp_port_set_two - hit_count_map = {} - for src_ip, port in tuple_to_port_map.iteritems(): + for src_ip, port in tuple_to_port_map[self.dst_ip].iteritems(): if self.inner_hashing: in_port = random.choice(self.net_ports) else: in_port = self.net_ports[0] (port_idx, _) = self.send_rcv_ip_pkt( - in_port, src_port, dst_port, src_ip, dst_ip, self.exp_ports, ipv4) + in_port, src_port, dst_port, src_ip, dst_ip, self.serv_ports, ipv4) hit_count_map[port_idx] = hit_count_map.get(port_idx, 0) + 1 assert port_idx != self.withdraw_nh_port if port == self.withdraw_nh_port: assert port_idx != self.withdraw_nh_port assert (port_idx in withdraw_port_grp) - tuple_to_port_map[src_ip] = port_idx + tuple_to_port_map[self.dst_ip][src_ip] = port_idx else: assert port_idx == port - self.test_balancing(hit_count_map) - - json.dump(tuple_to_port_map, open(PERSIST_MAP,"w")) - return - elif self.test_case == 'add_nh': self.log("Add next-hop " + str(self.add_nh_port) + " and ensure hash redistribution within correct bank") - with open(PERSIST_MAP) as fp: - tuple_to_port_map = json.load(fp) - assert tuple_to_port_map if self.add_nh_port in self.exp_port_set_one: add_port_grp = self.exp_port_set_one else: add_port_grp = self.exp_port_set_two - hit_count_map = {} - for src_ip, port in tuple_to_port_map.iteritems(): + for src_ip, port in tuple_to_port_map[self.dst_ip].iteritems(): if self.inner_hashing: in_port = random.choice(self.net_ports) else: in_port = self.net_ports[0] (port_idx, _) = self.send_rcv_ip_pkt( - in_port, src_port, dst_port, src_ip, dst_ip, self.exp_ports, ipv4) + in_port, src_port, dst_port, src_ip, dst_ip, self.serv_ports, ipv4) hit_count_map[port_idx] = hit_count_map.get(port_idx, 0) + 1 if port_idx == self.add_nh_port: assert (port in add_port_grp) + tuple_to_port_map[self.dst_ip][src_ip] = port_idx else: assert port_idx == port - self.test_balancing(hit_count_map) - - json.dump(tuple_to_port_map, open(PERSIST_MAP,"w")) - return - elif self.test_case == 'withdraw_bank': self.log("Withdraw bank " + str(self.withdraw_nh_bank) + " and ensure hash redistribution is as expected") - with open(PERSIST_MAP) as fp: - tuple_to_port_map = json.load(fp) - assert tuple_to_port_map if self.withdraw_nh_bank[0] in self.exp_port_set_one: active_port_grp = self.exp_port_set_two else: active_port_grp = self.exp_port_set_one - hit_count_map = {} - for src_ip, port in tuple_to_port_map.iteritems(): + for src_ip, port in tuple_to_port_map[self.dst_ip].iteritems(): if self.inner_hashing: in_port = random.choice(self.net_ports) else: in_port = self.net_ports[0] (port_idx, _) = self.send_rcv_ip_pkt( - in_port, src_port, dst_port, src_ip, dst_ip, self.exp_ports, ipv4) + in_port, src_port, dst_port, src_ip, dst_ip, self.serv_ports, ipv4) hit_count_map[port_idx] = hit_count_map.get(port_idx, 0) + 1 if port in self.withdraw_nh_bank: assert (port_idx in active_port_grp) - tuple_to_port_map[src_ip] = port_idx + tuple_to_port_map[self.dst_ip][src_ip] = port_idx else: assert port_idx == port - self.test_balancing(hit_count_map) - - json.dump(tuple_to_port_map, open(PERSIST_MAP,"w")) - return - elif self.test_case == 'add_first_nh': self.log("Add 1st next-hop " + str(self.first_nh) + " and ensure hash redistribution is as expected") - with open(PERSIST_MAP) as fp: - tuple_to_port_map = json.load(fp) if self.first_nh in self.exp_port_set_one: active_port_grp = self.exp_port_set_two else: 
active_port_grp = self.exp_port_set_one - assert tuple_to_port_map - hit_count_map = {} - for src_ip, port in tuple_to_port_map.iteritems(): + for src_ip, port in tuple_to_port_map[self.dst_ip].iteritems(): if self.inner_hashing: in_port = random.choice(self.net_ports) else: in_port = self.net_ports[0] (port_idx, _) = self.send_rcv_ip_pkt( - in_port, src_port, dst_port, src_ip, dst_ip, self.exp_ports, ipv4) + in_port, src_port, dst_port, src_ip, dst_ip, self.serv_ports, ipv4) hit_count_map[port_idx] = hit_count_map.get(port_idx, 0) + 1 flow_redistribution_in_correct_grp = False if port_idx in active_port_grp: @@ -293,9 +306,21 @@ def fg_ecmp(self): flow_redistribution_in_correct_grp = True elif port_idx == self.first_nh: flow_redistribution_in_correct_grp = True - tuple_to_port_map[src_ip] = port_idx + tuple_to_port_map[self.dst_ip][src_ip] = port_idx assert flow_redistribution_in_correct_grp == True + elif self.test_case == 'net_port_hashing': + self.log("Send packets destined to network ports and ensure hash distribution is as expected") + + for src_ip, port in tuple_to_port_map[self.dst_ip].iteritems(): + if self.inner_hashing: + in_port = random.choice(self.serv_ports) + else: + in_port = self.serv_ports[0] + (port_idx, _) = self.send_rcv_ip_pkt( + in_port, src_port, dst_port, src_ip, dst_ip, self.net_ports, ipv4) + hit_count_map[port_idx] = hit_count_map.get(port_idx, 0) + 1 + self.test_balancing(hit_count_map) return @@ -303,9 +328,59 @@ def fg_ecmp(self): self.log("Unsupported testcase " + self.test_case) return + self.test_balancing(hit_count_map) + json.dump(tuple_to_port_map, open(PERSIST_MAP,"w")) + return + + + def verify_packet_warm(test, pkt, port, device_number=0, timeout=None, n_timeout=None): + # This packet verification function accounts for a possible flood during warm boot + # We ensure that packets are received on the expected port, and return a special + # return value of -1 to denote that a flood has occurred. The caller can use the + # special return value to identify how many packets were flooded.
+ if timeout is None: + timeout = ptf.ptfutils.default_timeout + if n_timeout is None: + n_timeout = ptf.ptfutils.default_negative_timeout + logging.debug("Checking for pkt on device %d, port %r", device_number, port) + result = dp_poll(test, device_number=device_number, timeout=timeout, exp_pkt=pkt) + verify_no_other_packets(test, device_number=device_number, timeout=n_timeout) + + if isinstance(result, test.dataplane.PollSuccess): + if result.port != port: + # Flood case, check if the packet was received on the expected port as well + verify_packet(test, pkt, port) + return (-1, None) + else: + return (port, result.packet) + + assert(isinstance(result, test.dataplane.PollFailure)) + test.fail("Did not receive expected packet on any of ports %r for device %d.\n%s" + % (port, device_number, result.format())) + return (0, None) + + + def send_rcv_ip_pkt_warm(self, in_port, sport, dport, src_ip_addr, dst_ip_addr, + dst_port_list, ipv4=True, exp_port=None): + + # Simulate bidirectional traffic for mac learning, since mac learning (fdb) is flushed + # as part of warm reboot + self.trigger_mac_learning([exp_port]) + + if ipv4: + (matched_index, received) = self.send_rcv_ipv4_pkt(in_port, sport, dport, + src_ip_addr, dst_ip_addr, dst_port_list, exp_port) + else: + (matched_index, received) = self.send_rcv_ipv6_pkt(in_port, sport, dport, + src_ip_addr, dst_ip_addr, dst_port_list, exp_port) + + return (matched_index, received) + def send_rcv_ip_pkt(self, in_port, sport, dport, src_ip_addr, dst_ip_addr, dst_port_list, ipv4=True): + if ipv4: (matched_index, received) = self.send_rcv_ipv4_pkt(in_port, sport, dport, src_ip_addr, dst_ip_addr, dst_port_list) @@ -322,7 +397,7 @@ def send_rcv_ip_pkt(self, in_port, sport, dport, src_ip_addr, dst_ip_addr, def send_rcv_ipv4_pkt(self, in_port, sport, dport, - ip_src, ip_dst, dst_port_list): + ip_src, ip_dst, dst_port_list, exp_port=None): src_mac = self.dataplane.get_mac(0, in_port) rand_int = random.randint(1, 254) @@ -344,7 +419,7 @@ def send_rcv_ipv4_pkt(self, in_port, sport, dport, ip_ttl=64, udp_sport=rand_int, udp_dport=4789, - vxlan_vni=rand_int, + vxlan_vni=20000+rand_int, with_udp_chksum=False, inner_frame=pkt) @@ -356,33 +431,36 @@ def send_rcv_ipv4_pkt(self, in_port, sport, dport, masked_exp_pkt.set_do_not_care_scapy(scapy.IP, "chksum") masked_exp_pkt.set_do_not_care_scapy(scapy.IP, "ttl") - return verify_packet_any_port(self, masked_exp_pkt, dst_port_list) + if exp_port is None: + return verify_packet_any_port(self, masked_exp_pkt, dst_port_list) + else: + return self.verify_packet_warm(masked_exp_pkt, exp_port) def send_rcv_ipv6_pkt(self, in_port, sport, dport, - ip_src, ip_dst, dst_port_list): + ip_src, ip_dst, dst_port_list, exp_port=None): src_mac = self.dataplane.get_mac(0, in_port) rand_int = random.randint(1, 254) if self.inner_hashing: pkt = simple_tcp_packet( - eth_dst=self.router_mac, - eth_src=src_mac, - ip_src=ip_src, - ip_dst=ip_dst, - tcp_sport=sport, - tcp_dport=dport, - ip_ttl=64) + eth_dst=self.router_mac, + eth_src=src_mac, + ip_src=ip_src, + ip_dst=ip_dst, + tcp_sport=sport, + tcp_dport=dport, + ip_ttl=64) pkt = simple_vxlanv6_packet( - eth_dst=self.router_mac, - eth_src=src_mac, - ipv6_src='2:2:2::' + str(rand_int), - ipv6_dst=self.dst_ip, - udp_sport=rand_int, - udp_dport=4789, - vxlan_vni=rand_int, - with_udp_chksum=False, - inner_frame=pkt) + eth_dst=self.router_mac, + eth_src=src_mac, + ipv6_src='2:2:2::' + str(rand_int), + ipv6_dst=self.dst_ip, + udp_sport=rand_int, + udp_dport=4789, + vxlan_vni=20000+rand_int, + with_udp_chksum=False,
+ inner_frame=pkt) else: pkt = simple_tcpv6_packet( eth_dst=self.router_mac, @@ -400,10 +478,12 @@ def send_rcv_ipv6_pkt(self, in_port, sport, dport, masked_exp_pkt.set_do_not_care_scapy(scapy.Ether, "src") masked_exp_pkt.set_do_not_care_scapy(scapy.IPv6, "hlim") - return verify_packet_any_port(self, masked_exp_pkt, dst_port_list) + if exp_port is None: + return verify_packet_any_port(self, masked_exp_pkt, dst_port_list) + else: + return self.verify_packet_warm(masked_exp_pkt, exp_port) - #--------------------------------------------------------------------- def runTest(self): # Main function which triggers all the tests self.fg_ecmp() diff --git a/tests/ecmp/test_fgnhg.py b/tests/ecmp/test_fgnhg.py index ed4f370be8..f00bcea924 100644 --- a/tests/ecmp/test_fgnhg.py +++ b/tests/ecmp/test_fgnhg.py @@ -1,5 +1,4 @@ import pytest -from datetime import datetime import time import logging @@ -7,6 +6,7 @@ import json from tests.ptf_runner import ptf_runner from tests.common import config_reload +from tests.common.helpers.assertions import pytest_assert from tests.common.fixtures.ptfhost_utils import copy_ptftests_directory # lgtm[py/unused-import] from tests.common.fixtures.ptfhost_utils import change_mac_addresses # lgtm[py/unused-import] @@ -17,28 +17,31 @@ DEFAULT_VLAN_ID = 1000 DEFAULT_VLAN_IPv4 = ipaddress.ip_network(u'200.200.200.0/28') DEFAULT_VLAN_IPv6 = ipaddress.ip_network(u'200:200:200:200::/124') -PREFIX_IPv4 = u'100.50.25.12/32' -PREFIX_IPv6 = u'fc:05::/128' -ARP_CFG = '/tmp/arp_cfg.json' +PREFIX_IPV4_LIST = [u'100.50.25.12/32', u'100.50.25.13/32', u'100.50.25.14/32'] +PREFIX_IPV6_LIST = [u'fc:05::/128', u'fc:06::/128', u'fc:07::/128'] FG_ECMP_CFG = '/tmp/fg_ecmp.json' USE_INNER_HASHING = False NUM_FLOWS = 1000 +ptf_to_dut_port_map = {} -SUPPORTED_TOPO = ['t0'] -SUPPORTED_PLATFORMS = ['mellanox'] +pytestmark = [ + pytest.mark.topology('t0'), + pytest.mark.asic('mellanox'), + pytest.mark.disable_loganalyzer +] logger = logging.getLogger(__name__) -def configure_interfaces(cfg_facts, duthost, ptfhost, ptfadapter, vlan_ip): +def configure_interfaces(cfg_facts, duthost, ptfhost, vlan_ip): config_port_indices = cfg_facts['port_index_map'] port_list = [] eth_port_list = [] ip_to_port = {} bank_0_port = [] bank_1_port = [] + global ptf_to_dut_port_map vlan_members = cfg_facts.get('VLAN_MEMBER', {}) - print vlan_members index = 0 for vlan in cfg_facts['VLAN_MEMBER'].keys(): vlan_id = vlan[4:] @@ -52,6 +55,7 @@ def configure_interfaces(cfg_facts, duthost, ptfhost, ptfadapter, vlan_ip): port_list.append(ptf_port_id) eth_port_list.append(port) index = index + 1 + ptf_to_dut_port_map[ptf_port_id] = port port_list.sort() bank_0_port = port_list[:len(port_list)/2] @@ -68,8 +72,8 @@ def configure_interfaces(cfg_facts, duthost, ptfhost, ptfadapter, vlan_ip): return port_list, ip_to_port, bank_0_port, bank_1_port -def generate_fgnhg_config(duthost, ip_to_port, bank_0_port, bank_1_port, prefix): - if isinstance(ipaddress.ip_network(prefix), ipaddress.IPv4Network): +def generate_fgnhg_config(duthost, ip_to_port, bank_0_port, bank_1_port): + if '.' 
in ip_to_port.keys()[0]: fgnhg_name = 'fgnhg_v4' else: fgnhg_name = 'fgnhg_v6' @@ -78,7 +82,8 @@ def generate_fgnhg_config(duthost, ip_to_port, bank_0_port, bank_1_port, prefix) fgnhg_data['FG_NHG'] = {} fgnhg_data['FG_NHG'][fgnhg_name] = { - "bucket_size": 125 + "bucket_size": 125, + "match_mode": "nexthop-based" } fgnhg_data['FG_NHG_MEMBER'] = {} @@ -88,14 +93,10 @@ def generate_fgnhg_config(duthost, ip_to_port, bank_0_port, bank_1_port, prefix) bank = "1" fgnhg_data['FG_NHG_MEMBER'][ip] = { "bank": bank, + "link": ptf_to_dut_port_map[port], "FG_NHG": fgnhg_name } - fgnhg_data['FG_NHG_PREFIX'] = {} - fgnhg_data['FG_NHG_PREFIX'][prefix] = { - "FG_NHG": fgnhg_name - } - logger.info("fgnhg entries programmed to DUT " + str(fgnhg_data)) duthost.copy(content=json.dumps(fgnhg_data, indent=2), dest="/tmp/fgnhg.json") duthost.shell("sonic-cfggen -j /tmp/fgnhg.json --write-to-db") @@ -124,14 +125,12 @@ def setup_neighbors(duthost, ptfhost, ip_to_port): duthost.shell("sonic-cfggen -j /tmp/neigh.json --write-to-db") -def create_fg_ptf_config(ptfhost, ip_to_port, port_list, bank_0_port, bank_1_port, router_mac, net_ports, prefix): +def create_fg_ptf_config(ptfhost, ip_to_port, port_list, bank_0_port, bank_1_port, router_mac, net_ports): fg_ecmp = { - "ip_to_port": ip_to_port, - "port_list": port_list, + "serv_ports": port_list, "bank_0_port": bank_0_port, "bank_1_port": bank_1_port, "dut_mac": router_mac, - "dst_ip": prefix.split('/')[0], "net_ports": net_ports, "inner_hashing": USE_INNER_HASHING, "num_flows": NUM_FLOWS @@ -141,180 +140,364 @@ def create_fg_ptf_config(ptfhost, ip_to_port, port_list, bank_0_port, bank_1_por ptfhost.copy(content=json.dumps(fg_ecmp, indent=2), dest=FG_ECMP_CFG) -def setup_test_config(ptfadapter, duthost, ptfhost, cfg_facts, router_mac, net_ports, vlan_ip, prefix): - port_list, ip_to_port, bank_0_port, bank_1_port = configure_interfaces(cfg_facts, duthost, ptfhost, ptfadapter, vlan_ip) - generate_fgnhg_config(duthost, ip_to_port, bank_0_port, bank_1_port, prefix) +def setup_test_config(duthost, ptfhost, cfg_facts, router_mac, net_ports, vlan_ip): + port_list, ip_to_port, bank_0_port, bank_1_port = configure_interfaces(cfg_facts, duthost, ptfhost, vlan_ip) + generate_fgnhg_config(duthost, ip_to_port, bank_0_port, bank_1_port) time.sleep(60) setup_neighbors(duthost, ptfhost, ip_to_port) - create_fg_ptf_config(ptfhost, ip_to_port, port_list, bank_0_port, bank_1_port, router_mac, net_ports, prefix) + create_fg_ptf_config(ptfhost, ip_to_port, port_list, bank_0_port, bank_1_port, router_mac, net_ports) return port_list, ip_to_port, bank_0_port, bank_1_port -def fg_ecmp(ptfhost, duthost, router_mac, net_ports, port_list, ip_to_port, bank_0_port, bank_1_port, prefix): +def configure_dut(duthost, cmd): + logger.info("Configuring dut with " + cmd) + duthost.shell(cmd, executable="/bin/bash") + + +def partial_ptf_runner(ptfhost, test_case, dst_ip, exp_flow_count, **kwargs): + log_file = "/tmp/fg_ecmp_test.FgEcmpTest.{}".format(test_case) + params = { + "test_case": test_case, + "dst_ip": dst_ip, + "exp_flow_count": exp_flow_count, + "config_file": FG_ECMP_CFG + } + params.update(kwargs) + + ptf_runner(ptfhost, + "ptftests", + "fg_ecmp_test.FgEcmpTest", + platform_dir="ptftests", + params= params, + qlen=1000, + log_file=log_file) + + +def fg_ecmp(ptfhost, duthost, router_mac, net_ports, port_list, ip_to_port, bank_0_port, bank_1_port, prefix_list): - if isinstance(ipaddress.ip_network(prefix), ipaddress.IPv4Network): + # Init base test params + if 
isinstance(ipaddress.ip_network(prefix_list[0]), ipaddress.IPv4Network): ipcmd = "ip route" else: ipcmd = "ipv6 route" - for nexthop in ip_to_port: - duthost.shell("vtysh -c 'configure terminal' -c '{} {} {}'".format(ipcmd, prefix, nexthop)) - time.sleep(1) + vtysh_base_cmd = "vtysh -c 'configure terminal'" + vtysh_base_cmd = duthost.get_vtysh_cmd_for_namespace(vtysh_base_cmd, DEFAULT_NAMESPACE) + dst_ip_list = [] + for prefix in prefix_list: + dst_ip_list.append(prefix.split('/')[0]) - test_time = str(datetime.now().strftime('%Y-%m-%d-%H:%M:%S')) + ### Start the test in a state where 1 link is down; when a nexthop addition occurs for a link which is down, the nexthop + ### should not become active + shutdown_link = bank_0_port[0] + dut_if_shutdown = ptf_to_dut_port_map[shutdown_link] + logger.info("Initialize test by creating flows and checking basic ecmp, " + "we start in a state where link " + dut_if_shutdown + " is down") - log_file = "/tmp/fg_ecmp_test.FgEcmpTest.{}.create_flows.log".format(test_time) + configure_dut(duthost, "config interface shutdown " + dut_if_shutdown) + time.sleep(30) + # Now add the route and nhs + for prefix in prefix_list: + cmd = vtysh_base_cmd + for nexthop in ip_to_port: + cmd = cmd + " -c '{} {} {}'".format(ipcmd, prefix, nexthop) + configure_dut(duthost, cmd) + + time.sleep(3) + + # Calculate expected flow counts per port to verify in ptf host exp_flow_count = {} flows_per_nh = NUM_FLOWS/len(port_list) for port in port_list: exp_flow_count[port] = flows_per_nh + + flows_to_redist = exp_flow_count[shutdown_link] + for port in bank_0_port: + if port != shutdown_link: + exp_flow_count[port] = exp_flow_count[port] + flows_to_redist/(len(bank_0_port) - 1) + del exp_flow_count[shutdown_link] - ptf_runner(ptfhost, - "ptftests", - "fg_ecmp_test.FgEcmpTest", - platform_dir="ptftests", - params={"test_case": 'create_flows', - "exp_flow_count": exp_flow_count, - "config_file": FG_ECMP_CFG}, - qlen=1000, - log_file=log_file) + # Send the packets + for dst_ip in dst_ip_list: + partial_ptf_runner(ptfhost, 'create_flows', dst_ip, exp_flow_count) - log_file = "/tmp/fg_ecmp_test.FgEcmpTest.{}.initial_hash_check.log".format(test_time) - ptf_runner(ptfhost, - "ptftests", - "fg_ecmp_test.FgEcmpTest", - platform_dir="ptftests", - params={"test_case": 'initial_hash_check', - "exp_flow_count": exp_flow_count, - "config_file": FG_ECMP_CFG}, - qlen=1000, - log_file=log_file) + ### Hashing verification: Send the same flows again, + ### and verify packets end up on the same ports for a given flow + logger.info("Hashing verification: Send the same flows again, " + "and verify packets end up on the same ports for a given flow") - exp_flow_count = {} - flows_for_withdrawn_nh_bank = (NUM_FLOWS/2)/(len(bank_0_port) - 1) - withdraw_nh_port = bank_0_port[1] - for port in bank_1_port: + for dst_ip in dst_ip_list: + partial_ptf_runner(ptfhost, 'initial_hash_check', dst_ip, exp_flow_count) + + + ### Send the same flows again, but unshut the port which was shut down at the beginning of the test + ### Check if hash buckets rebalanced as expected + logger.info("Send the same flows again, but unshut " + dut_if_shutdown + " and check " + "if flows rebalanced as expected and are seen on the now brought up link") + + configure_dut(duthost, "config interface startup " + dut_if_shutdown) + time.sleep(30) + + flows_per_nh = NUM_FLOWS/len(port_list) + for port in port_list: exp_flow_count[port] = flows_per_nh + + for dst_ip in dst_ip_list: + partial_ptf_runner(ptfhost, 'add_nh', dst_ip, exp_flow_count,
add_nh_port=shutdown_link) + + + ### Send the same flows again, but withdraw one next-hop before sending the flows, check if hash bucket + ### rebalanced as expected, and the number of flows received on a link is as expected + logger.info("Send the same flows again, but withdraw one next-hop before sending the flows, check if hash bucket " + "rebalanced as expected, and the number of flows received on a link is as expected") + + # Modify and test 1 prefix only for the rest of this test + dst_ip = dst_ip_list[0] + prefix = prefix_list[0] + + withdraw_nh_port = bank_0_port[1] + cmd = vtysh_base_cmd + for nexthop, port in ip_to_port.items(): + if port == withdraw_nh_port: + cmd = cmd + " -c 'no {} {} {}'".format(ipcmd, prefix, nexthop) + configure_dut(duthost, cmd) + time.sleep(3) + + flows_for_withdrawn_nh_bank = (NUM_FLOWS/2)/(len(bank_0_port) - 1) for port in bank_0_port: if port != withdraw_nh_port: exp_flow_count[port] = flows_for_withdrawn_nh_bank + del exp_flow_count[withdraw_nh_port] - for nexthop, port in ip_to_port.items(): - if port == withdraw_nh_port: - duthost.shell("vtysh -c 'configure terminal' -c 'no {} {} {}'".format(ipcmd, prefix, nexthop)) + # Validate packets with withdrawn nhs + partial_ptf_runner(ptfhost, 'withdraw_nh', dst_ip, exp_flow_count, withdraw_nh_port=withdraw_nh_port) + # Validate that the other 2 prefixes using Fine Grained ECMP were unaffected + for ip in dst_ip_list: + if ip == dst_ip: continue + partial_ptf_runner(ptfhost, 'initial_hash_check', ip, exp_flow_count) - log_file = "/tmp/fg_ecmp_test.FgEcmpTest.{}.withdraw_nh.log".format(test_time) + ### Send the same flows again, but disable one of the links, + ### and check flow hash redistribution + shutdown_link = bank_0_port[2] + dut_if_shutdown = ptf_to_dut_port_map[shutdown_link] + logger.info("Send the same flows again, but shutdown " + dut_if_shutdown + " and check " + "the flow hash redistribution") - time.sleep(1) + configure_dut(duthost, "config interface shutdown " + dut_if_shutdown) + time.sleep(30) - ptf_runner(ptfhost, - "ptftests", - "fg_ecmp_test.FgEcmpTest", - platform_dir="ptftests", - params={"test_case": 'withdraw_nh', - "config_file": FG_ECMP_CFG, - "exp_flow_count": exp_flow_count, - "withdraw_nh_port": withdraw_nh_port}, - qlen=1000, - log_file=log_file) + flows_for_shutdown_links_bank = (NUM_FLOWS/2)/(len(bank_0_port) - 2) + for port in bank_0_port: + if port != withdraw_nh_port and port != shutdown_link: + exp_flow_count[port] = flows_for_shutdown_links_bank + del exp_flow_count[shutdown_link] + + partial_ptf_runner(ptfhost, 'withdraw_nh', dst_ip, exp_flow_count, withdraw_nh_port=shutdown_link) + ### Send the same flows again, but enable the link we disabled the last time + ### and check flow hash redistribution + logger.info("Send the same flows again, but startup " + dut_if_shutdown + " and check " + "the flow hash redistribution") + + configure_dut(duthost, "config interface startup " + dut_if_shutdown) + time.sleep(30) + exp_flow_count = {} - for port in port_list: + flows_for_withdrawn_nh_bank = (NUM_FLOWS/2)/(len(bank_0_port) - 1) + for port in bank_1_port: exp_flow_count[port] = flows_per_nh + for port in bank_0_port: + if port != withdraw_nh_port: + exp_flow_count[port] = flows_for_withdrawn_nh_bank + partial_ptf_runner(ptfhost, 'add_nh', dst_ip, exp_flow_count, add_nh_port=shutdown_link) + + + ### Send the same flows again, but enable the next-hop which was down previously + ### and check flow hash redistribution + logger.info("Send the same flows again, but enable the 
next-hop which was down previously " + " and check flow hash redistribution") + + cmd = vtysh_base_cmd for nexthop, port in ip_to_port.items(): if port == withdraw_nh_port: - duthost.shell("vtysh -c 'configure terminal' -c '{} {} {}'".format(ipcmd, prefix, nexthop)) + cmd = cmd + " -c '{} {} {}'".format(ipcmd, prefix, nexthop) + configure_dut(duthost, cmd) + time.sleep(3) + exp_flow_count = {} + flows_per_nh = NUM_FLOWS/len(port_list) + for port in port_list: + exp_flow_count[port] = flows_per_nh - log_file = "/tmp/fg_ecmp_test.FgEcmpTest.add_nh.{}.log".format(test_time) + partial_ptf_runner(ptfhost, 'add_nh', dst_ip, exp_flow_count, add_nh_port=withdraw_nh_port) - time.sleep(1) - ptf_runner(ptfhost, - "ptftests", - "fg_ecmp_test.FgEcmpTest", - platform_dir="ptftests", - params={"test_case": 'add_nh', - "config_file": FG_ECMP_CFG, - "exp_flow_count": exp_flow_count, - "add_nh_port": withdraw_nh_port}, - qlen=1000, - log_file=log_file) + ### Simulate route and link flap conditions by toggling the route + ### and ensure that there is no orch crash and data plane impact + logger.info("Simulate route and link flap conditions by toggling the route " + "and ensure that there is no orch crash and data plane impact") + nexthop_to_toggle = ip_to_port.keys()[0] + cmd = "for i in {1..50}; do " + cmd = cmd + vtysh_base_cmd + cmd = cmd + " -c 'no {} {} {}';".format(ipcmd, prefix, nexthop_to_toggle) + cmd = cmd + " sleep 0.5;" + cmd = cmd + vtysh_base_cmd + cmd = cmd + " -c '{} {} {}';".format(ipcmd, prefix, nexthop_to_toggle) + cmd = cmd + " sleep 0.5;" + cmd = cmd + " done;" - withdraw_nh_bank = bank_0_port - for nexthop, port in ip_to_port.items(): - if port in withdraw_nh_bank: - duthost.shell("vtysh -c 'configure terminal' -c 'no {} {} {}'".format(ipcmd, prefix, nexthop)) + configure_dut(duthost, cmd) + time.sleep(30) + result = duthost.shell(argv=["pgrep", "orchagent"]) + pytest_assert(int(result["stdout"]) > 0, "Orchagent is not running") + partial_ptf_runner(ptfhost, 'bank_check', dst_ip, exp_flow_count) - log_file = "/tmp/fg_ecmp_test.FgEcmpTest.{}.withdraw_bank.log".format(test_time) - time.sleep(1) + ### Send the same flows again, but disable all next-hops in a bank + ### to test flow redistribution to the other bank + logger.info("Send the same flows again, but disable all next-hops in a bank " + "to test flow redistribution to the other bank") + + withdraw_nh_bank = bank_0_port + + cmd = vtysh_base_cmd + for nexthop, port in ip_to_port.items(): + if port in withdraw_nh_bank: + cmd = cmd + " -c 'no {} {} {}'".format(ipcmd, prefix, nexthop) + configure_dut(duthost, cmd) + time.sleep(3) exp_flow_count = {} flows_per_nh = NUM_FLOWS/len(bank_1_port) for port in bank_1_port: exp_flow_count[port] = flows_per_nh - ptf_runner(ptfhost, - "ptftests", - "fg_ecmp_test.FgEcmpTest", - platform_dir="ptftests", - params={"test_case": 'withdraw_bank', - "config_file": FG_ECMP_CFG, - "exp_flow_count": exp_flow_count, - "withdraw_nh_bank": withdraw_nh_bank}, - qlen=1000, - log_file=log_file) + partial_ptf_runner(ptfhost, 'withdraw_bank', dst_ip, exp_flow_count, withdraw_nh_bank=withdraw_nh_bank) + + + ### Send the same flows again, but enable 1 next-hop in a previously down bank to check + ### if flows redistribute back to previously down bank + logger.info("Send the same flows again, but enable 1 next-hop in a previously down bank to check " + "if flows redistribute back to previously down bank") first_nh = bank_0_port[3] + + cmd = vtysh_base_cmd for nexthop, port in ip_to_port.items(): if port == first_nh: - 
duthost.shell("vtysh -c 'configure terminal' -c '{} {} {}'".format(ipcmd, prefix, nexthop)) - - log_file = "/tmp/fg_ecmp_test.FgEcmpTest.{}.add_first_nh.log".format(test_time) - - time.sleep(1) + cmd = cmd + " -c '{} {} {}'".format(ipcmd, prefix, nexthop) + configure_dut(duthost, cmd) + time.sleep(3) exp_flow_count = {} flows_per_nh = (NUM_FLOWS/2)/(len(bank_1_port)) for port in bank_1_port: exp_flow_count[port] = flows_per_nh - exp_flow_count[first_nh] = NUM_FLOWS/2 - ptf_runner(ptfhost, - "ptftests", - "fg_ecmp_test.FgEcmpTest", - platform_dir="ptftests", - params={"test_case": 'add_first_nh', - "config_file": FG_ECMP_CFG, - "exp_flow_count": exp_flow_count, - "first_nh": first_nh}, - qlen=1000, - log_file=log_file) + partial_ptf_runner(ptfhost, 'add_first_nh', dst_ip, exp_flow_count, first_nh=first_nh) + + logger.info("Completed ...") + + +def fg_ecmp_to_regular_ecmp_transitions(ptfhost, duthost, router_mac, net_ports, port_list, ip_to_port, bank_0_port, bank_1_port, prefix_list, cfg_facts): + logger.info("fg_ecmp_to_regular_ecmp_transitions") + # Init base test params + ipv4 = False + if isinstance(ipaddress.ip_network(prefix_list[0]), ipaddress.IPv4Network): + ipcmd = "ip route" + ipv4 = True + else: + ipcmd = "ipv6 route" + + vtysh_base_cmd = "vtysh -c 'configure terminal'" + vtysh_base_cmd = duthost.get_vtysh_cmd_for_namespace(vtysh_base_cmd, DEFAULT_NAMESPACE) + dst_ip_list = [] + for prefix in prefix_list: + dst_ip_list.append(prefix.split('/')[0]) + + prefix = prefix_list[0] + dst_ip = dst_ip_list[0] + + logger.info("Transition prefix to non fine grained ecmp and validate packets") + + pc_ips = [] + for ip in cfg_facts['BGP_NEIGHBOR']: + if ipv4 and '.' in ip: + pc_ips.append(ip) + elif not ipv4 and ':' in ip: + pc_ips.append(ip) + + # Init flows + exp_flow_count = {} + flows_per_nh = (NUM_FLOWS)/(len(port_list)) + for port in port_list: + exp_flow_count[port] = flows_per_nh + for ip in dst_ip_list: + if ip == dst_ip: continue + partial_ptf_runner(ptfhost, 'create_flows', ip, exp_flow_count) + + cmd = vtysh_base_cmd + for ip in pc_ips: + cmd = cmd + " -c '{} {} {}'".format(ipcmd, prefix, ip) + for nexthop in ip_to_port.keys(): + cmd = cmd + " -c 'no {} {} {}'".format(ipcmd, prefix, nexthop) + configure_dut(duthost, cmd) + time.sleep(3) + + exp_flow_count = {} + flows_per_nh = (NUM_FLOWS)/(len(net_ports)) + for port in net_ports: + exp_flow_count[port] = flows_per_nh + + partial_ptf_runner(ptfhost, 'net_port_hashing', dst_ip, exp_flow_count) + + # Validate that the other 2 prefixes using Fine Grained ECMP were unaffected + exp_flow_count = {} + flows_per_nh = (NUM_FLOWS)/(len(port_list)) + for port in port_list: + exp_flow_count[port] = flows_per_nh + for ip in dst_ip_list: + if ip == dst_ip: continue + partial_ptf_runner(ptfhost, 'initial_hash_check', ip, exp_flow_count) + + + ### Transition prefix back to fine grained ecmp and validate packets + logger.info("Transition prefix back to fine grained ecmp and validate packets") + + cmd = vtysh_base_cmd + for nexthop in ip_to_port.keys(): + cmd = cmd + " -c '{} {} {}'".format(ipcmd, prefix, nexthop) + for ip in pc_ips: + cmd = cmd + " -c 'no {} {} {}'".format(ipcmd, prefix, ip) + configure_dut(duthost, cmd) + time.sleep(3) + + partial_ptf_runner(ptfhost, 'create_flows', dst_ip, exp_flow_count) + + # Validate that the other 2 prefixes using Fine Grained ECMP were unaffected + for ip in dst_ip_list: + if ip == dst_ip: continue + partial_ptf_runner(ptfhost, 'initial_hash_check', ip, exp_flow_count) -def cleanup(duthost): +def 
cleanup(duthost, ptfhost): + logger.info("Start cleanup") + ptfhost.command('rm /tmp/fg_ecmp_persist_map.json') config_reload(duthost) @pytest.fixture(scope="module") -def common_setup_teardown(tbinfo, duthosts, rand_one_dut_hostname): +def common_setup_teardown(tbinfo, duthosts, rand_one_dut_hostname, ptfhost): duthost = duthosts[rand_one_dut_hostname] - if tbinfo['topo']['name'] not in SUPPORTED_TOPO: - logger.warning("Unsupported topology, currently supports " + str(SUPPORTED_TOPO)) - pytest.skip("Unsupported topology") - if duthost.facts["asic_type"] not in SUPPORTED_PLATFORMS: - logger.warning("Unsupported platform, currently supports " + str(SUPPORTED_PLATFORMS)) - pytest.skip("Unsupported platform") try: mg_facts = duthost.get_extended_minigraph_facts(tbinfo) @@ -327,16 +510,18 @@ def common_setup_teardown(tbinfo, duthosts, rand_one_dut_hostname): yield duthost, cfg_facts, router_mac, net_ports finally: - cleanup(duthost) + cleanup(duthost, ptfhost) -def test_fg_ecmp(common_setup_teardown, ptfadapter, ptfhost): +def test_fg_ecmp(common_setup_teardown, ptfhost): duthost, cfg_facts, router_mac, net_ports = common_setup_teardown # IPv4 test - port_list, ip_to_port, bank_0_port, bank_1_port = setup_test_config(ptfadapter, duthost, ptfhost, cfg_facts, router_mac, net_ports, DEFAULT_VLAN_IPv4, PREFIX_IPv4) - fg_ecmp(ptfhost, duthost, router_mac, net_ports, port_list, ip_to_port, bank_0_port, bank_1_port, PREFIX_IPv4) + port_list, ipv4_to_port, bank_0_port, bank_1_port = setup_test_config(duthost, ptfhost, cfg_facts, router_mac, net_ports, DEFAULT_VLAN_IPv4) + fg_ecmp(ptfhost, duthost, router_mac, net_ports, port_list, ipv4_to_port, bank_0_port, bank_1_port, PREFIX_IPV4_LIST) + fg_ecmp_to_regular_ecmp_transitions(ptfhost, duthost, router_mac, net_ports, port_list, ipv4_to_port, bank_0_port, bank_1_port, PREFIX_IPV4_LIST, cfg_facts) # IPv6 test - port_list, ip_to_port, bank_0_port, bank_1_port = setup_test_config(ptfadapter, duthost, ptfhost, cfg_facts, router_mac, net_ports, DEFAULT_VLAN_IPv6, PREFIX_IPv6) - fg_ecmp(ptfhost, duthost, router_mac, net_ports, port_list, ip_to_port, bank_0_port, bank_1_port, PREFIX_IPv6) + port_list, ipv6_to_port, bank_0_port, bank_1_port = setup_test_config(duthost, ptfhost, cfg_facts, router_mac, net_ports, DEFAULT_VLAN_IPv6) + fg_ecmp(ptfhost, duthost, router_mac, net_ports, port_list, ipv6_to_port, bank_0_port, bank_1_port, PREFIX_IPV6_LIST) + fg_ecmp_to_regular_ecmp_transitions(ptfhost, duthost, router_mac, net_ports, port_list, ipv6_to_port, bank_0_port, bank_1_port, PREFIX_IPV6_LIST, cfg_facts)
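Reviewer note: the exp_flow_count dictionaries built throughout fg_ecmp() all follow the same model: NUM_FLOWS flows hash evenly across all next-hops of the fine grained group, and when a next-hop is withdrawn its share is redistributed only within its own bank ((NUM_FLOWS/2)/(len(bank) - 1) per surviving bank member), leaving the other bank untouched. The standalone sketch below illustrates that bookkeeping; it is not part of this patch, the helper name and port numbers are hypothetical, and it simply mirrors the formulas used in the test above.

# Illustrative sketch only -- hypothetical helper, not part of this patch.
# Mirrors the exp_flow_count bookkeeping in fg_ecmp(): flows hash evenly across
# all next-hops, and a withdrawn next-hop's flows re-spread only within its bank.

NUM_FLOWS = 1000

def expected_flows(bank_0_port, bank_1_port, withdrawn_port=None):
    """Return {ptf_port: expected_flow_count} for the given set of next-hops."""
    port_list = bank_0_port + bank_1_port
    flows_per_nh = NUM_FLOWS // len(port_list)
    exp = {port: flows_per_nh for port in port_list}

    if withdrawn_port is not None:
        bank = bank_0_port if withdrawn_port in bank_0_port else bank_1_port
        # Half of the flows belong to the affected bank; they redistribute over
        # the remaining members of that bank, while the other bank is untouched.
        flows_for_bank = (NUM_FLOWS // 2) // (len(bank) - 1)
        for port in bank:
            exp[port] = flows_for_bank
        del exp[withdrawn_port]
    return exp

if __name__ == '__main__':
    bank_0, bank_1 = [0, 1, 2, 3], [4, 5, 6, 7]
    print(expected_flows(bank_0, bank_1))                     # even spread: 125 flows per port
    print(expected_flows(bank_0, bank_1, withdrawn_port=1))   # bank 0 survivors get 166, bank 1 stays at 125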