Add FG matchmode changes
anish-n committed Feb 18, 2021
1 parent 6594039 commit 298a5bf
Showing 2 changed files with 277 additions and 129 deletions.
127 changes: 56 additions & 71 deletions ansible/roles/test/files/ptftests/fg_ecmp_test.py
@@ -1,10 +1,13 @@
# This PTF test contains the test cases for fine grained ECMP; the test scenarios are as follows:
# create_flows: Sends NUM_FLOWS flows with varying src IPs and creates a tuple-to-port map
# initial_hash_check: Checks that the flows from create_flows still end up at the same port
# bank_check: Check that the flows end up on the same bank as before
# withdraw_nh: Withdraw a next-hop in one fg nhg bank, and make sure flows redistribute to the remaining ports in that fg nhg bank
# add_nh: Add a next-hop in one fg nhg bank, and make sure flows redistribute from ports in the same fg nhg bank to the added port
# withdraw_bank: Withdraw all next-hops which constitute a bank, and make sure that flows migrate to using the other bank
# add_first_nh: Add the 1st next-hop from the previously withdrawn bank, and make sure that some flows migrate back to using that next-hop in the old bank
# net_port_hashing: Verify hashing of packets destined to the T1 (network) ports when the packets come from the servers
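# For reference, a rough sketch of the flow-to-port map that create_flows persists to PERSIST_MAP
# and the later cases read back (keys and values below are placeholders). With this change the map
# is keyed by destination prefix, so several fine grained next-hop groups can be tracked at once:
#   {
#       "<dst_ip_1>": {"<src_ip_a>": <egress_port_index>, "<src_ip_b>": <egress_port_index>},
#       "<dst_ip_2>": {"<src_ip_c>": <egress_port_index>}
#   }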



import ipaddress
@@ -39,8 +42,8 @@ def log(self, message):
logging.info(message)


def trigger_mac_learning(self, exp_ports):
for src_port in exp_ports:
def trigger_mac_learning(self, serv_ports):
for src_port in serv_ports:
pkt = simple_eth_packet(
eth_dst=self.router_mac,
eth_src=self.dataplane.get_mac(0, src_port),
@@ -78,23 +81,26 @@ def setUp(self):
raise Exception("required parameter 'exp_flow_count' is not present")
self.exp_flow_count = self.test_params['exp_flow_count']

if 'dst_ip' not in self.test_params:
raise Exception("required parameter 'dst_ip' is not present")
self.dst_ip = self.test_params['dst_ip']

if not os.path.isfile(config):
raise Exception("the config file %s doesn't exist" % config)

with open(config) as fp:
graph = json.load(fp)

self.net_ports = graph['net_ports']
self.exp_ports = graph['port_list']
self.serv_ports = graph['serv_ports']
self.exp_port_set_one = graph['bank_0_port']
self.exp_port_set_two = graph['bank_1_port']
self.dst_ip = graph['dst_ip']
self.router_mac = graph['dut_mac']
self.num_flows = graph['num_flows']
self.inner_hashing = graph['inner_hashing']

self.log(self.net_ports)
self.log(self.exp_ports)
self.log(self.serv_ports)
self.log(self.exp_port_set_one)
self.log(self.exp_port_set_two)
self.log(self.dst_ip)
@@ -104,7 +110,7 @@ def setUp(self):
self.log(self.inner_hashing)
self.log(self.exp_flow_count)

self.trigger_mac_learning(self.exp_ports)
self.trigger_mac_learning(self.serv_ports)
time.sleep(3)


@@ -143,6 +149,15 @@ def fg_ecmp(self):
tuple_to_port_map ={}
hit_count_map = {}

if not os.path.exists(PERSIST_MAP):
with open(PERSIST_MAP, 'w'): pass
else:
with open(PERSIST_MAP) as fp:
tuple_to_port_map = json.load(fp)

if tuple_to_port_map is None or self.dst_ip not in tuple_to_port_map:
tuple_to_port_map[self.dst_ip] = {}
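# PERSIST_MAP may already hold flow maps for other destination prefixes from earlier test cases;
# the sub-dict for this test's dst_ip is only created here if it does not exist yet.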

if self.test_case == 'create_flows':
# Send packets with varying src_ips to create NUM_FLOWS unique flows
# and generate a flow to port map
@@ -154,177 +169,147 @@ def fg_ecmp(self):
else:
in_port = self.net_ports[0]
(port_idx, _) = self.send_rcv_ip_pkt(
in_port, src_port, dst_port, src_ip, dst_ip, self.exp_ports, ipv4)
in_port, src_port, dst_port, src_ip, dst_ip, self.serv_ports, ipv4)
hit_count_map[port_idx] = hit_count_map.get(port_idx, 0) + 1
tuple_to_port_map[src_ip] = port_idx
self.test_balancing(hit_count_map)

json.dump(tuple_to_port_map, open(PERSIST_MAP,"w"))
return
tuple_to_port_map[self.dst_ip][src_ip] = port_idx
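# The per-flow result is recorded under the dst_ip key; the balancing check and the dump back to
# PERSIST_MAP now happen once at the end of fg_ecmp rather than separately in each test case.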

elif self.test_case == 'initial_hash_check':
with open(PERSIST_MAP) as fp:
tuple_to_port_map = json.load(fp)
assert tuple_to_port_map
# step 2: Send the same flows once again and verify that they end up on the same port
self.log("Ensure that flow to port map is maintained when the same flow is re-sent...")
for src_ip, port in tuple_to_port_map.iteritems():
for src_ip, port in tuple_to_port_map[self.dst_ip].iteritems():
if self.inner_hashing:
in_port = random.choice(self.net_ports)
else:
in_port = self.net_ports[0]
(port_idx, _) = self.send_rcv_ip_pkt(
in_port, src_port, dst_port, src_ip, dst_ip, self.exp_ports, ipv4)
in_port, src_port, dst_port, src_ip, dst_ip, self.serv_ports, ipv4)
assert port_idx == port
return

elif self.test_case == 'bank_check':
with open(PERSIST_MAP) as fp:
tuple_to_port_map = json.load(fp)
assert tuple_to_port_map
self.log("Send the same flows once again and verify that they end up on the same bank...")
for src_ip, port in tuple_to_port_map.iteritems():
for src_ip, port in tuple_to_port_map[self.dst_ip].iteritems():
if self.inner_hashing:
in_port = random.choice(self.net_ports)
else:
in_port = self.net_ports[0]
(port_idx, _) = self.send_rcv_ip_pkt(
in_port, src_port, dst_port, src_ip, dst_ip, self.exp_ports, ipv4)
in_port, src_port, dst_port, src_ip, dst_ip, self.serv_ports, ipv4)
if port in self.exp_port_set_one:
assert port_idx in self.exp_port_set_one
if port in self.exp_port_set_two:
assert port_idx in self.exp_port_set_two
hit_count_map[port_idx] = hit_count_map.get(port_idx, 0) + 1
tuple_to_port_map[src_ip] = port_idx
self.test_balancing(hit_count_map)

json.dump(tuple_to_port_map, open(PERSIST_MAP,"w"))
return
tuple_to_port_map[self.dst_ip][src_ip] = port_idx

elif self.test_case == 'withdraw_nh':
self.log("Withdraw next-hop " + str(self.withdraw_nh_port) + " and ensure hash redistribution within correct bank")
with open(PERSIST_MAP) as fp:
tuple_to_port_map = json.load(fp)
assert tuple_to_port_map
if self.withdraw_nh_port in self.exp_port_set_one:
withdraw_port_grp = self.exp_port_set_one
else:
withdraw_port_grp = self.exp_port_set_two
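# Expectation: flows that were pinned to the withdrawn next-hop must move to another port of the
# same bank (withdraw_port_grp), while flows on all other ports must stay where they were.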
hit_count_map = {}
for src_ip, port in tuple_to_port_map.iteritems():
for src_ip, port in tuple_to_port_map[self.dst_ip].iteritems():
if self.inner_hashing:
in_port = random.choice(self.net_ports)
else:
in_port = self.net_ports[0]
(port_idx, _) = self.send_rcv_ip_pkt(
in_port, src_port, dst_port, src_ip, dst_ip, self.exp_ports, ipv4)
in_port, src_port, dst_port, src_ip, dst_ip, self.serv_ports, ipv4)
hit_count_map[port_idx] = hit_count_map.get(port_idx, 0) + 1
assert port_idx != self.withdraw_nh_port
if port == self.withdraw_nh_port:
assert port_idx != self.withdraw_nh_port
assert (port_idx in withdraw_port_grp)
tuple_to_port_map[src_ip] = port_idx
tuple_to_port_map[self.dst_ip][src_ip] = port_idx
else:
assert port_idx == port

self.test_balancing(hit_count_map)

json.dump(tuple_to_port_map, open(PERSIST_MAP,"w"))
return

elif self.test_case == 'add_nh':
self.log("Add next-hop " + str(self.add_nh_port) + " and ensure hash redistribution within correct bank")
with open(PERSIST_MAP) as fp:
tuple_to_port_map = json.load(fp)
assert tuple_to_port_map
if self.add_nh_port in self.exp_port_set_one:
add_port_grp = self.exp_port_set_one
else:
add_port_grp = self.exp_port_set_two
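# Expectation: only flows that land on the newly added next-hop may change port, and they must
# originate from ports of the same bank (add_port_grp); every other flow stays on its original port.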
hit_count_map = {}
for src_ip, port in tuple_to_port_map.iteritems():
for src_ip, port in tuple_to_port_map[self.dst_ip].iteritems():
if self.inner_hashing:
in_port = random.choice(self.net_ports)
else:
in_port = self.net_ports[0]
(port_idx, _) = self.send_rcv_ip_pkt(
in_port, src_port, dst_port, src_ip, dst_ip, self.exp_ports, ipv4)
in_port, src_port, dst_port, src_ip, dst_ip, self.serv_ports, ipv4)
hit_count_map[port_idx] = hit_count_map.get(port_idx, 0) + 1
if port_idx == self.add_nh_port:
assert (port in add_port_grp)
tuple_to_port_map[src_ip] = port_idx
tuple_to_port_map[self.dst_ip][src_ip] = port_idx
else:
assert port_idx == port

self.test_balancing(hit_count_map)

json.dump(tuple_to_port_map, open(PERSIST_MAP,"w"))
return

elif self.test_case == 'withdraw_bank':
self.log("Withdraw bank " + str(self.withdraw_nh_bank) + " and ensure hash redistribution is as expected")
with open(PERSIST_MAP) as fp:
tuple_to_port_map = json.load(fp)
assert tuple_to_port_map
if self.withdraw_nh_bank[0] in self.exp_port_set_one:
active_port_grp = self.exp_port_set_two
else:
active_port_grp = self.exp_port_set_one
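# Expectation: every flow that was on a port of the withdrawn bank must move to some port of the
# other, still-active bank; flows already on the active bank remain on their original port.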
hit_count_map = {}
for src_ip, port in tuple_to_port_map.iteritems():
for src_ip, port in tuple_to_port_map[self.dst_ip].iteritems():
if self.inner_hashing:
in_port = random.choice(self.net_ports)
else:
in_port = self.net_ports[0]
(port_idx, _) = self.send_rcv_ip_pkt(
in_port, src_port, dst_port, src_ip, dst_ip, self.exp_ports, ipv4)
in_port, src_port, dst_port, src_ip, dst_ip, self.serv_ports, ipv4)
hit_count_map[port_idx] = hit_count_map.get(port_idx, 0) + 1
if port in self.withdraw_nh_bank:
assert (port_idx in active_port_grp)
tuple_to_port_map[src_ip] = port_idx
tuple_to_port_map[self.dst_ip][src_ip] = port_idx
else:
assert port_idx == port

self.test_balancing(hit_count_map)

json.dump(tuple_to_port_map, open(PERSIST_MAP,"w"))
return

elif self.test_case == 'add_first_nh':
self.log("Add 1st next-hop " + str(self.first_nh) + " and ensure hash redistribution is as expected")
with open(PERSIST_MAP) as fp:
tuple_to_port_map = json.load(fp)
if self.first_nh in self.exp_port_set_one:
active_port_grp = self.exp_port_set_two
else:
active_port_grp = self.exp_port_set_one
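# Expectation: each flow either stays on its current active-bank port or migrates to the re-added
# first next-hop of the old bank; any other egress port fails the assertion below.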

assert tuple_to_port_map
hit_count_map = {}
for src_ip, port in tuple_to_port_map.iteritems():
for src_ip, port in tuple_to_port_map[self.dst_ip].iteritems():
if self.inner_hashing:
in_port = random.choice(self.net_ports)
else:
in_port = self.net_ports[0]
(port_idx, _) = self.send_rcv_ip_pkt(
in_port, src_port, dst_port, src_ip, dst_ip, self.exp_ports, ipv4)
in_port, src_port, dst_port, src_ip, dst_ip, self.serv_ports, ipv4)
hit_count_map[port_idx] = hit_count_map.get(port_idx, 0) + 1
flow_redistribution_in_correct_grp = False
if port_idx in active_port_grp:
assert port_idx == port
flow_redistribution_in_correct_grp = True
elif port_idx == self.first_nh:
flow_redistribution_in_correct_grp = True
tuple_to_port_map[src_ip] = port_idx
tuple_to_port_map[self.dst_ip][src_ip] = port_idx
assert flow_redistribution_in_correct_grp == True

elif self.test_case == 'net_port_hashing':
self.log("Send packets destined to network ports and ensure hash distribution is as expected")

for src_ip, port in tuple_to_port_map[self.dst_ip].iteritems():
if self.inner_hashing:
in_port = random.choice(self.serv_ports)
else:
in_port = self.serv_ports[0]
(port_idx, _) = self.send_rcv_ip_pkt(
in_port, src_port, dst_port, src_ip, dst_ip, self.net_ports, ipv4)
hit_count_map[port_idx] = hit_count_map.get(port_idx, 0) + 1

self.test_balancing(hit_count_map)
return

else:
self.log("Unsupported testcase " + self.test_case)
return

self.test_balancing(hit_count_map)
json.dump(tuple_to_port_map, open(PERSIST_MAP,"w"))
return


def send_rcv_ip_pkt(self, in_port, sport, dport, src_ip_addr, dst_ip_addr,
dst_port_list, ipv4=True):