From 9f22ba7f78ed3726b775b483fac19a999a28c8a8 Mon Sep 17 00:00:00 2001
From: Prince Sunny
Date: Mon, 19 Apr 2021 17:07:59 -0700
Subject: [PATCH] Handle IPv6 and ECMP routes to be programmed to ASIC (#1711)

* In case of standby mux, associated routes may come to orch with an ifname of tun0. Handle the case when the nexthop is non-zero.
* For ECMP, multiple nexthop IPs can have the same SAI nexthop id (tunnel NH). The existing data structure cannot handle such a case, so a secondary map is added for nexthops that share an id.
---
 cfgmgr/tunnelmgr.cpp    | 38 +++++++++++++++++++++++++++++--------
 orchagent/muxorch.cpp   | 12 ------------
 orchagent/neighorch.cpp |  5 ++++-
 orchagent/routeorch.cpp | 42 ++++++++++++++++++++++++++++++++++++-----
 tests/test_mux.py       | 39 ++++++++++++++++++++++++++++++++++++++
 5 files changed, 110 insertions(+), 26 deletions(-)

diff --git a/cfgmgr/tunnelmgr.cpp b/cfgmgr/tunnelmgr.cpp
index 327165e73254..7f4dc4dd3d4a 100644
--- a/cfgmgr/tunnelmgr.cpp
+++ b/cfgmgr/tunnelmgr.cpp
@@ -64,10 +64,21 @@ static int cmdIpTunnelRouteAdd(const std::string& pfx, std::string & res)
     // ip route add/replace {{ip prefix}} dev {{tunnel intf}}
     // Replace route if route already exists
     ostringstream cmd;
-    cmd << IP_CMD " route replace "
-        << shellquote(pfx)
-        << " dev "
-        << TUNIF;
+    if (IpPrefix(pfx).isV4())
+    {
+        cmd << IP_CMD " route replace "
+            << shellquote(pfx)
+            << " dev "
+            << TUNIF;
+    }
+    else
+    {
+        cmd << IP_CMD " -6 route replace "
+            << shellquote(pfx)
+            << " dev "
+            << TUNIF;
+    }
+
     return swss::exec(cmd.str(), res);
 }
 
@@ -75,10 +86,21 @@ static int cmdIpTunnelRouteDel(const std::string& pfx, std::string & res)
 {
     // ip route del {{ip prefix}} dev {{tunnel intf}}
     ostringstream cmd;
-    cmd << IP_CMD " route del "
-        << shellquote(pfx)
-        << " dev "
-        << TUNIF;
+    if (IpPrefix(pfx).isV4())
+    {
+        cmd << IP_CMD " route del "
+            << shellquote(pfx)
+            << " dev "
+            << TUNIF;
+    }
+    else
+    {
+        cmd << IP_CMD " -6 route del "
+            << shellquote(pfx)
+            << " dev "
+            << TUNIF;
+    }
+
     return swss::exec(cmd.str(), res);
 }
 
diff --git a/orchagent/muxorch.cpp b/orchagent/muxorch.cpp
index db5fd7fa757c..7d629705ab71 100644
--- a/orchagent/muxorch.cpp
+++ b/orchagent/muxorch.cpp
@@ -1278,12 +1278,6 @@ void MuxCableOrch::updateMuxState(string portName, string muxState)
 
 void MuxCableOrch::addTunnelRoute(const NextHopKey &nhKey)
 {
-    if (!nhKey.ip_address.isV4())
-    {
-        SWSS_LOG_INFO("IPv6 tunnel route add '%s' - (Not Implemented)", nhKey.ip_address.to_string().c_str());
-        return;
-    }
-
     vector<FieldValueTuple> data;
     string key, alias = nhKey.alias;
 
@@ -1299,12 +1293,6 @@ void MuxCableOrch::addTunnelRoute(const NextHopKey &nhKey)
 
 void MuxCableOrch::removeTunnelRoute(const NextHopKey &nhKey)
 {
-    if (!nhKey.ip_address.isV4())
-    {
-        SWSS_LOG_INFO("IPv6 tunnel route remove '%s' - (Not Implemented)", nhKey.ip_address.to_string().c_str());
-        return;
-    }
-
     string key, alias = nhKey.alias;
     IpPrefix pfx = nhKey.ip_address.to_string();
 
diff --git a/orchagent/neighorch.cpp b/orchagent/neighorch.cpp
index 5e978bd039cc..ccbab68c8b2a 100644
--- a/orchagent/neighorch.cpp
+++ b/orchagent/neighorch.cpp
@@ -313,7 +313,10 @@ bool NeighOrch::isNextHopFlagSet(const NextHopKey &nexthop, const uint32_t nh_flag)
 
     auto nhop = m_syncdNextHops.find(nexthop);
 
-    assert(nhop != m_syncdNextHops.end());
+    if (nhop == m_syncdNextHops.end())
+    {
+        return false;
+    }
 
     if (nhop->second.nh_flags & nh_flag)
     {
diff --git a/orchagent/routeorch.cpp b/orchagent/routeorch.cpp
index 21e63f88c909..7415c33dd48f 100644
--- a/orchagent/routeorch.cpp
+++ b/orchagent/routeorch.cpp
@@ -574,7 +574,7 @@ void RouteOrch::doTask(Consumer& consumer)
                  * way is to create loopback interface and then create
                  * route pointing to it, so that we can trap packets to
                  * CPU */
-                if (alias == "eth0" || alias == "docker0" || alias == "tun0" ||
+                if (alias == "eth0" || alias == "docker0" ||
                     alias == "lo" || !alias.compare(0, strlen(LOOPBACK_PREFIX), LOOPBACK_PREFIX))
                 {
                     excp_intfs_flag = true;
@@ -599,10 +599,18 @@ void RouteOrch::doTask(Consumer& consumer)
 
                 if (overlay_nh == false)
                 {
+                    if (alsv[0] == "tun0" && !(IpAddress(ipv[0]).isZero()))
+                    {
+                        alsv[0] = gIntfsOrch->getRouterIntfsAlias(ipv[0]);
+                    }
                     nhg_str = ipv[0] + NH_DELIMITER + alsv[0];
 
                     for (uint32_t i = 1; i < ipv.size(); i++)
                     {
+                        if (alsv[i] == "tun0" && !(IpAddress(ipv[i]).isZero()))
+                        {
+                            alsv[i] = gIntfsOrch->getRouterIntfsAlias(ipv[i]);
+                        }
                         nhg_str += NHG_DELIMITER + ipv[i] + NH_DELIMITER + alsv[i];
                     }
 
@@ -628,6 +636,11 @@ void RouteOrch::doTask(Consumer& consumer)
                 /* add addBlackholeRoute or addRoute support empty nhg */
                 it = consumer.m_toSync.erase(it);
             }
+            /* skip direct routes to tun0 */
+            else if (alsv[0] == "tun0")
+            {
+                it = consumer.m_toSync.erase(it);
+            }
             /* directly connected route to VRF interface which come from kernel */
             else if (!alsv[0].compare(0, strlen(VRF_PREFIX), VRF_PREFIX))
             {
@@ -1009,6 +1022,7 @@ bool RouteOrch::addNextHopGroup(const NextHopGroupKey &nexthops)
     vector<sai_object_id_t> next_hop_ids;
     set<NextHopKey> next_hop_set = nexthops.getNextHops();
     std::map<sai_object_id_t, NextHopKey> nhopgroup_members_set;
+    std::map<sai_object_id_t, set<NextHopKey>> nhopgroup_shared_set;
 
     /* Assert each IP address exists in m_syncdNextHops table,
      * and add the corresponding next_hop_id to next_hop_ids. */
@@ -1029,7 +1043,14 @@ bool RouteOrch::addNextHopGroup(const NextHopGroupKey &nexthops)
 
         sai_object_id_t next_hop_id = m_neighOrch->getNextHopId(it);
         next_hop_ids.push_back(next_hop_id);
-        nhopgroup_members_set[next_hop_id] = it;
+        if (nhopgroup_members_set.find(next_hop_id) == nhopgroup_members_set.end())
+        {
+            nhopgroup_members_set[next_hop_id] = it;
+        }
+        else
+        {
+            nhopgroup_shared_set[next_hop_id].insert(it);
+        }
     }
 
     sai_attribute_t nhg_attr;
@@ -1103,8 +1124,20 @@ bool RouteOrch::addNextHopGroup(const NextHopGroupKey &nexthops)
         gCrmOrch->incCrmResUsedCounter(CrmResourceType::CRM_NEXTHOP_GROUP_MEMBER);
 
         // Save the membership into next hop structure
-        next_hop_group_entry.nhopgroup_members[nhopgroup_members_set.find(nhid)->second] =
-            nhgm_id;
+        if (nhopgroup_shared_set.find(nhid) != nhopgroup_shared_set.end())
+        {
+            auto it = nhopgroup_shared_set[nhid].begin();
+            next_hop_group_entry.nhopgroup_members[*it] = nhgm_id;
+            nhopgroup_shared_set[nhid].erase(it);
+            if (nhopgroup_shared_set[nhid].empty())
+            {
+                nhopgroup_shared_set.erase(nhid);
+            }
+        }
+        else
+        {
+            next_hop_group_entry.nhopgroup_members[nhopgroup_members_set.find(nhid)->second] = nhgm_id;
+        }
     }
 
     /* Increment the ref_count for the next hops used by the next hop group. */
@@ -1118,7 +1151,6 @@ bool RouteOrch::addNextHopGroup(const NextHopGroupKey &nexthops)
 
     next_hop_group_entry.ref_count = 0;
     m_syncdNextHopGroups[nexthops] = next_hop_group_entry;
-
     return true;
 }
 
diff --git a/tests/test_mux.py b/tests/test_mux.py
index c6c3b9716907..f4fefae651d1 100644
--- a/tests/test_mux.py
+++ b/tests/test_mux.py
@@ -59,6 +59,7 @@ def create_vlan_interface(self, confdb, asicdb, dvs):
         fvs = {"NULL": "NULL"}
         confdb.create_entry("VLAN_INTERFACE", "Vlan1000", fvs)
         confdb.create_entry("VLAN_INTERFACE", "Vlan1000|192.168.0.1/24", fvs)
+        confdb.create_entry("VLAN_INTERFACE", "Vlan1000|fc02:1000::1/64", fvs)
 
         dvs.runcmd("config interface startup Ethernet0")
         dvs.runcmd("config interface startup Ethernet4")
@@ -334,6 +335,44 @@ def create_and_test_route(self, appdb, asicdb, dvs, dvs_route):
         self.set_mux_state(appdb, "Ethernet4", "active")
         self.check_nexthop_group_in_asic_db(asicdb, rtkeys[0])
 
+        ps._del(rtprefix)
+
+        # Test IPv6 ECMP routes, starting with standby config
+        self.set_mux_state(appdb, "Ethernet0", "standby")
+        self.set_mux_state(appdb, "Ethernet4", "standby")
+
+        rtprefix = "2020::/64"
+
+        dvs_route.check_asicdb_deleted_route_entries([rtprefix])
+
+        ps = swsscommon.ProducerStateTable(pdb.db_connection, "ROUTE_TABLE")
+
+        fvs = swsscommon.FieldValuePairs([("nexthop", self.SERV1_IPV6 + "," + self.SERV2_IPV6), ("ifname", "tun0,tun0")])
+
+        ps.set(rtprefix, fvs)
+
+        # Check if route was propagated to ASIC DB
+        rtkeys = dvs_route.check_asicdb_route_entries([rtprefix])
+
+        # Check for nexthop group and validate nexthop group member in asic db
+        self.check_nexthop_group_in_asic_db(asicdb, rtkeys[0], 2)
+
+        # Step 1 - Change one NH to active and verify ecmp route
+        self.set_mux_state(appdb, "Ethernet0", "active")
+        self.check_nexthop_group_in_asic_db(asicdb, rtkeys[0], 1)
+
+        # Step 2 - Change the other NH to active and verify ecmp route
+        self.set_mux_state(appdb, "Ethernet4", "active")
+        self.check_nexthop_group_in_asic_db(asicdb, rtkeys[0])
+
+        # Step 3 - Change one NH back to standby and verify ecmp route
+        self.set_mux_state(appdb, "Ethernet0", "standby")
+        self.check_nexthop_group_in_asic_db(asicdb, rtkeys[0], 1)
+
+        # Step 4 - Change the other NH to standby and verify ecmp route
+        self.set_mux_state(appdb, "Ethernet4", "standby")
+        self.check_nexthop_group_in_asic_db(asicdb, rtkeys[0], 2)
+
     def get_expected_sai_qualifiers(self, portlist, dvs_acl):
         expected_sai_qualifiers = {
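-- 
A note on the ECMP change above (text below the signature marker is ignored by
git am). When two nexthop IPs of one ECMP route resolve to the same tunnel
nexthop id, a single map keyed by sai_object_id_t can remember only one of the
NextHopKeys, so one group member would be lost; the secondary map holds the
extra keys and is drained as members are created. The following self-contained
sketch shows that bookkeeping in isolation. It is an illustration, not part of
the patch: NextHopKey, the object ids, and the resolved list are simplified
stand-ins for the orchagent/SAI types, and the member ids are made up rather
than returned by SAI.

#include <cstdint>
#include <iostream>
#include <map>
#include <set>
#include <string>
#include <utility>
#include <vector>

using ObjectId = uint64_t;      // stand-in for sai_object_id_t
using NextHopKey = std::string; // stand-in for orchagent's NextHopKey

int main()
{
    // Nexthops of one ECMP route after resolution. The two IPv6 standby-mux
    // nexthops resolve to the same tunnel nexthop object (0x1001), the case
    // a single map<ObjectId, NextHopKey> cannot represent.
    std::vector<std::pair<NextHopKey, ObjectId>> resolved = {
        {"fc02:1000::30@Vlan1000", 0x1001},
        {"fc02:1000::40@Vlan1000", 0x1001}, // shares the tunnel nexthop id
        {"192.168.0.100@Vlan1000", 0x2002},
    };

    std::map<ObjectId, NextHopKey> members;          // primary: first key per id
    std::map<ObjectId, std::set<NextHopKey>> shared; // secondary: extra keys per id
    std::vector<ObjectId> next_hop_ids;              // one entry per group member

    for (const auto& [key, id] : resolved)
    {
        next_hop_ids.push_back(id);
        if (members.find(id) == members.end())
        {
            members[id] = key;
        }
        else
        {
            shared[id].insert(key);
        }
    }

    // Create one group member per id occurrence and attribute each member
    // back to a distinct key, draining the shared set first, in the same
    // order the patched addNextHopGroup() uses.
    std::map<NextHopKey, ObjectId> group_members;
    ObjectId fake_member_id = 0x9000; // pretend these came back from SAI
    for (ObjectId nhid : next_hop_ids)
    {
        ObjectId nhgm_id = fake_member_id++;
        auto sh = shared.find(nhid);
        if (sh != shared.end())
        {
            auto it = sh->second.begin();
            group_members[*it] = nhgm_id;
            sh->second.erase(it);
            if (sh->second.empty())
            {
                shared.erase(sh);
            }
        }
        else
        {
            group_members[members[nhid]] = nhgm_id;
        }
    }

    // All three keys own a distinct member, even though two share an id.
    for (const auto& [key, member] : group_members)
    {
        std::cout << key << " -> member 0x" << std::hex << member << std::dec << "\n";
    }
    return 0;
}

Which key receives which member id does not matter; what the two maps
guarantee is that every NextHopKey ends up owning exactly one group member,
so mux state changes can later update each member independently.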