diff --git a/cfgmgr/tunnelmgr.cpp b/cfgmgr/tunnelmgr.cpp
index 848b4dba6bab..327165e73254 100644
--- a/cfgmgr/tunnelmgr.cpp
+++ b/cfgmgr/tunnelmgr.cpp
@@ -6,49 +6,154 @@
 #include "logger.h"
 #include "tunnelmgr.h"
+#include "tokenize.h"
+#include "shellcmd.h"
+#include "exec.h"
 
 using namespace std;
 using namespace swss;
 
-TunnelMgr::TunnelMgr(DBConnector *cfgDb, DBConnector *appDb, std::string tableName) :
-        Orch(cfgDb, tableName),
-        m_appIpInIpTunnelTable(appDb, APP_TUNNEL_DECAP_TABLE_NAME)
+#define IPINIP "IPINIP"
+#define TUNIF "tun0"
+#define LOOPBACK_SRC "Loopback3"
+
+static int cmdIpTunnelIfCreate(const swss::TunnelInfo & info, std::string & res)
+{
+    // ip tunnel add {{tunnel intf}} mode ipip local {{dst ip}} remote {{remote ip}}
+    ostringstream cmd;
+    cmd << IP_CMD " tunnel add "
+        << TUNIF << " mode ipip local "
+        << shellquote(info.dst_ip)
+        << " remote "
+        << shellquote(info.remote_ip);
+    return swss::exec(cmd.str(), res);
+}
+
+static int cmdIpTunnelIfRemove(std::string & res) {
+    // ip tunnel del {{tunnel intf}}
+    ostringstream cmd;
+    cmd << IP_CMD " tunnel del "
+        << TUNIF;
+    return swss::exec(cmd.str(), res);
+}
+
+static int cmdIpTunnelIfUp(std::string & res)
+{
+    // ip link set dev {{tunnel intf}} up
+    ostringstream cmd;
+    cmd << IP_CMD " link set dev "
+        << TUNIF
+        << " up";
+    return swss::exec(cmd.str(), res);
+}
+
+static int cmdIpTunnelIfAddress(const std::string& ip, std::string & res)
+{
+    // ip addr add {{loopback3 ip}} dev {{tunnel intf}}
+    ostringstream cmd;
+    cmd << IP_CMD " addr add "
+        << shellquote(ip)
+        << " dev "
+        << TUNIF;
+    return swss::exec(cmd.str(), res);
+}
+
+static int cmdIpTunnelRouteAdd(const std::string& pfx, std::string & res)
+{
+    // ip route add/replace {{ip prefix}} dev {{tunnel intf}}
+    // Replace route if route already exists
+    ostringstream cmd;
+    cmd << IP_CMD " route replace "
+        << shellquote(pfx)
+        << " dev "
+        << TUNIF;
+    return swss::exec(cmd.str(), res);
+}
+
+static int cmdIpTunnelRouteDel(const std::string& pfx, std::string & res)
+{
+    // ip route del {{ip prefix}} dev {{tunnel intf}}
+    ostringstream cmd;
+    cmd << IP_CMD " route del "
+        << shellquote(pfx)
+        << " dev "
+        << TUNIF;
+    return swss::exec(cmd.str(), res);
+}
+
+TunnelMgr::TunnelMgr(DBConnector *cfgDb, DBConnector *appDb, const std::vector<std::string> &tableNames) :
+        Orch(cfgDb, tableNames),
+        m_appIpInIpTunnelTable(appDb, APP_TUNNEL_DECAP_TABLE_NAME),
+        m_cfgPeerTable(cfgDb, CFG_PEER_SWITCH_TABLE_NAME)
+{
+    std::vector<std::string> peer_keys;
+    m_cfgPeerTable.getKeys(peer_keys);
+
+    for (auto i: peer_keys)
+    {
+        std::vector<FieldValueTuple> fvs;
+        m_cfgPeerTable.get(i, fvs);
+
+        for (auto j: fvs)
+        {
+            if (fvField(j) == "address_ipv4")
+            {
+                m_peerIp = fvValue(j);
+                break;
+            }
+        }
+    }
+
+    auto consumerStateTable = new swss::ConsumerStateTable(appDb, APP_TUNNEL_ROUTE_TABLE_NAME,
+                                                           TableConsumable::DEFAULT_POP_BATCH_SIZE, default_orch_pri);
+    auto consumer = new Consumer(consumerStateTable, this, APP_TUNNEL_ROUTE_TABLE_NAME);
+    Orch::addExecutor(consumer);
+
+    // Cleanup any existing tunnel intf
+    std::string res;
+    cmdIpTunnelIfRemove(res);
 }
 
 void TunnelMgr::doTask(Consumer &consumer)
 {
     SWSS_LOG_ENTER();
 
+    string table_name = consumer.getTableName();
+
     auto it = consumer.m_toSync.begin();
     while (it != consumer.m_toSync.end())
     {
-        bool task_result = false;
+        bool task_result = true;
 
         KeyOpFieldsValuesTuple t = it->second;
-        const vector<FieldValueTuple>& data = kfvFieldsValues(t);
-
         const std::string & op = kfvOp(t);
 
         if (op == SET_COMMAND)
         {
-            for (auto idx : data)
+            if (table_name == CFG_LOOPBACK_INTERFACE_TABLE_NAME)
+            {
+                task_result = doLpbkIntfTask(t);
+            }
+            else if (table_name == CFG_TUNNEL_TABLE_NAME)
             {
-                const auto &field = fvField(idx);
-                const auto &value = fvValue(idx);
-                if (field == "tunnel_type")
-                {
-                    if (value == "IPINIP")
-                    {
-                        task_result = doIpInIpTunnelTask(t);
-                    }
-                }
+                task_result = doTunnelTask(t);
+            }
+            else if (table_name == APP_TUNNEL_ROUTE_TABLE_NAME)
+            {
+                task_result = doTunnelRouteTask(t);
             }
         }
         else if (op == DEL_COMMAND)
         {
-            /* TODO: Handle Tunnel delete for other tunnel types */
-            task_result = doIpInIpTunnelTask(t);
+            if (table_name == CFG_TUNNEL_TABLE_NAME)
+            {
+                task_result = doTunnelTask(t);
+            }
+            else if (table_name == APP_TUNNEL_ROUTE_TABLE_NAME)
+            {
+                task_result = doTunnelRouteTask(t);
+            }
         }
         else
         {
@@ -66,22 +171,166 @@ void TunnelMgr::doTask(Consumer &consumer)
     }
 }
 
-bool TunnelMgr::doIpInIpTunnelTask(const KeyOpFieldsValuesTuple & t)
+bool TunnelMgr::doTunnelTask(const KeyOpFieldsValuesTuple & t)
 {
     SWSS_LOG_ENTER();
 
-    const std::string & TunnelName = kfvKey(t);
+    const std::string & tunnelName = kfvKey(t);
     const std::string & op = kfvOp(t);
+    TunnelInfo tunInfo;
+
+    for (auto fieldValue : kfvFieldsValues(t))
+    {
+        const std::string & field = fvField(fieldValue);
+        const std::string & value = fvValue(fieldValue);
+        if (field == "dst_ip")
+        {
+            tunInfo.dst_ip = value;
+        }
+        else if (field == "tunnel_type")
+        {
+            tunInfo.type = value;
+        }
+    }
 
     if (op == SET_COMMAND)
     {
-        m_appIpInIpTunnelTable.set(TunnelName, kfvFieldsValues(t));
+        if (tunInfo.type == IPINIP)
+        {
+            tunInfo.remote_ip = m_peerIp;
+
+            if (!m_peerIp.empty() && !configIpTunnel(tunInfo))
+            {
+                return false;
+            }
+            else if (m_peerIp.empty())
+            {
+                SWSS_LOG_NOTICE("Peer/Remote IP not configured");
+            }
+
+            m_appIpInIpTunnelTable.set(tunnelName, kfvFieldsValues(t));
+        }
+        m_tunnelCache[tunnelName] = tunInfo;
     }
     else
     {
-        m_appIpInIpTunnelTable.del(TunnelName);
+        auto it = m_tunnelCache.find(tunnelName);
+
+        if (it == m_tunnelCache.end())
+        {
+            SWSS_LOG_ERROR("Tunnel %s not found", tunnelName.c_str());
+            return true;
+        }
+
+        tunInfo = it->second;
+        if (tunInfo.type == IPINIP)
+        {
+            m_appIpInIpTunnelTable.del(tunnelName);
+        }
+        else
+        {
+            SWSS_LOG_WARN("Tunnel %s type %s is not handled", tunnelName.c_str(), tunInfo.type.c_str());
+        }
+        m_tunnelCache.erase(tunnelName);
     }
 
-    SWSS_LOG_NOTICE("Tunnel %s task, op %s", TunnelName.c_str(), op.c_str());
+    SWSS_LOG_NOTICE("Tunnel %s task, op %s", tunnelName.c_str(), op.c_str());
     return true;
+}
+
+bool TunnelMgr::doLpbkIntfTask(const KeyOpFieldsValuesTuple & t)
+{
+    SWSS_LOG_ENTER();
+
+    vector<string> keys = tokenize(kfvKey(t), config_db_key_delimiter);
+
+    /* Skip entry with just interface name.
+       Need to handle only IP prefix*/
+    if (keys.size() == 1)
+    {
+        return true;
+    }
+
+    string alias(keys[0]);
+    IpPrefix ipPrefix(keys[1]);
+
+    m_intfCache[alias] = ipPrefix;
+
+    if (alias == LOOPBACK_SRC && !m_tunnelCache.empty())
+    {
+        int ret = 0;
+        std::string res;
+        ret = cmdIpTunnelIfAddress(ipPrefix.to_string(), res);
+        if (ret != 0)
+        {
+            SWSS_LOG_WARN("Failed to assign IP addr for tun if %s, res %s",
+                           ipPrefix.to_string().c_str(), res.c_str());
+        }
+    }
+
+    SWSS_LOG_NOTICE("Loopback intf %s saved %s", alias.c_str(), ipPrefix.to_string().c_str());
+    return true;
+}
+
+bool TunnelMgr::doTunnelRouteTask(const KeyOpFieldsValuesTuple & t)
+{
+    SWSS_LOG_ENTER();
+
+    const std::string & prefix = kfvKey(t);
+    const std::string & op = kfvOp(t);
+
+    int ret = 0;
+    std::string res;
+    if (op == SET_COMMAND)
+    {
+        ret = cmdIpTunnelRouteAdd(prefix, res);
+        if (ret != 0)
+        {
+            SWSS_LOG_WARN("Failed to add route %s, res %s", prefix.c_str(), res.c_str());
+        }
+    }
+    else
+    {
+        ret = cmdIpTunnelRouteDel(prefix, res);
+        if (ret != 0)
+        {
+            SWSS_LOG_WARN("Failed to del route %s, res %s", prefix.c_str(), res.c_str());
+        }
+    }
+
+    SWSS_LOG_INFO("Route updated to kernel %s, op %s", prefix.c_str(), op.c_str());
+    return true;
+}
+
+
+bool TunnelMgr::configIpTunnel(const TunnelInfo& tunInfo)
+{
+    int ret = 0;
+    std::string res;
+
+    ret = cmdIpTunnelIfCreate(tunInfo, res);
+    if (ret != 0)
+    {
+        SWSS_LOG_WARN("Failed to create IP tunnel if (dst ip: %s, peer ip %s), res %s",
+                       tunInfo.dst_ip.c_str(), tunInfo.remote_ip.c_str(), res.c_str());
+    }
+
+    ret = cmdIpTunnelIfUp(res);
+    if (ret != 0)
+    {
+        SWSS_LOG_WARN("Failed to enable IP tunnel intf (dst ip: %s, peer ip %s), res %s",
+                       tunInfo.dst_ip.c_str(), tunInfo.remote_ip.c_str(), res.c_str());
+    }
+
+    auto it = m_intfCache.find(LOOPBACK_SRC);
+    if (it != m_intfCache.end())
+    {
+        ret = cmdIpTunnelIfAddress(it->second.to_string(), res);
+        if (ret != 0)
+        {
+            SWSS_LOG_WARN("Failed to assign IP addr for tun if %s, res %s",
+                           it->second.to_string().c_str(), res.c_str());
+        }
+    }
+    return true;
 }
diff --git a/cfgmgr/tunnelmgr.h b/cfgmgr/tunnelmgr.h
index 7c84e3e476f5..e2b601abe929 100644
--- a/cfgmgr/tunnelmgr.h
+++ b/cfgmgr/tunnelmgr.h
@@ -6,18 +6,34 @@ namespace swss {
 
+struct TunnelInfo
+{
+    std::string type;
+    std::string dst_ip;
+    std::string remote_ip;
+};
+
 class TunnelMgr : public Orch
 {
 public:
-    TunnelMgr(DBConnector *cfgDb, DBConnector *appDb, std::string tableName);
+    TunnelMgr(DBConnector *cfgDb, DBConnector *appDb, const std::vector<std::string> &tableNames);
     using Orch::doTask;
 
 private:
     void doTask(Consumer &consumer);
-    bool doIpInIpTunnelTask(const KeyOpFieldsValuesTuple & t);
+    bool doTunnelTask(const KeyOpFieldsValuesTuple & t);
+    bool doTunnelRouteTask(const KeyOpFieldsValuesTuple & t);
+    bool doLpbkIntfTask(const KeyOpFieldsValuesTuple & t);
+
+    bool configIpTunnel(const TunnelInfo& info);
 
     ProducerStateTable m_appIpInIpTunnelTable;
+    Table m_cfgPeerTable;
+
+    std::map<std::string, TunnelInfo> m_tunnelCache;
+    std::map<std::string, IpPrefix> m_intfCache;
+    std::string m_peerIp;
 };
 
 }
diff --git a/cfgmgr/tunnelmgrd.cpp b/cfgmgr/tunnelmgrd.cpp
index ea9e0871237f..d419b2b8867d 100644
--- a/cfgmgr/tunnelmgrd.cpp
+++ b/cfgmgr/tunnelmgrd.cpp
@@ -42,11 +42,15 @@ int main(int argc, char **argv)
 
     try
     {
+        vector<string> cfgTunTables = {
+            CFG_TUNNEL_TABLE_NAME,
+            CFG_LOOPBACK_INTERFACE_TABLE_NAME
+        };
 
         DBConnector cfgDb("CONFIG_DB", 0);
         DBConnector appDb("APPL_DB", 0);
 
-        TunnelMgr tunnelmgr(&cfgDb, &appDb, CFG_TUNNEL_TABLE_NAME);
+        TunnelMgr tunnelmgr(&cfgDb, &appDb, cfgTunTables);
 
         std::vector<Orch *> cfgOrchList = {&tunnelmgr};
diff --git a/orchagent/muxorch.cpp b/orchagent/muxorch.cpp
index 18bab8b10c3c..124a1cda7ce2 100644
--- a/orchagent/muxorch.cpp
+++ b/orchagent/muxorch.cpp
@@ -18,11 +18,13 @@
 #include "neighorch.h"
 #include "portsorch.h"
 #include "aclorch.h"
+#include "routeorch.h"
 
 /* Global variables */
 extern Directory<Orch*> gDirectory;
 extern CrmOrch *gCrmOrch;
 extern NeighOrch *gNeighOrch;
+extern RouteOrch *gRouteOrch;
 extern AclOrch *gAclOrch;
 extern PortsOrch *gPortsOrch;
 
@@ -316,7 +318,7 @@ bool MuxCable::stateInitActive()
 {
     SWSS_LOG_INFO("Set state to Active from %s", muxStateValToString.at(state_).c_str());
 
-    if (!nbrHandler())
+    if (!nbrHandler(true, false))
     {
         return false;
     }
@@ -341,23 +343,11 @@ bool MuxCable::stateActive()
         return false;
     }
 
-    if (!nbrHandler())
+    if (!nbrHandler(true))
     {
         return false;
     }
 
-    if (remove_route(srv_ip4_) != SAI_STATUS_SUCCESS)
-    {
-        return false;
-    }
-
-    if (remove_route(srv_ip6_) != SAI_STATUS_SUCCESS)
-    {
-        return false;
-    }
-
-    mux_orch_->removeNextHopTunnel(MUX_TUNNEL, peer_ip4_);
-
     return true;
 }
 
@@ -372,36 +362,13 @@ bool MuxCable::stateStandby()
         return false;
     }
 
-    sai_object_id_t nh = mux_orch_->createNextHopTunnel(MUX_TUNNEL, peer_ip4_);
-
-    if (nh == SAI_NULL_OBJECT_ID)
-    {
-        SWSS_LOG_INFO("Null NH object id, retry for %s", peer_ip4_.to_string().c_str());
-        return false;
-    }
-
-    if (create_route(srv_ip4_, nh) != SAI_STATUS_SUCCESS)
-    {
-        return false;
-    }
-
-    if (create_route(srv_ip6_, nh) != SAI_STATUS_SUCCESS)
-    {
-        remove_route(srv_ip4_);
-        return false;
-    }
-
     if (!nbrHandler(false))
     {
-        remove_route(srv_ip4_);
-        remove_route(srv_ip6_);
         return false;
     }
 
     if (!aclHandler(port.m_port_id))
     {
-        remove_route(srv_ip4_);
-        remove_route(srv_ip6_);
         SWSS_LOG_INFO("Add ACL drop rule failed for %s", mux_name_.c_str());
         return false;
     }
@@ -480,70 +447,232 @@ bool MuxCable::isIpInSubnet(IpAddress ip)
     }
 }
 
-bool MuxCable::nbrHandler(bool enable)
+bool MuxCable::nbrHandler(bool enable, bool update_rt)
 {
     if (enable)
     {
-        return nbr_handler_->enable();
+        return nbr_handler_->enable(update_rt);
+    }
+    else
+    {
+        sai_object_id_t tnh = mux_orch_->createNextHopTunnel(MUX_TUNNEL, peer_ip4_);
+        if (tnh == SAI_NULL_OBJECT_ID)
+        {
+            SWSS_LOG_INFO("Null NH object id, retry for %s", peer_ip4_.to_string().c_str());
+            return false;
+        }
+
+        return nbr_handler_->disable(tnh);
+    }
+}
+
+void MuxCable::updateNeighbor(NextHopKey nh, bool add)
+{
+    sai_object_id_t tnh = mux_orch_->getNextHopTunnelId(MUX_TUNNEL, peer_ip4_);
+    nbr_handler_->update(nh, tnh, add, state_);
+    if (add)
+    {
+        mux_orch_->addNexthop(nh, mux_name_);
     }
     else
     {
-        return nbr_handler_->disable();
+        mux_orch_->removeNexthop(nh);
     }
 }
 
-void MuxNbrHandler::update(IpAddress ip, string alias, bool add)
+void MuxNbrHandler::update(NextHopKey nh, sai_object_id_t tunnelId, bool add, MuxState state)
 {
+    SWSS_LOG_INFO("Neigh %s on %s, add %d, state %d",
+                   nh.ip_address.to_string().c_str(), nh.alias.c_str(), add, state);
+
+    MuxCableOrch* mux_cb_orch = gDirectory.get<MuxCableOrch*>();
+    IpPrefix pfx = nh.ip_address.to_string();
+
     if (add)
     {
-        neighbors_.add(ip);
-        if (!alias.empty() && alias != alias_)
+        if (!nh.alias.empty() && nh.alias != alias_)
         {
-            alias_ = alias;
+            alias_ = nh.alias;
+        }
+
+        if (neighbors_.find(nh.ip_address) != neighbors_.end())
+        {
+            return;
+        }
+
+        switch (state)
+        {
+        case MuxState::MUX_STATE_INIT:
+            neighbors_[nh.ip_address] = SAI_NULL_OBJECT_ID;
+            break;
+        case MuxState::MUX_STATE_ACTIVE:
+            neighbors_[nh.ip_address] = gNeighOrch->getLocalNextHopId(nh);
+            break;
+        case MuxState::MUX_STATE_STANDBY:
+            neighbors_[nh.ip_address] = tunnelId;
+            mux_cb_orch->addTunnelRoute(nh);
+            create_route(pfx, tunnelId);
+            break;
+        default:
+            SWSS_LOG_NOTICE("State '%s' not handled for nbr %s update",
+                             muxStateValToString.at(state).c_str(), nh.ip_address.to_string().c_str());
+            break;
         }
     }
     else
    {
-        neighbors_.remove(ip);
+        /* if current state is standby, remove the tunnel route */
+        if (state == MuxState::MUX_STATE_STANDBY)
+        {
+            remove_route(pfx);
+            mux_cb_orch->removeTunnelRoute(nh);
+        }
+        neighbors_.erase(nh.ip_address);
     }
 }
 
-bool MuxNbrHandler::enable()
+bool MuxNbrHandler::enable(bool update_rt)
 {
     NeighborEntry neigh;
+    MuxCableOrch* mux_cb_orch = gDirectory.get<MuxCableOrch*>();
 
-    auto it = neighbors_.getIpAddresses().begin();
-    while (it != neighbors_.getIpAddresses().end())
+    auto it = neighbors_.begin();
+    while (it != neighbors_.end())
     {
-        neigh = NeighborEntry(*it, alias_);
+        SWSS_LOG_INFO("Enabling neigh %s on %s", it->first.to_string().c_str(), alias_.c_str());
+
+        neigh = NeighborEntry(it->first, alias_);
         if (!gNeighOrch->enableNeighbor(neigh))
         {
+            SWSS_LOG_INFO("Enabling neigh failed for %s", neigh.ip_address.to_string().c_str());
+            return false;
+        }
+
+        /* Update NH to point to learned neighbor */
+        it->second = gNeighOrch->getLocalNextHopId(neigh);
+
+        /* Reprogram route */
+        NextHopKey nh_key = NextHopKey(it->first, alias_);
+        uint32_t num_routes = 0;
+        if (!gRouteOrch->updateNextHopRoutes(nh_key, num_routes))
+        {
+            SWSS_LOG_INFO("Update route failed for NH %s", nh_key.ip_address.to_string().c_str());
+            return false;
+        }
+
+        /* Increment ref count for new NHs */
+        gNeighOrch->increaseNextHopRefCount(nh_key, num_routes);
+
+        /*
+         * Invalidate current nexthop group and update with new NH
+         * Ref count update is not required for tunnel NH IDs (nh_removed)
+         */
+        uint32_t nh_removed, nh_added;
+        if (!gRouteOrch->invalidnexthopinNextHopGroup(nh_key, nh_removed))
+        {
+            SWSS_LOG_ERROR("Removing existing NH failed for %s", nh_key.ip_address.to_string().c_str());
+            return false;
+        }
+
+        if (!gRouteOrch->validnexthopinNextHopGroup(nh_key, nh_added))
+        {
+            SWSS_LOG_ERROR("Adding NH failed for %s", nh_key.ip_address.to_string().c_str());
             return false;
         }
+
+        /* Increment ref count for ECMP NH members */
+        gNeighOrch->increaseNextHopRefCount(nh_key, nh_added);
+
+        IpPrefix pfx = it->first.to_string();
+        if (update_rt)
+        {
+            if (remove_route(pfx) != SAI_STATUS_SUCCESS)
+            {
+                return false;
+            }
+            mux_cb_orch->removeTunnelRoute(nh_key);
+        }
+
         it++;
     }
 
     return true;
 }
 
-bool MuxNbrHandler::disable()
+bool MuxNbrHandler::disable(sai_object_id_t tnh)
 {
     NeighborEntry neigh;
+    MuxCableOrch* mux_cb_orch = gDirectory.get<MuxCableOrch*>();
 
-    auto it = neighbors_.getIpAddresses().begin();
-    while (it != neighbors_.getIpAddresses().end())
+    auto it = neighbors_.begin();
+    while (it != neighbors_.end())
     {
-        neigh = NeighborEntry(*it, alias_);
+        SWSS_LOG_INFO("Disabling neigh %s on %s", it->first.to_string().c_str(), alias_.c_str());
+
+        /* Update NH to point to Tunnel nexthop */
+        it->second = tnh;
+
+        /* Reprogram route */
+        NextHopKey nh_key = NextHopKey(it->first, alias_);
+        uint32_t num_routes = 0;
+        if (!gRouteOrch->updateNextHopRoutes(nh_key, num_routes))
+        {
+            SWSS_LOG_INFO("Update route failed for NH %s", nh_key.ip_address.to_string().c_str());
+            return false;
+        }
+
+        /* Decrement ref count for old NHs */
+        gNeighOrch->decreaseNextHopRefCount(nh_key, num_routes);
+
+        /* Invalidate current nexthop group and update with new NH */
+        uint32_t nh_removed, nh_added;
+        if (!gRouteOrch->invalidnexthopinNextHopGroup(nh_key, nh_removed))
+        {
for %s", nh_key.ip_address.to_string().c_str()); + return false; + } + + /* Decrement ref count for ECMP NH members */ + gNeighOrch->decreaseNextHopRefCount(nh_key, nh_removed); + + if (!gRouteOrch->validnexthopinNextHopGroup(nh_key, nh_added)) + { + SWSS_LOG_ERROR("Adding NH failed for %s", nh_key.ip_address.to_string().c_str()); + return false; + } + + neigh = NeighborEntry(it->first, alias_); if (!gNeighOrch->disableNeighbor(neigh)) + { + SWSS_LOG_INFO("Disabling neigh failed for %s", neigh.ip_address.to_string().c_str()); + return false; + } + + mux_cb_orch->addTunnelRoute(nh_key); + + IpPrefix pfx = it->first.to_string(); + if (create_route(pfx, it->second) != SAI_STATUS_SUCCESS) { return false; } + it++; } return true; } +sai_object_id_t MuxNbrHandler::getNextHopId(const NextHopKey nhKey) +{ + auto it = neighbors_.find(nhKey.ip_address); + if (it != neighbors_.end()) + { + return it->second; + } + + return SAI_NULL_OBJECT_ID; +} + std::map MuxAclHandler::acl_table_; MuxAclHandler::MuxAclHandler(sai_object_id_t port) @@ -626,7 +755,6 @@ sai_object_id_t MuxOrch::createNextHopTunnel(std::string tunnelKey, swss::IpAddr auto it = mux_tunnel_nh_.find(ipAddr); if (it != mux_tunnel_nh_.end()) { - ++it->second.ref_count; return it->second.nh_id; } @@ -666,6 +794,19 @@ bool MuxOrch::removeNextHopTunnel(std::string tunnelKey, swss::IpAddress& ipAddr return true; } +sai_object_id_t MuxOrch::getNextHopTunnelId(std::string tunnelKey, IpAddress& ipAddr) +{ + auto it = mux_tunnel_nh_.find(ipAddr); + if (it == mux_tunnel_nh_.end()) + { + SWSS_LOG_INFO("NH doesn't exist %s, ip %s", tunnelKey.c_str(), ipAddr.to_string().c_str()); + return SAI_NULL_OBJECT_ID; + } + + return it->second.nh_id; +} + + MuxCable* MuxOrch::findMuxCableInSubnet(IpAddress ip) { for (auto it = mux_cable_tb_.begin(); it != mux_cable_tb_.end(); it++) @@ -699,11 +840,40 @@ void MuxOrch::updateNeighbor(const NeighborUpdate& update) MuxCable* ptr = it->second.get(); if (ptr->isIpInSubnet(update.entry.ip_address)) { - ptr->updateNeighbor(update.entry.ip_address, update.entry.alias, update.add); + ptr->updateNeighbor(update.entry, update.add); } } } +void MuxOrch::addNexthop(NextHopKey nh, string muxName) +{ + mux_nexthop_tb_[nh] = muxName; +} + +void MuxOrch::removeNexthop(NextHopKey nh) +{ + mux_nexthop_tb_.erase(nh); +} + +sai_object_id_t MuxOrch::getNextHopId(const NextHopKey &nh) +{ + if (mux_nexthop_tb_.find(nh) == mux_nexthop_tb_.end()) + { + return SAI_NULL_OBJECT_ID; + } + + auto mux_name = mux_nexthop_tb_[nh]; + if (!isMuxExists(mux_name)) + { + SWSS_LOG_WARN("Mux entry for port '%s' doesn't exist", mux_name.c_str()); + return SAI_NULL_OBJECT_ID; + } + + auto ptr = getMuxCable(mux_name); + + return ptr->getNextHopId(nh); +} + void MuxOrch::update(SubjectType type, void *cntx) { SWSS_LOG_ENTER(); @@ -868,7 +1038,8 @@ bool MuxOrch::delOperation(const Request& request) } MuxCableOrch::MuxCableOrch(DBConnector *db, const std::string& tableName): - Orch2(db, tableName, request_) + Orch2(db, tableName, request_), + app_tunnel_route_table_(db, APP_TUNNEL_ROUTE_TABLE_NAME) { mux_table_ = unique_ptr(new Table(db, APP_HW_MUX_CABLE_TABLE_NAME)); } @@ -881,6 +1052,44 @@ void MuxCableOrch::updateMuxState(string portName, string muxState) mux_table_->set(portName, tuples); } +void MuxCableOrch::addTunnelRoute(const NextHopKey &nhKey) +{ + if (!nhKey.ip_address.isV4()) + { + SWSS_LOG_INFO("IPv6 tunnel route add '%s' - (Not Implemented)", nhKey.ip_address.to_string().c_str()); + return; + } + + vector data; + string key, alias = nhKey.alias; 
+
+    IpPrefix pfx = nhKey.ip_address.to_string();
+    key = pfx.to_string();
+
+    FieldValueTuple fvTuple("alias", alias);
+    data.push_back(fvTuple);
+
+    SWSS_LOG_INFO("Add tunnel route DB '%s:%s'", alias.c_str(), key.c_str());
+    app_tunnel_route_table_.set(key, data);
+}
+
+void MuxCableOrch::removeTunnelRoute(const NextHopKey &nhKey)
+{
+    if (!nhKey.ip_address.isV4())
+    {
+        SWSS_LOG_INFO("IPv6 tunnel route remove '%s' - (Not Implemented)", nhKey.ip_address.to_string().c_str());
+        return;
+    }
+
+    string key, alias = nhKey.alias;
+
+    IpPrefix pfx = nhKey.ip_address.to_string();
+    key = pfx.to_string();
+
+    SWSS_LOG_INFO("Remove tunnel route DB '%s:%s'", alias.c_str(), key.c_str());
+    app_tunnel_route_table_.del(key);
+}
+
 bool MuxCableOrch::addOperation(const Request& request)
 {
     SWSS_LOG_ENTER();
diff --git a/orchagent/muxorch.h b/orchagent/muxorch.h
index 2d3c15c1cd2d..2e97c5b6097a 100644
--- a/orchagent/muxorch.h
+++ b/orchagent/muxorch.h
@@ -9,6 +9,7 @@
 #include "portsorch.h"
 #include "tunneldecaporch.h"
 #include "aclorch.h"
+#include "neighorch.h"
 
 enum MuxState
 {
@@ -49,18 +50,23 @@ class MuxAclHandler
     sai_object_id_t port_ = SAI_NULL_OBJECT_ID;
 };
 
-// Mux Neighbor Handler for adding/removing neigbhors
+// IP to nexthop index mapping
+typedef std::map<IpAddress, sai_object_id_t> MuxNeighbor;
+
+// Mux Neighbor Handler for adding/removing neighbors
 class MuxNbrHandler
 {
 public:
     MuxNbrHandler() = default;
 
-    bool enable();
-    bool disable();
-    void update(IpAddress, string alias = "", bool = true);
+    bool enable(bool update_rt);
+    bool disable(sai_object_id_t);
+    void update(NextHopKey nh, sai_object_id_t, bool = true, MuxState = MuxState::MUX_STATE_INIT);
+
+    sai_object_id_t getNextHopId(const NextHopKey);
 
 private:
-    IpAddresses neighbors_;
+    MuxNeighbor neighbors_;
     string alias_;
 };
 
@@ -83,9 +89,10 @@ class MuxCable
     bool isStateChangeInProgress() { return st_chg_in_progress_; }
 
     bool isIpInSubnet(IpAddress ip);
-    void updateNeighbor(IpAddress ip, string alias, bool add)
+    void updateNeighbor(NextHopKey nh, bool add);
+    sai_object_id_t getNextHopId(const NextHopKey nh)
     {
-        nbr_handler_->update(ip, alias, add);
+        return nbr_handler_->getNextHopId(nh);
     }
 
 private:
@@ -93,8 +100,8 @@ class MuxCable
     bool stateInitActive();
     bool stateStandby();
 
-    bool aclHandler(sai_object_id_t, bool = true);
-    bool nbrHandler(bool = true);
+    bool aclHandler(sai_object_id_t, bool add = true);
+    bool nbrHandler(bool enable, bool update_routes = true);
 
     string mux_name_;
 
@@ -133,6 +140,7 @@ struct NHTunnel
 typedef std::unique_ptr<MuxCable> MuxCable_T;
 typedef std::map<std::string, MuxCable_T> MuxCableTb;
 typedef std::map<IpAddress, NHTunnel> MuxTunnelNHs;
+typedef std::map<NextHopKey, std::string> NextHopTb;
 
 class MuxCfgRequest : public Request
 {
@@ -164,8 +172,13 @@ class MuxOrch : public Orch2, public Observer, public Subject
     void update(SubjectType, void *);
     void updateNeighbor(const NeighborUpdate&);
 
+    void addNexthop(NextHopKey, string);
+    void removeNexthop(NextHopKey);
+    sai_object_id_t getNextHopId(const NextHopKey&);
+
     sai_object_id_t createNextHopTunnel(std::string tunnelKey, IpAddress& ipAddr);
     bool removeNextHopTunnel(std::string tunnelKey, IpAddress& ipAddr);
+    sai_object_id_t getNextHopTunnelId(std::string tunnelKey, IpAddress& ipAddr);
 
 private:
     virtual bool addOperation(const Request& request);
@@ -179,6 +192,7 @@ class MuxOrch : public Orch2, public Observer, public Subject
 
     MuxCableTb mux_cable_tb_;
     MuxTunnelNHs mux_tunnel_nh_;
+    NextHopTb mux_nexthop_tb_;
 
     handler_map handler_map_;
 
@@ -208,6 +222,8 @@ class MuxCableOrch : public Orch2
     MuxCableOrch(DBConnector *db, const std::string& tableName);
 
     void updateMuxState(string portName, string muxState);
+    void addTunnelRoute(const NextHopKey &nhKey);
+    void removeTunnelRoute(const NextHopKey &nhKey);
 
 private:
     virtual bool addOperation(const Request& request);
@@ -215,6 +231,7 @@ class MuxCableOrch : public Orch2
     unique_ptr<Table> mux_table_;
     MuxCableRequest request_;
+    ProducerStateTable app_tunnel_route_table_;
 };
 
 const request_description_t mux_state_request_description = {
diff --git a/orchagent/neighorch.cpp b/orchagent/neighorch.cpp
index 246d044a8b37..267e51544a98 100644
--- a/orchagent/neighorch.cpp
+++ b/orchagent/neighorch.cpp
@@ -135,8 +135,17 @@ void NeighOrch::update(SubjectType type, void *cntx)
     return;
 }
 
+
 bool NeighOrch::hasNextHop(const NextHopKey &nexthop)
 {
+    // First check if mux has NH
+    MuxOrch* mux_orch = gDirectory.get<MuxOrch*>();
+    sai_object_id_t nhid = mux_orch->getNextHopId(nexthop);
+    if (nhid != SAI_NULL_OBJECT_ID)
+    {
+        return true;
+    }
+
     return m_syncdNextHops.find(nexthop) != m_syncdNextHops.end();
 }
 
@@ -250,11 +259,11 @@ bool NeighOrch::setNextHopFlag(const NextHopKey &nexthop, const uint32_t nh_flag
     }
 
     nhop->second.nh_flags |= nh_flag;
-
+    uint32_t count;
     switch (nh_flag)
     {
         case NHFLAGS_IFDOWN:
-            rc = gRouteOrch->invalidnexthopinNextHopGroup(nexthop);
+            rc = gRouteOrch->invalidnexthopinNextHopGroup(nexthop, count);
             break;
         default:
             assert(0);
@@ -279,11 +288,11 @@ bool NeighOrch::clearNextHopFlag(const NextHopKey &nexthop, const uint32_t nh_fl
     }
 
     nhop->second.nh_flags &= ~nh_flag;
-
+    uint32_t count;
     switch (nh_flag)
     {
         case NHFLAGS_IFDOWN:
-            rc = gRouteOrch->validnexthopinNextHopGroup(nexthop);
+            rc = gRouteOrch->validnexthopinNextHopGroup(nexthop, count);
             break;
         default:
             assert(0);
@@ -391,9 +400,31 @@ bool NeighOrch::removeOverlayNextHop(const NextHopKey &nexthop)
     return true;
 }
 
+sai_object_id_t NeighOrch::getLocalNextHopId(const NextHopKey& nexthop)
+{
+    if (m_syncdNextHops.find(nexthop) == m_syncdNextHops.end())
+    {
+        return SAI_NULL_OBJECT_ID;
+    }
+
+    return m_syncdNextHops[nexthop].next_hop_id;
+}
+
 sai_object_id_t NeighOrch::getNextHopId(const NextHopKey &nexthop)
 {
     assert(hasNextHop(nexthop));
+
+    /*
+     * The nexthop id could be varying depending on the use-case
+     * For e.g, a route could have a direct neighbor but may require
+     * to be tx via tunnel nexthop
+     */
+    MuxOrch* mux_orch = gDirectory.get<MuxOrch*>();
+    sai_object_id_t nhid = mux_orch->getNextHopId(nexthop);
+    if (nhid != SAI_NULL_OBJECT_ID)
+    {
+        return nhid;
+    }
     return m_syncdNextHops[nexthop].next_hop_id;
 }
 
@@ -403,16 +434,22 @@ int NeighOrch::getNextHopRefCount(const NextHopKey &nexthop)
     return m_syncdNextHops[nexthop].ref_count;
 }
 
-void NeighOrch::increaseNextHopRefCount(const NextHopKey &nexthop)
+void NeighOrch::increaseNextHopRefCount(const NextHopKey &nexthop, uint32_t count)
 {
     assert(hasNextHop(nexthop));
-    m_syncdNextHops[nexthop].ref_count ++;
+    if (m_syncdNextHops.find(nexthop) != m_syncdNextHops.end())
+    {
+        m_syncdNextHops[nexthop].ref_count += count;
+    }
 }
 
-void NeighOrch::decreaseNextHopRefCount(const NextHopKey &nexthop)
+void NeighOrch::decreaseNextHopRefCount(const NextHopKey &nexthop, uint32_t count)
 {
     assert(hasNextHop(nexthop));
-    m_syncdNextHops[nexthop].ref_count --;
+    if (m_syncdNextHops.find(nexthop) != m_syncdNextHops.end())
+    {
+        m_syncdNextHops[nexthop].ref_count -= count;
+    }
 }
 
 bool NeighOrch::getNeighborEntry(const NextHopKey &nexthop, NeighborEntry &neighborEntry, MacAddress &macAddress)
diff --git a/orchagent/neighorch.h b/orchagent/neighorch.h
index 20b6f3913cb6..60981e9f11c2 100644
--- a/orchagent/neighorch.h
+++ b/orchagent/neighorch.h
@@ -50,10 +50,11 @@ class NeighOrch : public Orch, public Subject, public Observer
     bool hasNextHop(const NextHopKey&);
 
     sai_object_id_t getNextHopId(const NextHopKey&);
+    sai_object_id_t getLocalNextHopId(const NextHopKey&);
     int getNextHopRefCount(const NextHopKey&);
 
-    void increaseNextHopRefCount(const NextHopKey&);
-    void decreaseNextHopRefCount(const NextHopKey&);
+    void increaseNextHopRefCount(const NextHopKey&, uint32_t count = 1);
+    void decreaseNextHopRefCount(const NextHopKey&, uint32_t count = 1);
 
     bool getNeighborEntry(const NextHopKey&, NeighborEntry&, MacAddress&);
     bool getNeighborEntry(const IpAddress&, NeighborEntry&, MacAddress&);
diff --git a/orchagent/routeorch.cpp b/orchagent/routeorch.cpp
index 4eb830f8fc8c..fdec0db3cfd5 100644
--- a/orchagent/routeorch.cpp
+++ b/orchagent/routeorch.cpp
@@ -295,12 +295,13 @@ void RouteOrch::detach(Observer *observer, const IpAddress& dstAddr, sai_object_
     }
 }
 
-bool RouteOrch::validnexthopinNextHopGroup(const NextHopKey &nexthop)
+bool RouteOrch::validnexthopinNextHopGroup(const NextHopKey &nexthop, uint32_t& count)
 {
     SWSS_LOG_ENTER();
 
     sai_object_id_t nexthop_id;
     sai_status_t status;
+    count = 0;
 
     for (auto nhopgroup = m_syncdNextHopGroups.begin();
          nhopgroup != m_syncdNextHopGroups.end(); ++nhopgroup)
@@ -333,6 +334,7 @@ bool RouteOrch::validnexthopinNextHopGroup(const NextHopKey &nexthop)
             return false;
         }
 
+        ++count;
         gCrmOrch->incCrmResUsedCounter(CrmResourceType::CRM_NEXTHOP_GROUP_MEMBER);
         nhopgroup->second.nhopgroup_members[nexthop] = nexthop_id;
     }
@@ -345,12 +347,13 @@ bool RouteOrch::validnexthopinNextHopGroup(const NextHopKey &nexthop)
     return true;
 }
 
-bool RouteOrch::invalidnexthopinNextHopGroup(const NextHopKey &nexthop)
+bool RouteOrch::invalidnexthopinNextHopGroup(const NextHopKey &nexthop, uint32_t& count)
 {
     SWSS_LOG_ENTER();
 
     sai_object_id_t nexthop_id;
     sai_status_t status;
+    count = 0;
 
     for (auto nhopgroup = m_syncdNextHopGroups.begin();
          nhopgroup != m_syncdNextHopGroups.end(); ++nhopgroup)
@@ -371,6 +374,7 @@ bool RouteOrch::invalidnexthopinNextHopGroup(const NextHopKey &nexthop)
             return false;
         }
 
+        ++count;
         gCrmOrch->decCrmResUsedCounter(CrmResourceType::CRM_NEXTHOP_GROUP_MEMBER);
     }
 
@@ -562,7 +566,7 @@ void RouteOrch::doTask(Consumer& consumer)
                  * way is to create loopback interface and then create
                  * route pointing to it, so that we can traps packets to
                  * CPU */
-                if (alias == "eth0" || alias == "docker0" ||
+                if (alias == "eth0" || alias == "docker0" || alias == "tun0" ||
                     alias == "lo" || !alias.compare(0, strlen(LOOPBACK_PREFIX), LOOPBACK_PREFIX))
                 {
                     excp_intfs_flag = true;
@@ -1190,6 +1194,52 @@ bool RouteOrch::removeNextHopGroup(const NextHopGroupKey &nexthops)
     return true;
 }
 
+bool RouteOrch::updateNextHopRoutes(const NextHopKey& nextHop, uint32_t& numRoutes)
+{
+    numRoutes = 0;
+    sai_route_entry_t route_entry;
+    sai_attribute_t route_attr;
+    sai_object_id_t next_hop_id;
+
+    for (auto rt_table : m_syncdRoutes)
+    {
+        for (auto rt_entry : rt_table.second)
+        {
+            // Skip routes with ecmp nexthops
+            if (rt_entry.second.getSize() > 1)
+            {
+                continue;
+            }
+
+            if (rt_entry.second.contains(nextHop))
+            {
+                SWSS_LOG_INFO("Updating route %s during nexthop status change",
+                               rt_entry.first.to_string().c_str());
+                next_hop_id = m_neighOrch->getNextHopId(nextHop);
+
+                route_entry.vr_id = rt_table.first;
+                route_entry.switch_id = gSwitchId;
+                copy(route_entry.destination, rt_entry.first);
+
+                route_attr.id = SAI_ROUTE_ENTRY_ATTR_NEXT_HOP_ID;
+                route_attr.value.oid = next_hop_id;
+
+                sai_status_t status = sai_route_api->set_route_entry_attribute(&route_entry, &route_attr);
+                if (status != SAI_STATUS_SUCCESS)
+                {
+                    SWSS_LOG_ERROR("Failed to update route %s, rv:%d",
+                                    rt_entry.first.to_string().c_str(), status);
+                    return false;
+                }
+
+                ++numRoutes;
+            }
+        }
+    }
+
+    return true;
+}
+
 void RouteOrch::addTempRoute(RouteBulkContext& ctx, const NextHopGroupKey &nextHops)
 {
     SWSS_LOG_ENTER();
diff --git a/orchagent/routeorch.h b/orchagent/routeorch.h
index 214c2bd4ae99..a2ca2bdc90c1 100644
--- a/orchagent/routeorch.h
+++ b/orchagent/routeorch.h
@@ -107,8 +107,10 @@ class RouteOrch : public Orch, public Subject
     bool addNextHopGroup(const NextHopGroupKey&);
     bool removeNextHopGroup(const NextHopGroupKey&);
 
-    bool validnexthopinNextHopGroup(const NextHopKey&);
-    bool invalidnexthopinNextHopGroup(const NextHopKey&);
+    bool updateNextHopRoutes(const NextHopKey&, uint32_t&);
+
+    bool validnexthopinNextHopGroup(const NextHopKey&, uint32_t&);
+    bool invalidnexthopinNextHopGroup(const NextHopKey&, uint32_t&);
 
     bool createRemoteVtep(sai_object_id_t, const NextHopKey&);
     bool deleteRemoteVtep(sai_object_id_t, const NextHopKey&);